url
stringlengths
6
1.61k
fetch_time
int64
1,368,856,904B
1,726,893,854B
content_mime_type
stringclasses
3 values
warc_filename
stringlengths
108
138
warc_record_offset
int32
9.6k
1.74B
warc_record_length
int32
664
793k
text
stringlengths
45
1.04M
token_count
int32
22
711k
char_count
int32
45
1.04M
metadata
stringlengths
439
443
score
float64
2.52
5.09
int_score
int64
3
5
crawl
stringclasses
93 values
snapshot_type
stringclasses
2 values
language
stringclasses
1 value
language_score
float64
0.06
1
https://hg.stevelosh.com/euler/rev/829e38d1f825
1,660,404,662,000,000,000
text/html
crawl-data/CC-MAIN-2022-33/segments/1659882571959.66/warc/CC-MAIN-20220813142020-20220813172020-00431.warc.gz
320,491,845
8,488
# 829e38d1f825 `Fuck a `LOOP`` author Steve Losh Fri, 10 Feb 2017 21:11:38 +0000 a66997c0fad3 32aa6dc56935 (none) src/euler.lisp src/primes.lisp vendor/make-quickutils.lisp vendor/quickutils.lisp ## Changes ```diff -r a66997c0fad3 -r 829e38d1f825 src/euler.lisp --- a/src/euler.lisp Fri Feb 10 20:26:42 2017 +0000 +++ b/src/euler.lisp Fri Feb 10 21:11:38 2017 +0000 @@ -10,7 +10,7 @@ (let ((s (format nil "~D" n))) (string= s (reverse s)))) -(defun palindrome-p (n) +(defun palindromep (n) "Return whether `n` is a palindrome (in base 10)." (assert (>= n 0) (n) "~A must be a non-negative integer" n) ;; All even-length base-10 palindromes are divisible by 11, so we can shortcut @@ -26,20 +26,9 @@ (not (dividesp n 11))) nil) (t (definitely-palindrome-p n)))) -(defun range (from below) - (loop :for i :from from :below below - :collect i)) - -(defun square (n) - (* n n)) - - -(defun random-exclusive (min max) - "Return an integer in the range (`min`, `max`)." - (+ 1 min (random (- max min 1)))) -(defun dividesp (n divisor) - "Return whether `n` is evenly divisible by `divisor`." - (zerop (mod n divisor))) +(defun sum (sequence) + (iterate (for n :in-whatever sequence) + (sum n))) ;;;; Problems ----------------------------------------------------------------- @@ -48,10 +37,10 @@ ;; we get 3, 5, 6 and 9. The sum of these multiples is 23. ;; ;; Find the sum of all the multiples of 3 or 5 below 1000. - (loop :for i :from 1 :below 1000 - :when (or (dividesp i 3) - (dividesp i 5)) - :sum i)) + (iterate (for i :from 1 :below 1000) + (when (or (dividesp i 3) + (dividesp i 5)) + (sum i)))) (defun problem-2 () ;; Each new term in the Fibonacci sequence is generated by adding the previous @@ -61,12 +50,13 @@ ;; ;; By considering the terms in the Fibonacci sequence whose values do not ;; exceed four million, find the sum of the even-valued terms. 
- (loop :with p = 0 - :with n = 1 - :while (<= n 4000000) - :when (evenp n) :sum n - :do (psetf p n - n (+ p n)))) + (iterate (with a = 0) + (with b = 1) + (while (<= b 4000000)) + (when (evenp b) + (sum b)) + (psetf a b + b (+ a b)))) (defun problem-3 () ;; The prime factors of 13195 are 5, 7, 13 and 29. @@ -79,13 +69,11 @@ ;; from the product of two 2-digit numbers is 9009 = 91 × 99. ;; ;; Find the largest palindrome made from the product of two 3-digit numbers. - (let ((result (list))) - (loop :for i :from 0 :to 999 - :do (loop :for j :from 0 :to 999 - :for product = (* i j) - :when (palindrome-p product) - :do (push product result))) - (apply #'max result))) + (iterate (for-nested ((i :from 0 :to 999) + (j :from 0 :to 999))) + (for product = (* i j)) + (when (palindromep product) + (maximize product)))) (defun problem-5 () ;; 2520 is the smallest number that can be divided by each of the numbers from @@ -93,7 +81,7 @@ ;; ;; What is the smallest positive number that is evenly divisible by all of the ;; numbers from 1 to 20? - (let ((divisors (range 11 21))) + (iterate ;; all numbers are divisible by 1 and we can skip checking everything <= 10 ;; because: ;; @@ -106,18 +94,17 @@ ;; anything divisible by 16 is automatically divisible by 8 ;; anything divisible by 18 is automatically divisible by 9 ;; anything divisible by 20 is automatically divisible by 10 - (loop :for i - :from 20 :by 20 ; it must be divisible by 20 - :when (every (lambda (n) (dividesp i n)) - divisors) - :return i))) + (with divisors = (range 11 21)) + (for i :from 20 :by 20) ; it must be divisible by 20 + (finding i :such-that (every (lambda (n) (dividesp i n)) + divisors)))) (defun problem-6 () ;; The sum of the squares of the first ten natural numbers is, - ;; 1^2 + 2^2 + ... + 10^2 = 385 + ;; 1² + 2² + ... + 10² = 385 ;; ;; The square of the sum of the first ten natural numbers is, - ;; (1 + 2 + ... + 10)^2 = 55^2 = 3025 + ;; (1 + 2 + ... 
+ 10)² = 55² = 3025 ;; ;; Hence the difference between the sum of the squares of the first ten ;; natural numbers and the square of the sum is 3025 − 385 = 2640. @@ -125,37 +112,55 @@ ;; Find the difference between the sum of the squares of the first one hundred ;; natural numbers and the square of the sum. (flet ((sum-of-squares (to) - (loop :for i :from 1 :to to - :sum (square i))) + (sum (range 1 (1+ to) :key #'square))) (square-of-sum (to) - (square (loop :for i :from 1 :to to - :sum i)))) + (square (sum (range 1 (1+ to)))))) (abs (- (sum-of-squares 100) ; apparently it wants the absolute value (square-of-sum 100))))) (defun problem-7 () + ;; By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see + ;; that the 6th prime is 13. + ;; + ;; What is the 10 001st prime number? (nth-prime 10001)) (defun problem-8 () - (let ((digits (map 'list #'digit-char-p - "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"))) - (loop :for window :in (n-grams 13 digits) - :maximize (apply #'* window)))) + ;; The four adjacent 
digits in the 1000-digit number that have the greatest + ;; product are 9 × 9 × 8 × 9 = 5832. + ;; + ;; Find the thirteen adjacent digits in the 1000-digit number that have the + ;; greatest product. What is the value of this product? + (let ((digits (map 'list #'digit-char-p + "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"))) + (iterate (for window :in (n-grams 13 digits)) + (maximize (apply #'* window))))) (defun problem-9 () + ;; A Pythagorean triplet is a set of three natural numbers, a < b < c, for + ;; which: + ;; + ;; a² + b² = c² + ;; + ;; For example, 3² + 4² = 9 + 16 = 25 = 5². + ;; + ;; There exists exactly one Pythagorean triplet for which a + b + c = 1000. + ;; Find the product abc. 
(flet ((pythagorean-triplet-p (a b c) (= (+ (square a) (square b)) (square c)))) - (block search - (loop :for c :from 998 :downto 1 ; they must add up to 1000, so C can be at most 998 - :do (loop :for a :from (- 999 c) :downto 1 ; A can be at most 999 - C (to leave 1 for B) - :for b = (- 1000 c a) - :when (pythagorean-triplet-p a b c) - :do (return-from search (* a b c))))))) + ;; They must add up to 1000, so C can be at most 998. + ;; A can be at most 999 - C (to leave 1 for B). + (iterate (for c :from 998 :downto 1) + (iterate (for a :from (- 999 c) :downto 1) + (for b = (- 1000 c a)) + (when (pythagorean-triplet-p a b c) + (return-from problem-9 (* a b c))))))) (defun problem-10 () - (loop :for p :in (primes-below 2000000) - :sum p)) + ;; The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17. + ;; Find the sum of all the primes below two million. + (sum (primes-below 2000000))) ;;;; Tests --------------------------------------------------------------------``` ```diff -r a66997c0fad3 -r 829e38d1f825 src/primes.lisp --- a/src/primes.lisp Fri Feb 10 20:26:42 2017 +0000 +++ b/src/primes.lisp Fri Feb 10 21:11:38 2017 +0000 @@ -71,7 +71,7 @@ (flet ((fermat-check (a) (= (expmod a n n) a))) (loop :repeat tests - :when (not (fermat-check (random-exclusive 0 n))) + :when (not (fermat-check (random-range-exclusive 0 n))) :do (return nil) :finally (return t)))) @@ -112,7 +112,7 @@ :when (= y (1- n)) :do (return t)))))) (loop :repeat k - :for a = (random-exclusive 1 (1- n)) + :for a = (random-range-exclusive 1 (1- n)) :always (strong-liar-p a))))))) (defun brute-force-prime-p (n)``` ```diff -r a66997c0fad3 -r 829e38d1f825 vendor/make-quickutils.lisp --- a/vendor/make-quickutils.lisp Fri Feb 10 20:26:42 2017 +0000 +++ b/vendor/make-quickutils.lisp Fri Feb 10 21:11:38 2017 +0000 @@ -5,10 +5,11 @@ :utilities '( :define-constant + :ensure-boolean + :n-grams + :range :switch - :ensure-boolean :with-gensyms - :n-grams ) :package "EULER.QUICKUTILS")``` ```diff -r a66997c0fad3 -r 
829e38d1f825 vendor/quickutils.lisp --- a/vendor/quickutils.lisp Fri Feb 10 20:26:42 2017 +0000 +++ b/vendor/quickutils.lisp Fri Feb 10 21:11:38 2017 +0000 @@ -2,7 +2,7 @@ ;;;; See http://quickutil.org for details. ;;;; To regenerate: -;;;; (qtlc:save-utils-as "quickutils.lisp" :utilities '(:DEFINE-CONSTANT :SWITCH :ENSURE-BOOLEAN :WITH-GENSYMS :N-GRAMS) :ensure-package T :package "EULER.QUICKUTILS") +;;;; (qtlc:save-utils-as "quickutils.lisp" :utilities '(:DEFINE-CONSTANT :ENSURE-BOOLEAN :N-GRAMS :RANGE :SWITCH :WITH-GENSYMS) :ensure-package T :package "EULER.QUICKUTILS") (unless (find-package "EULER.QUICKUTILS") @@ -13,9 +13,10 @@ (in-package "EULER.QUICKUTILS") (when (boundp '*utilities*) - (setf *utilities* (union *utilities* '(:DEFINE-CONSTANT :STRING-DESIGNATOR + (setf *utilities* (union *utilities* '(:DEFINE-CONSTANT :ENSURE-BOOLEAN :TAKE + :N-GRAMS :RANGE :STRING-DESIGNATOR :WITH-GENSYMS :EXTRACT-FUNCTION-NAME - :SWITCH :ENSURE-BOOLEAN :TAKE :N-GRAMS)))) + :SWITCH)))) (defun %reevaluate-constant (name value test) (if (not (boundp name)) @@ -54,6 +55,40 @@ ,@(when documentation `(,documentation)))) + (defun ensure-boolean (x) + "Convert `x` into a Boolean value." + (and x t)) + + + (defun take (n sequence) + "Take the first `n` elements from `sequence`." + (subseq sequence 0 n)) + + + (defun n-grams (n sequence) + "Find all `n`-grams of the sequence `sequence`." + (assert (and (plusp n) + (<= n (length sequence)))) + + (etypecase sequence + ;; Lists + (list (loop :repeat (1+ (- (length sequence) n)) + :for seq :on sequence + :collect (take n seq))) + + ;; General sequences + (sequence (loop :for i :to (- (length sequence) n) + :collect (subseq sequence i (+ i n)))))) + + + (defun range (start end &key (step 1) (key 'identity)) + "Return the list of numbers `n` such that `start <= n < end` and +`n = start + k*step` for suitable integers `k`. If a function `key` is +provided, then apply it to each number." 
+ (assert (<= start end)) + (loop :for i :from start :below end :by step :collecting (funcall key i))) + + (deftype string-designator () "A string designator type. A string designator is either a string, a symbol, or a character." @@ -147,34 +182,8 @@ "Like `switch`, but signals a continuable error if no key matches." (generate-switch-body whole object clauses test key '(cerror "Return NIL from CSWITCH."))) - - (defun ensure-boolean (x) - "Convert `x` into a Boolean value." - (and x t)) - - - (defun take (n sequence) - "Take the first `n` elements from `sequence`." - (subseq sequence 0 n)) - - - (defun n-grams (n sequence) - "Find all `n`-grams of the sequence `sequence`." - (assert (and (plusp n) - (<= n (length sequence)))) - - (etypecase sequence - ;; Lists - (list (loop :repeat (1+ (- (length sequence) n)) - :for seq :on sequence - :collect (take n seq))) - - ;; General sequences - (sequence (loop :for i :to (- (length sequence) n) - :collect (subseq sequence i (+ i n)))))) -
4,411
13,699
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.625
3
CC-MAIN-2022-33
longest
en
0.569205
http://what-when-how.com/Tutorial/topic-187o2rrq2s/HTML5-Canvas-737.html
1,511,462,447,000,000,000
text/html
crawl-data/CC-MAIN-2017-47/segments/1510934806856.86/warc/CC-MAIN-20171123180631-20171123200631-00648.warc.gz
303,958,200
4,107
HTML and CSS Reference In-Depth Information A* with Node Weights For Example 8-16 , we will be adding weighs to our nodes. We'll do this by simply adding in some grass tiles to the tile map we have been using in the previous examples. By doing this, we can change the A* search result in a path avoiding the grass tiles has a lower total node value sum than one that travels over the grass tiles. We can add to the weight of each open node by simply giving it a number higher than 1. We have created our tile sheet to make this very simple. The third tile (or tile index 2) is a grass tile. With astar.as , as long as a tile has a node value greater than 0, it is considered a movable ues into account, is the lowest, the path will cross these nodes with higher values. To demon- strate this, we will now add some grass tiles to the tile map. The changes for Example 8-16 are below. Notice that we are also removing the diagonal movement from Example 8-15 , but it is not mandatory that you do so. We will look at that in Example 8-17 : //Example 8-16 changes to example 8-15 tile map var var mapRows = 15 ; var var mapCols = 15 ; var var tileMap = [ [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ] ,[ 0 , 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ] ,[ 0 , 1 , 0 , 1 , 0 , 0 , 1 , 0 , 1 , 0 , 0 , 1 , 0 , 1 , 0 ] ,[ 0 , 1 , 0 , 1 , 0 , 0 , 1 , 0 , 1 , 0 , 0 , 1 , 0 , 1 , 0 ] ,[ 0 , 1 , 0 , 1 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 1 , 0 , 1 , 0 ] ,[ 0 , 2 , 1 , 1 , 1 , 1 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 0 ] ,[ 0 , 1 , 0 , 0 , 0 , 1 , 0 , 0 , 0 , 1 , 0 , 0 , 0 , 1 , 0 ] ,[ 0 , 1 , 1 , 1 , 2 , 1 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 0 ] ,[ 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 ] ,[ 0 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 0 ] ,[ 0 , 1 , 0 , 1 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 1 , 0 , 1 , 0 ] ,[ 0 , 1 , 0 , 1 , 0 , 0 , 2 , 0 , 1 , 0 , 0 , 1 , 0 , 1 , 0 ] ,[ 0 , 1 , 0 , 1 , 0 , 0 , 1 , 0 , 1 , 0 , 0 , 1 , 0 , 1 , 0 ] ,[ 0 , 1 , 1 , 1 , 1 , 1 , 1 , 
2 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ] ,[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ] ]; //set up a* graph var var graph = new new Graph ( tileMap ); var var startNode = { x : 4 , y : 1 }; // use values of map turned on side var var endNode = { x : 13 , y : 10 }; //create node list var var start = graph . nodes [ startNode . x ][ startNode . y ]; Search WWH :: Custom Search
1,083
2,386
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.640625
3
CC-MAIN-2017-47
latest
en
0.528915
http://www.abovetopsecret.com/forum/thread1148156/pg1
1,481,005,001,000,000,000
text/html
crawl-data/CC-MAIN-2016-50/segments/1480698541883.3/warc/CC-MAIN-20161202170901-00245-ip-10-31-129-80.ec2.internal.warc.gz
290,906,329
16,498
It looks like you're using an Ad Blocker. Thank you. Some features of ATS will be disabled while you continue to use an ad-blocker. Help ATS via PayPal: Physicists Uncover Strange Numbers in Particle Collisions page: 1 20 share: posted on Nov, 20 2016 @ 09:43 PM There is an excellent article in Wired Magazine regarding the difficulty Physicists are having with the Feynman diagrams and associated maths that go with it when examining collisions between protons vs. electrons. Here's a snippet: AT THE LARGE Hadron Collider in Geneva, physicists shoot protons around a 17-mile track and smash them together at nearly the speed of light. It’s one of the most finely tuned scientific experiments in the world, but when trying to make sense of the quantum debris, physicists begin with a strikingly simple tool called a Feynman diagram that’s not that different from how a child would depict the situation. Feynman diagrams were devised by Richard Feynman in the 1940s. They feature lines representing elementary particles that converge at a vertex (which represents a collision) and then diverge from there to represent the pieces that emerge from the crash. Those lines either shoot off alone or converge again. The chain of collisions can be as long as a physicist dares to consider. To that schematic physicists then add numbers, for the mass, momentum and direction of the particles involved. Then they begin a laborious accounting procedure—integrate these, add that, square this. The final result is a single number, called a Feynman probability, which quantifies the chance that the particle collision will play out as sketched. “In some sense Feynman invented this diagram to encode complicated math as a bookkeeping device,” said Sergei Gukov, a theoretical physicist and mathematician at the California Institute of Technology. Feynman diagrams have served physics well over the years, but they have limitations. One is strictly procedural. 
Physicists are pursuing increasingly high-energy particle collisions that require greater precision of measurement—and as the precision goes up, so does the intricacy of the Feynman diagrams that need to be calculated to generate a prediction. As we get ever closer to understanding what this universe is really made of, the struggles our physicists are having figuring this all out, is part and parcel of the adventure. Great article. And BTW - it was reprinted with permission from Quanta magazine. I wonder what they focus on... Physicists Uncover Strange Numbers in Particle Collisions posted on Nov, 20 2016 @ 10:12 PM What they are doing there will require more difficult calculations. They will find new events from particle collisions probably daily. It is a great adventure! posted on Nov, 20 2016 @ 10:25 PM Thats a rabbit hole i have no interest in.. +4 more posted on Nov, 20 2016 @ 10:49 PM originally posted by: tikbalang Thats a rabbit hole i have no interest in.. Gee, thanks for stopping by to share that with us. And to think I was concerned with what rabbit holes you were jumping into.... posted on Nov, 20 2016 @ 10:58 PM I leave the quantum leaps to the ones who 420 posted on Nov, 20 2016 @ 11:44 PM originally posted by: tikbalang I leave the quantum leaps to the ones who 420 LOL! posted on Nov, 21 2016 @ 03:46 AM You've left out the interesting part There seem to be certain patterns (strange numbers) In the complex calculations, which hint at an underlying structure. If there is one, it could greatly simplify the calculations, eventually lead to a better understanding of the nature of particles. 
edit on 21-11-2016 by moebius because: (no reason given) posted on Nov, 21 2016 @ 03:48 AM So, from what I understand about this article, it seems that another layer of synchronicity has made itself apparent, a strange and unexplained correlation between abstract mathematical constructs arising from pure mathematics, and the sorts of numbers being generated by studies of particle collisions. There ought be no reason, so it seems, why such a correlation between these values ought to come about, and yet there come about they do. Is that the size of it? I think this will turn out to be yet another expression of the interconnected nature of all things, and I hope that those working on the numbers involved with both the pure mathematical exploration, and the theoretical physics element appreciate the potential importance of this connection in time, and share what they learn from it for the betterment of the future! posted on Nov, 21 2016 @ 04:55 AM 2 words........ Stargate Universe....... Make it happen posted on Nov, 21 2016 @ 05:46 AM Good to see that there is a shake up under way and an new algorithm is on the horizon to help explain and better piece this world together, It is all in the numbers. posted on Nov, 21 2016 @ 07:09 AM Don't forget 17 though (you know what I mean) Makes sense in a wave based model. posted on Nov, 21 2016 @ 07:29 AM The underlying structure which is the simulation we exist inside of. just a little bummed the single number wasn't " 42 ". What if we're just being monitored and everytime we get close another layer of " junk " is added to increase the time it will take to solve further. Kinda like moving the cheese further away when the mouse gets close, the old carrot on a stick concept. posted on Nov, 21 2016 @ 07:43 AM originally posted by: moebius You've left out the interesting part There seem to be certain patterns (strange numbers) In the complex calculations, which hint at an underlying structure. 
If there is one, it could greatly simplify the calculations, eventually lead to a better understanding of the nature of particles. Doesn't it seem as if they are trying to calculate chaos, or perhaps chaos is fractal in nature, or perhaps fractal in that particular chamber? edit on 21-11-2016 by InTheLight because: (no reason given) posted on Nov, 21 2016 @ 12:17 PM For those who like a little bit of imagery to enhance a description (and haven't already used Google): The comparison between Feynman Diagrams and Electrical Circuit Diagrams helped me comprehend these nifty squiggles. As far as the math behind them, I would need to borrow some extra gray-matter to get anywhere. posted on Nov, 21 2016 @ 01:05 PM originally posted by: moebius You've left out the interesting part There seem to be certain patterns (strange numbers) In the complex calculations, which hint at an underlying structure. If there is one, it could greatly simplify the calculations, eventually lead to a better understanding of the nature of particles. You're right I did. I didn't want to bury the lead so to speak. Great article - I'm glad others also took advantage of the opportunity to read it. It's mind-bending stuff. And we continue to push on the envelope that is understanding the nature of our existence... posted on Nov, 21 2016 @ 01:06 PM Thanks! Yes - visuals always help, and these speak volumes... posted on Nov, 21 2016 @ 02:07 PM The Quanta article is a great read! Thanks for the share. How to explain to non-math types?... here is my lame attempt. Repeated patterns are called "periods" like sine waves. You can measure the distance between peaks, what a uniform wave should look like, how noise disrupts that regularity (the period), describe it mathematically, etc. Oddly enough, Pi usually makes an appearance. They are doing the same with abstract algebra used to describe Feynman diagrams (FD). 
Since there is an unknown limit of how many FDs one needs to include (the case where particles loop back and interfere with the original, basic FD. It is noticed that the complexity grows exponentially. The example given is 3 looped FD results in 72 integrals, 4 looped FD yields 891, while 5 looped FD yields 12,672 (Quanta article)) they used gometric algebra to see if there is a trend. Using this complex analysis upon looped Feynman diagrams yields Galois numbers hinting at some unknown structure. What that means... Every Feynman diagram has a motive attached to it, but what exactly the structure of a motive is saying about the structure of its related diagram remains anyone’s guess. -Quanta article The "motive" is thought of as like the same in music (aka, motif) the tendency for different equations (cohomology is the that branch of math) to reach the same answer, like the main melody being resolved to at the end of a performance. Except this is happening in math terms upon complex FDs! I think this is a long, roundabout, way of saying, "math describes the world" and no matter what path is taken, math or physics, at the end of the day, you stand beholding the power of numbers! OP, thanks for the brain ache this morning! posted on Nov, 21 2016 @ 02:22 PM I think this is a long, roundabout, way of saying, "math describes the world" and no matter what path is taken, math or physics, at the end of the day, you stand beholding the power of numbers! And the beauty of creation and the long ways were are from understanding. posted on Nov, 21 2016 @ 03:00 PM Preaching to the choir! Just looking at the timeline, 1960s people were fighting for their pet cohomology theory, and then 1990s, the realization of motives came about. Now in 2016, finding what should be unrelated values in two vastly different fields describing the unknown application of complex FDs, yeah, what a wondrous world we live in! 55 years is not that long in science terms. 
Heck, they have not figured out twin primes from Euclid's time! Kind of reminds me of the internet correction codes found in string theory from a few years back. posted on Nov, 21 2016 @ 03:29 PM I think this is a long, roundabout, way of saying, "math describes the world" and no matter what path is taken, math or physics, at the end of the day, you stand beholding the power of numbers! OP, thanks for the brain ache this morning! You're welcome - join the club, or party as it's becoming. I read this very late at night and it had my head spinning and I studied QT for my grad degree - lol. It still has my head spinning, but in a cool, "buzzed" sort of way. It kind of reminds of that scene in the book (and movie) Contact by Carl Sagan where at the very end of the book after Ellie has completed her adventure she writes a computer program to compute the digits of π to heretofore-unprecedented lengths, and in so doing (at around 10 to the 20th places in the base 11 representation of π) beautiful pictures start to emerge within the digits. As someone once said a long time ago - "God is in the details" - whatever you imagine that infinite creator to be. top topics 20
2,459
10,667
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.515625
3
CC-MAIN-2016-50
longest
en
0.93968
https://people.sc.fsu.edu/~jburkardt/m_src/cvt_triangle_uniform/cvt_triangle_uniform.html
1,713,267,456,000,000,000
text/html
crawl-data/CC-MAIN-2024-18/segments/1712296817081.52/warc/CC-MAIN-20240416093441-20240416123441-00483.warc.gz
437,363,403
2,545
# cvt_triangle_uniform cvt_triangle_uniform, a MATLAB code which iteratively calculates a Centroidal Voronoi Tessellation (CVT) on a triangle with uniform density. As you watch the CVT develop, for instance, you will be able to guess something about the angle of intersection between the boundaries of the region, and the edges of Voronoi regions. CVT_TRIANGLE_UNIFORM generates a CVT in a triangle with a uniform density. The calling sequence is: [ p, t ] = cvt_triangle_uniform ( n, sample_num, delaunay_display ) • N, the number of generators. • SAMPLE_NUM, the number of sample points per generator. • DELAUNAY_DISPLAY, 0 to hide, 1 to show the Delaunay triangulation. ### Licensing: The computer code and data files described and made available on this web page are distributed under the MIT license ### Languages: cvt_triangle_uniform is available in a MATLAB version. ### Related Data and Programs: cvt_box, a MATLAB code which constructs a modified cvt in which some points are forced to lie on the boundary. ccvt_reflect, a MATLAB code which tries to construct a modified cvt in which some points are forced to lie on the boundary, using a reflection idea. cvt_1d_lloyd, a MATLAB code which computes an n-point centroidal voronoi tessellation (cvt) within the interval [0,1], under a uniform density. cvt_1d_nonuniform, a MATLAB code which allows the user to watch the evolution of a cvt computed over a 1d interval with a nonuniform density. cvt_1d_sampling, a MATLAB code which computes an n-point centroidal voronoi tessellation (cvt) within the interval [0,1], under a uniform density, using sampling to estimate the voronoi regions. cvt_2d_sampling, a MATLAB code which computes an n-point centroidal voronoi tessellation (cvt) within the unit square [0,1]x[0,1], under a uniform density, using sampling to estimate the voronoi regions. cvt_circle_nonuniform, a MATLAB code which calculates a nonuniform centroidal voronoi tessellation (cvt) over a circle. 
cvt_circle_uniform, a MATLAB code which calculates a centroidal voronoi tessellation (cvt) over a circle with uniform density. cvt_square_nonuniform, a MATLAB code which iteratively calculates a centroidal voronoi tessellation (cvt) over a square, with a nonuniform density. cvtm_1d, a MATLAB code which estimates a mirror-periodic centroidal voronoi tessellation (cvtm) in the periodic interval [0,1], using a version of lloyd's iteration. cvtp_1d, a MATLAB code which estimates a periodic centroidal voronoi tessellation (cvtp) in the periodic interval [0,1], using a version of lloyd's iteration. discrete_pdf_sample_2d, a MATLAB code which demonstrates how to construct a probability density function (pdf) from a table of sample data, and then to use that pdf to create new samples. florida_cvt_geo, a MATLAB code which explores the creation of a centroidal voronoi tessellation (cvt) of the state of florida, based solely on geometric considerations. test_triangulation, a MATLAB code which defines the geometry of a number of sample regions. voronoi_plot, a MATLAB code which plots the voronoi neighborhoods of points using l1, l2, linfinity or arbitrary lp norms; ### Reference: 1. Franz Aurenhammer, Voronoi diagrams - a study of a fundamental geometric data structure, ACM Computing Surveys, Volume 23, Number 3, pages 345-405, September 1991. 2. John Burkardt, Max Gunzburger, Janet Peterson, Rebecca Brannon, User Manual and Supporting Information for Library of Codes for Centroidal Voronoi Placement and Associated Zeroth, First, and Second Moment Determination, Sandia National Laboratories Technical Report SAND2002-0099, February 2002. 3. Qiang Du, Vance Faber, Max Gunzburger, Centroidal Voronoi Tessellations: Applications and Algorithms, SIAM Review, Volume 41, Number 4, December 1999, pages 637-676. ### Source Code: Last revised on 23 December 2018.
939
3,871
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.515625
3
CC-MAIN-2024-18
latest
en
0.797338
http://glewengineering.com/tag/cfd/
1,539,831,531,000,000,000
text/html
crawl-data/CC-MAIN-2018-43/segments/1539583511642.47/warc/CC-MAIN-20181018022028-20181018043528-00100.warc.gz
156,531,604
18,567
## Wind Load CFD Modeling for Rooftop Elements Compared to ASCE 7 2018-02-20T11:28:46+00:00 Wind Load Comparison between CFD and ASCE 7 for Rooftop shapes Welcome back to our blog series on the phenomenon called Wind Load CFD Modeling and how it affects civil and mechanical engineers.  Wind Load is the force that blowing wind exerts on any device or structure that extends above ground level.  After an initial introduction to the factors that affect wind load on an object, I compared three different sets of wind load calculation methods using three simple objects, hypothetically placed on a 100-foot-tall building.  Starting with the generic drag equation for the first equation, I added two modification coefficients called "gust factor" and "exposure coefficient" for the second equation.    I used the model for rooftop objects for the [...] Wind Load CFD Modeling for Rooftop Elements Compared to ASCE 7 2018-02-20T11:28:46+00:00 ## Comparing Wind Load Calculation Methods 2018-02-21T15:26:59+00:00 Wind Load Calculation Wind Load Calculation is an overview of the force that blowing wind exerts on a tall object.  A number of factors influence the actual wind load on a real building, including the surrounding terrain, nearby structures, trees, and typical weather patterns for the area.  Comparing wind load calculation methods is complicated. Calculations try to account for as many of these external factors as possible, to the point where the wind load section in American Society for Civil Engineer’s ASCE 7 standard, spans five chapters and over 100 pages. Fig 1: Wind load calculation example structures ASCE 7's Applications Comparing one of ASCE 7’s Applications with the two equations [...] 
Comparing Wind Load Calculation Methods 2018-02-21T15:26:59+00:00 ## Wind Load Calculations and Modeling 2018-02-20T12:26:27+00:00 Part 1 of our Wind Load Series “Wind load” is a calculated value representing the total force on a structure or object cause by pressure from wind moving over it.  In this blog series, we will discuss different methods for wind load calculations, the factors that influence its magnitude, and the effects a high wind load can have on a structure.  Wind load is most commonly addressed by civil and structural engineers when designing buildings, but mechanical engineers can encounter the effect when designing tall objects like cranes, telescoping communications masts or wind turbine towers. Wind Load Essentials As a force, wind load is the product of pressure distributed over an area (psf times ft2 or Pa times m2).  In [...] Wind Load Calculations and Modeling 2018-02-20T12:26:27+00:00 ## High-purity Gas Panels Part 2: Gas Distribution in the Fab 2018-02-28T10:30:54+00:00 Mechanical engineering design for high-purity processes in semiconductor fabs: gas cabinets, liquid sources and valve manifold boxes. High-purity Gas Panels Part 2: Gas Distribution in the Fab 2018-02-28T10:30:54+00:00 ## Computational Fluid Dynamics: Capabilities of Simulation CFD 2017-05-03T13:32:41+00:00 The value of computational fluid dynamics simulation software, and how it can be useful to an engineering firm. Computational Fluid Dynamics: Capabilities of Simulation CFD 2017-05-03T13:32:41+00:00 ## Alternative Energy Engineering: Energy from the Earth’s Natural Heat 2018-03-14T10:55:43+00:00 A discussion of utilizing CFD and FEA to perform stress analysis and thermal analysis on turbines that acquire geothermal energy. Alternative Energy Engineering: Energy from the Earth’s Natural Heat 2018-03-14T10:55:43+00:00 2018-03-14T11:06:42+00:00 Alternative energy opportunities and advances available utilizing Methane Gas for energy generation. 
## Stress Analysis of Athletic Equipment: Shoes 2018-04-05T11:50:27+00:00 Stress analysis and mechanical engineering of the athletic shoe is discussed using CAD and computational fluid dynamics Stress Analysis of Athletic Equipment: Shoes 2018-04-05T11:50:27+00:00 ## Engineering Flood Control: Pumps 2018-03-16T13:41:21+00:00 The mechanical engineering make up of pumps using tools such as CAD and CFD to make technological advances in design and performance. Engineering Flood Control: Pumps 2018-03-16T13:41:21+00:00 ## Engineering Hurricanes : Structural 2018-03-19T10:30:07+00:00 A discussion on the impact engineers have on structural designs and tools used such as CAD and FEA. Engineering Hurricanes : Structural 2018-03-19T10:30:07+00:00 ## Engineering in Simulation: CFD 2018-03-19T10:56:26+00:00 Computational fluid dynamics is discussed and the benefits to engineering and thermal management Engineering in Simulation: CFD 2018-03-19T10:56:26+00:00 ## Thermal Management: Heat Sink Materials 2018-03-19T11:10:23+00:00 Discussion on materials used in heat sinks and their thermal management properties used by engineers in today's technology. Thermal Management: Heat Sink Materials 2018-03-19T11:10:23+00:00 ## Electronics Cooling : Thermal Interface Materials 2018-03-19T11:40:47+00:00 Discussion on thermal interface materials used with heat sinks to achieve efficient thermal management solutions for electronics cooling Electronics Cooling : Thermal Interface Materials 2018-03-19T11:40:47+00:00 ## Electronics Cooling Designs: Synthetic Jets 2017-06-08T11:53:38+00:00 Electronics Cooling presenting synthetic jets. Part of a series on different types of electronics cooling and thermal management methods. 
Electronics Cooling Designs: Synthetic Jets 2017-06-08T11:53:38+00:00 ## Electronics Cooling Designs: EFA 2018-03-19T13:12:26+00:00 Engineering discussion on Electrostatic Fluid Accelerators (EFA) and their effects on electronics cooling and thermal management Electronics Cooling Designs: EFA 2018-03-19T13:12:26+00:00 ## Electronics Cooling Designs: Thermoelectric Plates 2018-03-19T13:20:39+00:00 Thermoelectric Plates used for thermal management in electronics cooling is discussed from an engineering prospective. Electronics Cooling Designs: Thermoelectric Plates 2018-03-19T13:20:39+00:00 ## Electronics Cooling Designs: Heat Sinks 2018-04-10T15:11:31+00:00 Heat sinks for electronics cooling and thermal management applications are discussed. Electronics Cooling Designs: Heat Sinks 2018-04-10T15:11:31+00:00 ## Electronics Cooling Designs: Heat Pipes 2018-03-19T15:33:58+00:00 Discussion on electronics cooling and thermal management using heat pipes in conjunction with heat sinks. Electronics Cooling Designs: Heat Pipes 2018-03-19T15:33:58+00:00 ## Engineering Energy: Hydroelectric 2018-03-21T12:09:45+00:00 Discussing the engineering of hydroelectric energy and the roles played by mechanical engineers and electrical engineers using CAD and FEA as tools. Engineering Energy: Hydroelectric 2018-03-21T12:09:45+00:00 ## Computer Aided Engineering Methods 2017-05-03T14:10:13+00:00 FEA, CAD, and CFD are discussed in relation to computer aided engineering and their benefits. Computer Aided Engineering Methods 2017-05-03T14:10:13+00:00
1,786
7,028
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.375
3
CC-MAIN-2018-43
latest
en
0.829499
http://www.chegg.com/homework-help/consider-steady-two-dimensional-incompressible-flow-field-u-chapter-9-problem-55p-solution-9780077295462-exc
1,474,987,298,000,000,000
text/html
crawl-data/CC-MAIN-2016-40/segments/1474738661087.58/warc/CC-MAIN-20160924173741-00169-ip-10-143-35-109.ec2.internal.warc.gz
387,716,702
21,054
# Fluid Mechanics with Student Resources DVD (2nd Edition) View more editions Solutions for Chapter 9 Problem 55PProblem 55P: Consider a steady, two-dimensional, incompressible flow fiel... • 1728 step-by-step solutions • Solved by publishers, professors & experts • iOS, Android, & web Over 90% of students who use Chegg Study report better grades. May 2015 Survey of Chegg Study Users Chapter: Problem: Consider a steady, two-dimensional, incompressible flow field for which the u velocity component is u = ax^2 - bxy, where a = 0.45 (ft·s)^-1, and b = 0.75 (ft·s)^-1. Let v = 0 for all values of x when y = 0 (that is, v = 0 along the x-axis). Generate an expression for the stream function and plot some streamlines of the flow. For consistency, set ψ = 0 along the x-axis, and plot in the range 0 < x < 3 ft and 0 STEP-BY-STEP SOLUTION: Chapter: Problem: • Step 1 of 4 Since the flow field is steady, incompressible and two-dimensional, we can use the stream function in cartesian coordinates in the following form: …… (1) and …… (2) In this problem, we know , use equation (1) we can get: …… (3) By integrating equation (3) with respect to y, we can get the stream function below: …… (4) Using f(x), not a constant, because it is a partial integration, we need to use a function of the other variable (x). • Chapter , Problem is solved. Corresponding Textbook Fluid Mechanics with Student Resources DVD | 2nd Edition 9780077295462ISBN-13: 0077295463ISBN: Authors:
396
1,467
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.03125
3
CC-MAIN-2016-40
latest
en
0.7888
https://andrewharvey4.wordpress.com/category/mathematics/
1,686,398,243,000,000,000
text/html
crawl-data/CC-MAIN-2023-23/segments/1685224657169.98/warc/CC-MAIN-20230610095459-20230610125459-00479.warc.gz
132,387,865
53,676
### Archive Archive for the ‘mathematics’ Category ## Polyhypercubes and That Whole Area of Combinatorics… I’ve pulled this post from my Draft Archives from July 2010. I think it is about time I post it. I stumbled upon Tetris the other day and began to wonder about the pieces, which led me to stumble into a whole area of maths that I didn’t know had been documented: polyforms. In 2D you have n squares which can be formed into Polyominos, for an arrangement of squares to form a polyomino each square must share at least one of its edges with another square. If you use 2 squares you get Dominos, 3 squares gives you Trominos, 4 gives Tetrominos, 5 gives Pentominos, and so on to n-minos. These all fall under the category of polyominos. In 3D you have cubes with the principle that each cube must share a face with at least one other cube. These shapes are called polycubes. If you use 3 cubes you get tricubes, and so on. You can extrapolate this concept to nD hypercubes with the principle that each n-cube must share at least one of its (n-1)-faces with another n-cube. Let’s call these polyhypercubes. We shall also say that polyhypercubes are the same if we can rotate (or mirror as well, depending on your definition of equality) them to match exactly. Note that if you are looking at the set of all poly-n-cubes for some n, the total size will vary depending on whether you decide to allow mirroring (…although mirroring is the same as allowing rotation in a dimension 1 higher than the dimension of the space). Two pentacubes which are the same if you count mirroring as an allowed operation when testing for equality, but not the same if you don't (you can't rotate one to fall on the other). To generate these shapes you can start with one cube, from this you can make a graph where you add one block to every possible place. You can turn this into a directed graph, where an edge indicates you can get one shape by adding another cube to the previous shape. 
Perhaps this can be extended to a hypergraph, where two shapes are linked if you can morph from one to another by moving just one block (square, cube…)? Evolution of the 5-polycubes where an edge represents adding one cube to a polycube. You can also do a similar graph (well actually it would be a multigraph as you would have islands) by only allowing an edge where you can make a “rubix cube” like change to the object. All 5-polycubes where an edge indicates a "rubix cube" like transformation. I was originally interested in two problems here. Generating all possible n-polycubes for a given n, and finding the total number of n-polycubes for a given n. The diagrams are examples of what I mean, but it was only done manually and only up to 5-polycubes. Code used to generate those diagrams is at https://github.com/andrewharvey/phc/tree/master/concrete-cases/5-polycubes. Categories: mathematics Tags: ## A method for two parties to verify if they share a common secret without revealing that secret if they don’t. We didn’t cover zero knowledge proofs in the Security Engineering course I did last semester. But part way into the course I needed a way for two people, A and B to verify that some number they each know is infact the same number N in the case that they don’t have a trusted arbitrator and they don’t want to reveal the number they each know to the other person unless the other person has the same number. I don’t think this is exactly a zero knowledge proof situation by it seems closely related. The motivating situation for this was a web application that set some cookies and I wanted to know if one of these cookies set was unique per user or unique per some other setting (but the web application doesn’t allow just anyone to create a new account, so I couldn’t determine this on my own). In the case that it was unique per user then I don’t want to tell anyone what my value for this cookie is because then they may be able to hijack my session. 
So a method I thought of was that each person reveals one bit of the number to the other person at a time. I’ll try to formalise this a bit more. I’ll call this number the message. $A_M$ is the message $A$ knows, $B_M$ is the message $B$ knows. $A$ and $B$ arrange that they each know some message and arrange that they wish to verify if $A_M = B_M$. If $A_M = B_M$ then $A$ and $B$ must each know that that is the case and if $A_M \ne B_M$ then $A$ and $B$ must also know (after conducting the verification) that this is the case, but do not wish to let the other one know what their message was. $A$ and $B$ meet letting $A$ be the entity that begins the verification. Each message is first encoded into a binary representation using an agreed upon method. $A$ then tells $B$ what the 1st bit of $A_M$ is (denoted $A_M \left [1\right ]$). $B$ now verifies this bit with $B_M$. If $A_M \left [1\right ] = B_M \left [1\right ]$, $B$ tells $A$ the second bit of $B_M$. If $A_M \left [1\right ] \ne B_M \left [1\right ]$, $B$ randomly selects a bit (ie. randomly selects either 0 or 1) and tells $A$ that random bit instead, and flags that $A_M \ne B_M$. As soon as either $A$ or $B$ flags that $A_M \ne B_M$ they subsequently always report a random bit regardless of whether the last bit reported to them was correct or not. We could use an end of message token to indicate the end of the message. Of course this method isn’t perfect because if one’s random stream of bits matches what the other expects then one thinks that $A_M = B_M$ but the other thinks that $A_M \ne B_M$. Another problem is if both parties have determined that $A_M \ne B_M$ then when do they stop sending random bits to each other? If both parties are happy to reveal the length of their message then there is no problem. 
Otherwise both parties can keep sending random bits until they feel that the message space they have opened up is large enough and they don’t mind revealing that the length of their message is less than the bit number they are up to. Here’s an example. A’s number is 0110. B’s number is 0110 and they want to check if they share the same number. A -> B: 0 (B expects this) B -> A: 1 (A expects this) A -> B: 1 (B expects this) B -> A: 0 (A expects this) A -> B: $ (B expects this) (not needed if they first agree on revealing the message length) Another case A knows 0110, B knows 0010. A -> B: 0 (B expects this) B -> A: 0 (A does not expect this, so A concludes A_M != B_M, and subsequently sends randomness) A -> B: Rand(0,1) (two cases) A sent 0 (B does not expect this, so B also concludes A_M != B_M, and subsequently sends randomness) ... continues until the end of M or until one party stops sending randomness. A sent 1 (B expects this, but A hasn't revealed anything as they made a random selection) B -> A: 0 (A doesn't know if B is sending randomness or not) if they agreed upon a message length, (A knows that A_M != B_M, but B thinks that A_M == B_M) (but A has only revealed 1 bit of A_M to B (because B doesn't know if A was sending A_M or randomness after the 1st bit), and B hasn't revealed anything of B_M to A (because A doesn't know if B was sending randomness))# (the probability of this happening is z) or, no message length agreed upon, A keeps sending randomness and B will detect this (because B is expecting the end of stream token and didn't get it), so they both know that A_M != B_M. This is not very formal and I’m confident I’ve missed some details or left some a bit fuzzy, I only really wanted to explain the general concept. # To be honest I’m not so sure if this is correct. Rather than me going around in circles unable to solve the math, and just abandoning this post, I’ll just leave it be and post with this uncertainty. 
Categories: mathematics Tags: ## A Maths Problem: Transformed Horizontal Lines January 7, 2010 2 comments This is the kind of post that I originally envisioned that I would post about when I started this blog. But after trying to complete this post I realised why I don’t do this very much, because I can’t always solve the problems I come up with. Anyway… You can generate a funky kind of grid by taking a Cartesian coordinate system, joining lines from $(0, t)$ to $(t, 0)$ for some values t. Here are some examples, If you draw lots of lines you get something like, This is also what you get if you take a bunch of horizontal lines from $x = 0$ to $x = 1$ (where the horizontal lines are equally spaced above each other), and take all the endpoints from the line $x = 1$ and rotate them $90^\circ$ about the point $1, 0$. The thing I was interested in was as you draw more and more of these lines it looks like a curve emerges on the boundary. I imagined that if you drew infinitely many lines like these you would get a nice smooth curve. I want to know what is the formula for that curve. But as I started to try to work it out, it didn’t seem so simple. I tried a lot of approaches, none of which seemed to work. So after a few initial set backs I tried to take a parametric approach taking t to be a value between 0 and 1 where this t indicates the line with start point $(0, t)$. The point on the curve for this t is some point on that line. I tried to get that point via the intersection with the next line, ie. the point on this line that is also on the curve is between the intersection of that line and the line for $t + \phi$ and $t - \phi$ for some really small $\phi$. But when I tried this approach as you make $\phi$ zero, then we get infinitely many points of intersection. That didn’t work so easy but then I realised that if the point is on this line then (although I have not proved this but it seems obvious from the picture) that I have the gradient. 
So all those lines as shown above have equation $y = \frac{-t}{\left ( 1 - t \right )}x + t$. (Except for t = 1 where we’ll just use a y value of 1). We can use this same t to define a point on the curve (which I call $f$ from here on) parametrically. So I assumed that the gradient of $f$ is given as $f'(t) = \frac{-t}{1-t}$. But now I’m not so sure that I have enough rigour here. But then I got stuck again. I can try to go some integrals but this won’t work because you don’t know the relation between increasing t and the length along the curved you have moved. As you could have two different parametric functions which both have the same derivative function (ignoring the +c constant that disappears when you differentiate), just knowing the function defining the derivative of $f$ parametrically won’t tell me the equation of the original curve. Moving on I now tried to calculate the area under the curve. I could partition it like how a Riemann integral is done. We can easily calculate the area of any of these trapezoids (bounded by red). $A = \frac{x_n - x_{(n-1)}}{2}(y_{x_n} + y_{x_{(n-1)}})$. We can get the x values by finding the point of intersection of the 2 lines that intersect at that x (and have the largest y value if there are several points of intersection for that x). Each line for some value t will have a point of intersection of the line before and after it (based on the t value). When I say the area of t = some number, I mean the area of the trapezoid starting with the intersection of the previous t line and ending with the intersection of the next t line. So the area of t = 1 is zero (because x0 and x1 are the same). The diagram above has $\phi = 0.125$. 
So, Point A is the intersection of $y = \frac{-t}{\left ( 1-t \right )}x + t$ and $y = \frac{- \left ( t + \phi \right )}{1-\left ( t + \phi\right )}x + t + \phi$, which is, $x_A = (1-t)(1-t-\phi)$ $y_A = t(t+\phi)$ Point B is the intersection of $y = \frac{-t}{\left ( 1-t \right )}x + t$ and $y = \frac{- \left ( t - \phi \right )}{1-\left ( t - \phi\right )}x + t - \phi$, which is, $x_B = (1-t)(1-t+\phi)$ $y_B = t(t-\phi)$ So the area of this trapezoid is $\frac{x_B - x_A}{2}(y_A + y_B)$, which is $2t^2\phi(1-t)$ But then I got stuck here. I can compute a value for the approximate area. phi = 0.0001; area = 0; for (t = 1; t > 0; t -= phi) { area += 2*t*t*phi*(1-t); } print area; Which gives a value very close to 1/6, and if I integrate that area equation for t = 0..1 you get $\frac{1}{6}\phi$. But I don’t want the area, I want the formula that defines the area from x = 0 to some value x so that I can then differentiate this to get the equation of the original curve. So this is where I give up, and leave this for another day. If you work it out please post in the comments! Oh and there is some rough code I wrote to make those images here. And a nice animation too. Categories: mathematics Tags: ## MATH1081 and MATH1231 Cheat Sheets March 19, 2009 Leave a comment Just cross posting my cheat sheets from MATH1081 and MATH1231. PDF and LaTeX source (WordPress.com won’t allow .tex uploads) are provided. These are from 08s2, at UNSW. I’ve released them into the public domain, if that is not legally possibly in Australia then lobby your parliament representative. math1081.pdf math1231-alg.pdf math1231-calc.pdf ## MATH1081 LaTeX Source %This work is hereby released into the Public Domain. To view a copy of the public domain dedication, visit http://creativecommons.org/licenses/publicdomain/ or send a letter to Creative Commons, 171 Second Street, Suite 300, San Francisco, California, 94105, USA. 
\documentclass[a4paper,10pt]{article} \usepackage{verbatim} \usepackage{amsmath} \usepackage{amssymb} \setlength\parindent{0mm} \usepackage{fullpage} \usepackage{array} \usepackage[all]{xy} \usepackage[pdftex, pdfauthor={Andrew Harvey}, pdftitle={MATH1081 Cheat sheet 2008}, pdfsubject={}, pdfkeywords={}]{hyperref} \begin{document} \section*{Enumeration} \subsection*{Counting Methods} Let$\#(n)$denote the number of ways of doing$n$things. Then, $$\#(A\; \mbox{and}\; B) = \#(A) \times \#(B)$$ $$\#(A\; \mbox{or}\; B) = \#(A) + \#(B)$$ ($n$items,$r$choices)\\ Ordered selection with repetition,$n^r$.\\ Ordered selection without repetition,$P(n,r) = \frac{n!}{(n-r)!}$.\\ Unordered selection without repetition,$C(n,r) = \frac{P(n,r)}{r!}$.\\$|A \cup B| = |A| + |B| - |A \cap B|$\\ Ordered selection with repeition; WOOLLOOMOOLOO problem.\\ Unordered selection with repetition; dots and lines, $$\binom{n+r-1}{n-1}$$ Pigeonhole principle. If you have n holes and more than n objects, then there must be at least 1 hole with more than 1 object. \section*{Recurrences} \subsection*{Formal Languages}$\lambda$represents the \textit{empty word}.$w$is just a variable (it is not part of the language) \subsection*{First Order Homogeneous Case} The recurrence,\\ \begin{center}$a_n = ra_{n-1}$with$a_0=A$\\\end{center} has solution\\ $$a_n=Ar^n.$$ \subsection*{Second Order Recurrences} $$a_n + pa_{n-1} + qa_{n-2} = 0$$ has characteristic, $$r^2 + pr + q = 0$$ If$\alpha$and$\beta$are the solutions to the characteristic equation, and if they are real and$\alpha \ne \beta$then, $$a_n = A\alpha^n + B\beta^n.$$ If$\alpha = \beta$then, $$a_n = A\alpha^n + Bn\beta^n.$$ \subsection*{Guesses for a particular solution} \begin{tabular}{c|c}%{m{width} | m{width}} LHS & Guess \\ \hline$3$& c \\$3n$&$cn + d$\\$3\times 2^n$&$c2^n$\\$3n2^2$&$(cn+d)2^n$\\$(-3)^n$&$c(-3)^n$\\ \end{tabular} \end{document} ## MATH1231-ALG LaTeX Source %This work is hereby released into the Public Domain. 
To view a copy of the public domain dedication, visit http://creativecommons.org/licenses/publicdomain/ or send a letter to Creative Commons, 171 Second Street, Suite 300, San Francisco, California, 94105, USA. \documentclass[a4paper,10pt]{article} \usepackage{verbatim} \usepackage{amsmath} \usepackage{amssymb} \setlength\parindent{0mm} \usepackage{fullpage} \usepackage[pdftex, pdfauthor={Andrew Harvey}, pdftitle={MATH1231 Algebra Cheat sheet 2008}, pdfsubject={}, pdfkeywords={}, pdfstartview=FitB]{hyperref} \begin{document} \section*{Vector Spaces} \subsection*{Vector Spaces} Vector Space - 10 rules to satisfy, including \begin{small}\textit{(Where$V$is a vector space.)}\end{small} \begin{itemize} \item Closure under addition. If$\mathbf{u}, \mathbf{v} \in V$then$\mathbf{u} + \mathbf{v} \in V$\item Existence of zero.$\mathbf{0} \in V$\item Closure under scalar multiplication. If$\mathbf{u} \in V$then$\lambda \mathbf{u} \in V$, where$\lambda \in \mathbb{R}$\end{itemize} \subsection*{Subspaces} Subspace Theorem:\\ A subset$S$of a vectorspace$V$is a subspace if: \begin{enumerate} \renewcommand{\labelenumi}{\roman{enumi})} \item$S$contains the zero vector. \item If$\mathbf{u}, \mathbf{v} \in S$then$\mathbf{u} + \mathbf{v} \in S$and$\lambda \mathbf{u} \in S$for all scalars$\lambda$. \end{enumerate} \subsection*{Column Space} The column space of a matrix$A$is defined as the span of the columns of$A$, written$\mbox{col}(A)$. \subsection*{Linear Independence} Let$S = \{\mathbf{v_1}, \mathbf{v_2}, \dots, \mathbf{v_n}\}$be a set of vectors. \begin{enumerate} \renewcommand{\labelenumi}{\roman{enumi})} \item If we can find scalars$\alpha_1 + \alpha_2 + \dots + \alpha_n$not all zero such that \begin{center}$\alpha_1\mathbf{v_1} + \alpha_2\mathbf{v_2} + \dots + \alpha_n\mathbf{v_n} = 0$\end{center} then we call$S$a linearly dependent set and that the vectors in$S$are linearly dependent. 
\item If the only solution of \begin{center}$\alpha_1\mathbf{v_1} + \alpha_2\mathbf{v_2} + \dots + \alpha_n\mathbf{v_n} = 0$\end{center} is$\alpha_1 = \alpha_2 = \dots = \alpha_n = 0$then$S$is called a linearly independent set and that the vectors in$S$are linearly independent. \end{enumerate} \subsection*{Basis} A set$B$of vectors in a vectorspace$V$is called a basis if \begin{enumerate} \renewcommand{\labelenumi}{\roman{enumi})} \item$B$is linearly independent, and \item$V = \mbox{span}(B)$. \end{enumerate} An orthonormal basis is formed where all the basis vectors are unit length and are mutually orthogonal (the dot product of any two is zero). \subsection*{Dimension} The dimension of a vectorspace$V$, written dim($V$) is the number of basis vectors. \section*{Linear Transformations} \subsection*{Linear Map} A function$T$which maps from a vectorspace$V$to a vectorspace$W$is said to be linear if, for all vectors$\mathbf{u}, \mathbf{v} \in V$and for any scalar$\lambda$, \begin{enumerate} \renewcommand{\labelenumi}{\roman{enumi})} \item$T(\mathbf{u} + \mathbf{v}) = T(\mathbf{u}) + T(\mathbf{v})$, \item$T(\lambda\mathbf{u}) = \lambda T(\mathbf{u})$. \end{enumerate} The columns of the transformation matrix are simply the images of the standard basis vectors. \subsection*{The Kernel}$\mbox{im}(T) = \mbox{col}(A)$\\ The kernel of a linear map$T : V \to W$, written ker($T$), consists of the set of vectors$\mathbf{v} \in V$such that$T(\mathbf{v}) = \mathbf{0}$.\\ If A is the matrix representation of a linear map$T$, then the kernel of$A$is the solution set of$A\mathbf{x} = \mathbf{0}$. \subsection*{Rank-Nullity} The dimension of the image of a linear map$T$is called the rank of$T$, written rank($T$). (Maximum number of linearly independent columns of A)\\ The dimension of the kernel of a linear map$T$is called the nullity of$T$, written nullity($T$). 
(Number of parameters in the solution set of$A\mathbf{x}=\mathbf{0}$)\\ If$T$is a linear map from$V$to$W$then $$\mbox{rank}(T) + \mbox{nullity}(T) = \mbox{dim}(V)$$ \section*{Eigenvectors and Eigenvalues} If$A$is a square matrix,$\mathbf{v} \ne 0$and$\lambda$is a scalar such that, $$A\mathbf{v} = \lambda\mathbf{v}$$ then$\mathbf{v}$is an eigenvector of$A$with eigenvalue$\lambda$.\\ \begin{comment} \begin{equation*} A\mathbf{v} = \lambda\mathbf{v} \implies (A-\lambda I)\mathbf{v} = \mathbf{0} A-\lambda I \mbox{has no inverse (otherwise } \mathbf{v} = \mathbf{0} \mbox{)} \mbox{so set det}(A-\lambda I) = 0 \mbox{this finds the eigenvectors} \mbox{Also since} (A-\lambda I)\mathbf{v} = \mathbf{0} \mathbf{v} \in \mbox{ker}(A-\lambda I) \mbox{this gives eigenvectors.} \end{equation*} \end{comment} Eigenvalues: Set$\mbox{det}(A-\lambda I) = 0$and solve for$\lambda.$\\ Eigenvectors: For each value of$\lambda$find the kernel of$(A-\lambda I)$. \subsection*{Diagonalisation} If$A$has$n$(independent) eigenvectors then put,\\$P = (\mathbf{v_1}|\mathbf{v_2}|...|\mathbf{v_n})$(eigenvectors$\mathbf{v}$)\\ and\\$D = \begin{pmatrix} \lambda_1 & & 0 \\ & \ddots & \\ 0 & & \lambda_n \end{pmatrix} $(eigenvalues$\lambda$)\\ \\ so then\\$A^k = PD^kP^{-1}$, for each non-negative integer$k$.\\ \\ Remember that when$A = \bigl( \begin{smallmatrix} a&b\\ c&d \end{smallmatrix} \bigr)$,$A^{-1} = \frac{1}{det(A)} \begin{pmatrix} d & -b\\ -c & a\end{pmatrix}$\subsection*{Systems of Differential Equations} The system$\begin{cases} \frac{dx}{dt} = 4x + y\\ \frac{dy}{dt} = x + 4y \end{cases}$can be written$\mathbf{x}'(t) = \begin{pmatrix} 4 & 1\\1 & 4\end{pmatrix}\mathbf{x}(t)$where$\mathbf{x}(t) = \binom{x(t)}{y(t)}.$We can guess the solution to be$\mathbf{x}(t) = \alpha \mathbf{v} e^{\lambda t}$(and add for all the eigenvalues). Where$\mathbf{v} $and$ \lambda$are the eigenvectors and eigenvalues respectively. 
\section*{Probability and Statistics} \subsection*{Probability} Two events$A$and$B$are mutually exclusive if$A \cap B = \emptyset$. $$P(A^c) = 1 - P(A)$$ $$P(A \cup B) = P(A) + P(B) - P(A \cap B)$$ \subsection*{Independence} Two events$A$and$B$are physically independent of each other if the probability that one of them occurs is not influenced by the occurrence or non occurrence of the other. These two events are statistically independent if, $$P(A \cap B) = P(A).P(B).$$ \subsection*{Conditional Probability} Probability of$A$given$B$is given by, $$P(A|B) = \frac{P(A \cap B)}{P(B)}$$ \subsection*{Bayes Rule} \subsection*{Discrete Random Variables} $$p_k = P(X=k) \qquad \mbox{(} \{p_k\}\mbox{ is the probability distribution)}$$ where,$X$is a discrete random variable, and$P(X=k)$is the probability that$X=k$.\\ For$\{p_k\}$to be a probability distribution, \begin{enumerate} \renewcommand{\labelenumi}{\roman{enumi})} \item$p_k \ge 0$for$k = 0, 1, 2, \dots$\item$p_0 + p_1 + \dots = 1$\end{enumerate} \subsection*{Mean and Variance}$E(X)$denotes the mean or expected value of X.\\ %$$E(X) = \sum_{i=1}^{k} p_i x_i \qquad$$ $$E(X) = \sum_{\mbox{all }k} kp_k$$ $$\mbox{Var}(X) = E(X^2) - E(X)^2 \qquad \mbox{where } E(X^2) = \sum_{\mbox{all } k} k^2 p_k$$ \subsection*{Binomial Distribution} If we perform a binomial experiment (i.e. 2 outcomes)$n$times, and each time there is a probability$p$of success then, $$P(X=k) = \binom{n}{k} p^k (1-p)^{n-k},\qquad \mbox{for } 0\le k \le n \mbox{ and 0 otherwise}.$$ \subsection*{Geometric Distribution} $$P(X=k) = p(1-p)^{k-1}, \;\; k = 1, 2, \dots$$ \end{document} ## MATH1231-CACL LaTeX Source %This work is hereby released into the Public Domain. To view a copy of the public domain dedication, visit http://creativecommons.org/licenses/publicdomain/ or send a letter to Creative Commons, 171 Second Street, Suite 300, San Francisco, California, 94105, USA. 
\documentclass[a4paper,10pt]{article} \usepackage{verbatim} \usepackage{amsmath} \usepackage{amssymb} \setlength\parindent{0mm} \usepackage{fullpage} \usepackage[all]{xy} \usepackage[pdftex, pdfauthor={Andrew Harvey}, pdftitle={MATH1231 Calculus Cheat sheet 2008}, pdfsubject={}, pdfkeywords={}, pdfstartview=FitB]{hyperref} \begin{document} \section*{Functions of Several Variables} \subsection*{Sketching} \begin{itemize} \item Level Curves \item Sections \end{itemize} \subsection*{Partial Derivatives} $$\frac{\partial^2 f}{\partial y \partial x} = \frac{\partial^2 f}{\partial x \partial y}$$ on every open set on which$f$and the partials,\\ $$\frac{\partial f}{\partial x}, \frac{\partial f}{\partial y}, \frac{\partial^2 f}{\partial y \partial x}, \frac{\partial^2 f}{\partial x \partial y}$$ are continuous. \subsection*{Normal Vector} $$\mathbf{n} = \pm \begin{pmatrix} \frac{\partial f(x_0, y_0)}{\partial x}\\ \frac{\partial f(x_0, y_0)}{\partial y}\\ -1\\ \end{pmatrix}$$ \subsection*{Error Estimation} $$f(x_0 + \Delta x, y_0 + \Delta y) \approx f(x_0, y_0) + \left[ \frac{\partial f}{\partial x} (x_0, y_0)\right] \Delta x + \left[ \frac{\partial f}{\partial y} (x_0, y_0)\right] \Delta y$$ \subsection*{Chain Rules} Example,$z = f(x,y)\ \mbox{with}\ x = x(t), y = y(t)$\begin{displaymath} \xymatrix{ & f \ar[dl] \ar[dr] & \\ x \ar[d] & & y \ar[d]\\ t & & t} \end{displaymath} $$\frac{df}{dt} = \frac{\partial f}{\partial x} \cdot \frac{dx}{dt} + \frac{\partial f}{\partial y} \cdot \frac{dy}{dt}$$ \section*{Integration} \subsection*{Integration by Parts} Integrate the product rule, $$u(x)v(x) = \int u'(x)v(x)\; dx + \int v'(x)u(x)\; dx$$ \subsection*{Integration of Trig Functions} For$\int \sin^2 x\; dx$and$\int \cos^2 x\; dx$remember that,\\ $$\cos 2x = \cos^2 x - \sin^2 x$$ %$$\sin 2x = 2\sin x \cos x$$ Integrals of the form$\int \cos^m x \sin^n x\; dx$, when$m$or$n$are odd, you can factorise using$\cos^2 x + \sin^2 x = 1$and then using, $$\int \sin^k x \cos x\; dx = \frac{1}{k+1} \sin^{k+1} x 
+ C$$ $$\int \cos^k x \sin x\; dx = \frac{-1}{k+1} \cos^{k+1} x + C$$ \subsection*{Reduction Formulae} ... \subsection*{Partial Fractions} Example, assume $$\frac{2x-1}{(x+3)(x+2)^2} = \frac{A}{x+3} + \frac{Bx + C}{(x+2)^2}$$ \begin{comment} \begin{enumerate} \renewcommand{\labelenumi}{\roman{enumi})} \item$\frac{2x-1}{x+2} = A + \frac{B(x+3)}{x+2}$subs$x=-3 \to A=7$\item$\frac{2x-1}{x+3} = \frac{A(x+2)}{x+3} + B$subs$x=-2 \to B=-5$\end{enumerate} \end{comment} Now multiply both sides by$(x+3)(x+2)^2$and equate coefficients. \section*{ODE's} \subsection*{Separable ODE} Separate then integrate. \subsection*{Linear ODE} The ODE: $$\frac{dy}{dx} + f(x)y=g(x)$$ has solution, $$y(x) = \frac{1}{u(x)}\left [ \int u(x)g(x)\ dx + C \right]$$ where, $$u(x) := e^{\int f(x)\ dx}$$ \subsection*{Exact ODE} The ODE: $$\frac{dy}{dx} = -\frac{M(x,y)}{N(x,y)}$$ or as, $$M(x,y)dx + N(x,y)dy = 0$$ is exact when, $$\frac{\partial M}{\partial y} = \frac{\partial N}{\partial x}$$ Assume solution is of the form, $$F(x,y) = c$$ with, $$M = \frac{\partial F}{\partial x} \qquad N = \frac{\partial F}{\partial y}$$ Integrate to find two equations equal to$F(x,y)$, then compare and find solution from assumed form. \subsection*{Second Order ODE's} The ODE: $$ay'' + by' + cy = f(x)$$ For the homogeneous case ($f(x) = 0$)\\ the characteristic equation will be$a\lambda^2 + b\lambda + c = 0$If the characteristic equation has,\\ Two Distinct Real roots, (replace the$\lambda$'s with the solutions to the characteristic eqn.) 
$$y = Ae^{\lambda_1 x} + Be^{\lambda_2 x}$$ Repeated Real roots, $$y = Ae^{\lambda x} + Bxe^{\lambda x}$$ Complex roots, $$y = e^{\alpha x}(A\cos \beta x + B\sin \beta x)$$ where, $$\lambda = \alpha \pm \beta i$$ For the non-homogeneous case, $$y = y_h + y_p$$ $$y = \mbox{solution to homogeneous case} + \mbox{particular solution}$$ Guess something that is in the same form as the RHS.\\ If$f(x) = P(x)\cos ax \mbox{(or sin)}$a guess for$y_p$is$Q_1(x)\cos ax + Q_2(x)\sin ax$\section*{Taylor Series} \subsection*{Taylor Polynomials} For a differentiable function$f$the Taylor polynomial of order$n$at$x=a$is, $$P_n(x) = f(a) + f'(a)(x-a) + \frac{f''(a)}{2!}(x-a)^2 + \dots + \frac{f^{\left( n\right) }(a)}{n!}(x-a)^n$$ \subsection*{Taylor's Theorem} $$f(x) = f(a) + f'(a)(x-a) + \frac{f''(a)}{2!}(x-a)^2 + \dots + \frac{f^{\left( n\right) }(a)}{n!}(x-a)^n + R_n(x)$$ where,\\ $$R_n(x) = \frac{f^{\left( n+1\right) }(c)}{(n+1)!}(x-a)^{n+1}$$ \subsection*{Sequences} $$\lim_{x \to \infty} f(x) = L \implies \lim_{n \to \infty}a_n = L$$ essentially says that when evaluating limits functions and sequences are identical. A sequence diverges when$\displaystyle \lim_{n \to \infty}a_n = \pm \infty$or$\displaystyle \lim_{n \to \infty}a_n$does not exist. \subsection*{Infinite Series} \subsubsection*{Telescoping Series} Most of the terms cancel out. \subsubsection*{$n$-th term test (shows divergence)}$\displaystyle \sum_{n=1}^{\infty} a_n$diverges if$\displaystyle \lim_{n \to \infty}{a_n}$fails to exist or is non-zero. \subsubsection*{Integral Test} Draw a picture. Use when you can easily find the integral. \subsubsection*{$p$- series} The infinite series$\displaystyle \sum_{n=1}^{\infty} \frac{1}{n^p}$converges if$p>1$and diverges otherwise. \subsubsection*{Comparison Test} Compare to a p-series. 
\subsubsection*{Limit form of Comparison Test} Look at$\displaystyle \lim_{n \to \infty}{\frac{a_n}{b_n}}\;$where$b_n$is usually a p-series.\\ If$=c>0$, then$\sum a_n$and$\sum b_n$both converge or both diverge.\\ If$=0$and$\sum b_n$converges, then$\sum a_n$converges.\\ If$=\infty$and$\sum b_n$diverges, then$\sum a_n$diverges. \subsubsection*{Ratio Test} $$\lim_{n \to \infty} \frac{a_{n+1}}{a_n} = \rho$$ The series converges if$\rho < 1$,\\ the series diverges if$\rho > 1$or$\rho$is infinite,\\ and the test is inconclusive if$\rho = 1$. \subsubsection*{Alternating Series Test} The series, $$\sum_{n=1}^{\infty} (-1)^{n+1} u_n = u_1 - u_2 + u_3 - u_4 + \dots$$ converges if, \begin{enumerate} \item The$u_n$'s are all$>0$, \item$u_n \ge u_{n+1}$for all$n \ge N$for some integer$N$, and \item$u_n \rightarrow 0$. \end{enumerate} \subsubsection*{Absolute Convergence} If$\displaystyle \sum_{n=1}^{\infty} |a_n|$converges, then$\displaystyle \sum_{n=1}^{\infty} a_n$converges. \subsection*{Taylor Series} Taylor Polynomials consist of adding a finite number of things together, whereas Taylor Series is an infinite sum.\\ The Maclaurin series is the Taylor series at$x=0$. \subsection*{Power Series} \section*{More Calculus} \subsection*{Average Value of a Function} $$\frac{\displaystyle \int_a^b f(x)\ dx}{b-a}$$ \subsection*{Arc Length} \begin{center} Arc length over$\displaystyle[a,b] = \int_a^b \sqrt{1+f'(x)^2}\ dx\$\\ \end{center} $$s = \int_a^b \sqrt{x'(t)^2 + y'(t)^2}\ dt \qquad \mbox{(parametric)}$$ \subsection*{Speed} $$\frac{ds}{dt} = \sqrt{x'(t)^2 + y'(t)^2}$$ \subsection*{Surface Area of Revolution} $$2\pi \int_a^b f(x) \sqrt{1+f'(x)^2}\ dx$$ $$2\pi \int_a^b y(t) \sqrt{x'(t)^2 + y'(t)^2}\ dt \qquad \mbox{(parametric)}$$ \end{document} Categories: mathematics, unswcourse Tags: , ## A Response to Terence Tao’s “An airport-inspired puzzle” December 12, 2008 1 comment In Terence Tao’s latest post he poses three questions. Here are my solutions. 
Suppose you are trying to get from one end A of a terminal to the other end B.  (For simplicity, assume the terminal is a one-dimensional line segment.)  Some portions of the terminal have moving walkways (in both directions); other portions do not.  Your walking speed is a constant v, but while on a walkway, it is boosted by the speed u of the walkway for a net speed of v+u.  (Obviously, one would only take those walkway that are going in the direction one wishes to travel in.)  Your objective is to get from A to B in the shortest time possible. 1. Suppose you need to pause for some period of time, say to tie your shoe.  Is it more efficient to do so while on a walkway, or off the walkway?  Assume the period of time required is the same in both cases. 2. Suppose you have a limited amount of energy available to run and increase your speed to a higher quantity v’ (or v’+u, if you are on a walkway).  Is it more efficient to run while on a walkway, or off the walkway?  Assume that the energy expenditure is the same in both cases. 3. Do the answers to the above questions change if one takes into account the effects of special relativity?  (This is of course an academic question rather than a practical one.) Source: Terence Tao, http://terrytao.wordpress.com/2008/12/09/an-airport-inspired-puzzle/ ## Q1. After just thinking about it without any mathematics I was not to sure so I used a mathematical approach. The first thing I did was to draw a diagram, Admittedly, I did simplify the problem in my diagram, however I am confident that this will not affect the final answer. (How do I prove this? I don’t know.) Along with this diagram I also had to define some things in terms of variables. As shown in the diagram, A is the starting point, B is the ending point, C is an arbitrary point in between which separates the escalator section from the non-escalator sections. 
Let, t = time it takes to tie shoe lace v = walking speed u = escalator speed $T_{ac}$ = time it takes to get from A to C $T_{cb}$ = time it takes to get from C to B $T_{ab}$ = time it takes to get from A to B We also know, $speed = \frac {distance}{time}$. Now lets consider two scenarios. Scenario A, the person ties their shoe lace in the non-escalator section. Scenario B, the person ties their shoe lace in the escalator section. ### Scenario A: $T_{ac}=T_{ab}+T_{bc}=\left (t+\frac{d_1}{v}\right )+\left (\frac{d_2}{v+u}\right )$ ### Scenario B: $T_{ac}=T_{ab} + T_{bc}=\left ( \frac{d_1}{v}\right )+T_{bc}$ Now let $d_3 = \mbox{distance traveled in} d_2 \mbox{while the person is tieing their shoe lace.}\\\ \ =vt$ $\therefore \mbox{walking on escalator time for time } t_2 = \frac {d_2 - vt}{v+u}$ I shall now make some reasonable assumptions (also formalising things a bit more), All variables are real, and we shall assume that the person has time to tie their shoe lace while on the escalator. I.e. $t \le \frac{d_2}{u}$ I shall denote $T_A$ to be $T_{ac}$ from scenario A and $T_B$ to be $T_{ac}$ from scenario B. Now to see which is larger $T_A$ or $T_B$ we can examine the sign of $T_A - T_B$. If it is positive then $T_A > T_B$, if it is negative then $T_A < T_B$. By some algebra $T_A - T_B = \frac{vt}{v+u}$ and as $v, u, t > 0$$T_A - T_B > 0$. Hence $T_B < T_A$. Therefore it would be more efficient pause for a moment while on an escalator walkway.\ ## Q2. I will take a similar approach for Q2, examining the two cases and then comparing the resultant time. (I’ll re-edit the post when I get around to working out the solution) ## The Mathematics Behind Graphical Drawing Projections in Technical Drawing In the field of technical drawing, projection methods such as isometric, orthogonal, perspective are used to project three dimensional objects onto a two dimensional plane so that three dimensional objects can be viewed on paper or a computer screen. 
In this article I examine the different methods of projection and their mathematical roots (in an applied sense). The approach that seems to be used by Technical Drawing syllabuses in NSW to draw simple 3D objects in 2D is almost entirely graphical. I don’t think you can say this is a bad thing because you don’t always want or need to know the mathematics behind the process, you just want to be able to draw without thinking about this. However to have an appreciation of what’s really happening the mathematical understanding is a great thing to learn. Many 3D CAD/CAM packages available on the market today (such as AutoCAD, Inventor, Solidworks, CATIA, Rhinoceros) can generate isometric, three point perspective or orthogonal drawings from 3D geometry, however from what I’ve seen they can’t seem do other projections such as dimetric, trimetric, oblique, planometric, one and two point perspective. Admittedly I don’t think these projections are any use or even needed, but when your at high school and you have to show that you know how do to oblique, et al. it can be a problem when the software cannot do it for you from your 3D model. (So I actually wrote a small piece of software to help with this in this article). But to do so, I needed to understand the mathematics behind these graphical projections. So I will try to explain that here. The key idea is to think of everything having coordinates in a coordinate system (I will use the Cartesian system for simplicity). We can then express all these projections as mathematical transformations or maps. Like a function, you feed in the 3D point, and then you get out the projected 2D point. Things get a bit arbitrary here because an isometric view is essentially exactly the same as a front view. So we keep to the convention that when we assign the axis of the coordinate system we try to keep the three planes of the axis parallel to the three main planes of the object. 
The three "main" planes of the object are placed parallel to the three planes of the axis. This is how we will choose our axis in relation to the object. We will not do this though, We will not choose it like this... ...or this. In fact doing something like that shown just above with the object rotated is how we get projections like isometric. Now what we do is take the coordinates of each point and “transform” them to get the projected coordinates, and join these points with lines where they originally were. However we can only do this for some kinds of projections, indeed for all the ones I have mentioned in this post this will do but only because these projections have a special property. They are linear maps (affine maps also hold this property and are a superset of the set of linear maps) which means that straight lines in 3D project to straight lines in 2D. For curves we can just project a lot of points on the curve (subdivide it) and then join them up after they are projected. It all depends what our purpose is and if we are applying it practically. We can generate equations of the projected curves if we know the equation of the original curve but it won’t always be as simple. For example circles in 3D under isometric projection become ellipses on the projection plane. Going back to the process of the projection, we can use matrices to represent these projections where $\begin{pmatrix}x'\\ y'\\ z'\end{pmatrix} = \begin{pmatrix}a&b&c\\ d&e&f\\ g&h&i\end{pmatrix}\begin{pmatrix}x\\ y\\ z\end{pmatrix}$ is the same as, $x' = ax+by+cz\\ y' = dx+ey+fz\\ z' = gx+hy+iz.$ We call the 3 by 3 matrix above as the matrix of the projection. Knowing all this, we can easily define orthogonal projection as you just take two of the dimensions and cull the third. So for say an orthographic top view the projection matrix is simply, $\begin{pmatrix}1&0&0\\ 0&1&0\\ 0&0&0\end{pmatrix}.$ Now we want a projection matrix for isometric. 
One way would be to do the appropriate rotations on the object then do an orthographic projection, we can get the projection matrix by multiplying the matrices for the rotations and orthographic projection together. However I will not detail that here. Instead I will show you another method that I used to describe most of the projections that I learnt from high school (almost all except perspectives). I can describe them as well as many “custom” projections in terms of what the three projected axis look like on the projection plane. I described them all in terms of a scale on each of the three axis, as well as the angle two of the axis make with the projection plane’s horizontal. Projection attributes described in terms of the projected axis. Using this approach we can think of the problem back in a graphical perspective of what the final projected drawing will look like rather than looking at the mathematics of how the object gets rotated prior to taking an orthographic projection or what angle do the projection lines need to be at in relation to the projection plane to get oblique, etc. Note also that the x, y, z in the above diagram are the scales of the x, y, z axis respectively. So we can see in the table below that we can now describe these projections in terms of a graphical approach that I was first taught. Projection α (alpha) β (beta) Sx Sy Sz Isometric 30° 30° 1 1 1 Cabinet Oblique 45° 0° 0.5 1 1 Cavalier Oblique 45° 0° 1 1 1 Planometric 45° 45° 1 1 1 Now all we need is a projection matrix that takes in alpha, beta and the three axes scale’s and does the correct transformation to give the projection. The matrix is, $\begin{bmatrix}x'\\y'\\z'\\1\end{bmatrix}=\begin{bmatrix}S_x\cos\alpha&-S_y\cos\beta&0&0\\ S_x\sin\alpha&S_y\sin\beta&S_z&0\\ 0&0&0&0\\ 0&0&0&1\end{bmatrix}\begin{bmatrix}x\\y\\z\\1\end{bmatrix}$ Now for the derivation. First we pick a 3D Cartesian coordinate system to work with. 
I choose the Z-up Left Hand Coordinate System, shown below and we imagine a rectangular prism in the 3D coordinate system. Block in 3D coordinate system. Now we imagine what it would look like in a 2D coordinate system using isometric projection. Block in 2D coordinate system (isometric). As the alpha and beta angles (shown below) can change, and are therefore not limited to a specific projection, we need to use alpha and beta in the derivation. Now using these simple trig equations below we can deduce the following. All the points on the xz plane have y = 0. Therefore the x' and y' values on the 2D plane will follow the trig property shown above, so: $x'=x\cos\alpha$ $y' = z + x\sin\alpha$ However not all the points lie on the xz plane, y is not always equal to zero. By visualising a point with a fixed x and z value but growing larger in y value, its x' will become lower, and y' will become larger. The extent of the x' and y' growth can again be expressed with the trig property shown, and this value can be added in the respective sense to obtain the final combined x' and y' (separately). $x'=x\cos\alpha -y\cos\beta$ $y' = z + x \sin \alpha + y \sin \beta$ If y is in the negative direction then the sign will automatically change accordingly. The next step is to incorporate the scaling of the axes. This was done by replacing the x, y & z with the scale factor as a multiple of the x, y & z. Hence, $x'=S_x x\cos\alpha -S_y y\cos\beta$ $y' = S_z z + S_x x\sin\alpha + S_y y \sin \beta$ This can now easily be transferred into matrix form as shown at the start of this derivation or left as is. References: Harvey, A. (2007). Industrial Technology – Graphics Industries 2007 HSC Major Project Management Folio. (Link) Categories: graphics, mathematics ## An Introduction to Hypercubes. October 21, 2008 1 comment Point, Line Segment, Square, Cube. But what comes next, what is the equivalent object in higher dimensions? 
Well it is called a hypercube or n-cube, although the 4-cube has the special name tesseract. ## Construction Methods Before I go on to explain about the elements of hypercubes, let me show you some pictures of some hypercubes. I guess this also raises the question how can you construct these objects. One method is to start with a point. Then stretch it out in one dimension to get a line segment. Then take this line and stretch it out in another dimension perpendicular to the previous one, to get a square. Then take that square and stretch it out in another dimension perpendicular to the previous two to get a cube. This is when your visualisation may hit a wall. Its very hard to then visualise taking this cube and stretching it in another dimension perpendicular to the previous three. However mathematically, this is easy and this is one approach to constructing hypercubes. We place a point in R3. ...and then stretch the point in one dimension to make a line... ...and then we stretch that line in a direction perpendicular to the previous time... ...and finally stretch that plane in a direction perpendicular the the previous two times. However there is more mathematical and analytical method. You most probably know that these n-cubes have certain elements to them, namely vertices (points), edges (lines), faces (planes), and then in the next dimension up, cells and then in general n-faces. These elements are summed up nicely here. Firstly we take a field of say $\mathbb{R}^n$. Next we construct the vertices of the n-cube. Basically we are taking all the n dimensional vectors which have all the combinations of 0’s and 1’s for each entry of the vector. 
More mathematically, There is a vertex described by each vector $\begin{pmatrix}a_1\\ a_2\\ \vdots\\ a_n\end{pmatrix}$ where $a_i \in \{0, 1\}.$ There is an edge between vertices $\begin{pmatrix}a_1\\ a_2\\ \vdots\\ a_n\end{pmatrix}$ and $\begin{pmatrix}b_1\\ b_2\\ \vdots\\ b_n\end{pmatrix}$ if and only if $a_j \ne b_j$ for exactly one $j \in \{1, \dots, n\}$. $\qquad \qquad \vdots$ There is an m-face between (or though) vertices $\begin{pmatrix}a_1\\ a_2\\ \vdots\\ a_n\end{pmatrix}$ and $\begin{pmatrix}b_1\\ b_2\\ \vdots\\ b_n\end{pmatrix}$ and … and $\begin{pmatrix}m_1\\ m_2\\ \vdots\\ m_n\end{pmatrix}$ if and only if $a_j \ne b_j \ne \dots \ne m_j$ for exactly $(m - 1), \;\; j \in \{1, \dots, n\}$. Basically this means we list the vertices just as if were were counting in base 2. And then we can group these vertices into different groups based on the n-face level and (if we think of the vertices of a bit string) how many bits we have to change to make two vertices bit streams the same. This approach is very interesting because the concept of grouping these vertices relates strongly to hypergraphs. Another way to think about it is as follows. Edges, from the set of all edges (i.e. joining each vertex with every other vertex), are the ones that are perpendicular to one of the standard basis vectors. This generalises to n-faces; from the set of all n-faces (i.e. all ways of grouping vertices into groups of n) are those that the object constructed is parallel to the span of any set of n of the standard basis vectors. When you think about it, a lot of things that you can say about the square or cube generalise. For instance you can think of a square being surrounded by 4 lines, and cube by 6 surfaces, a tesseract by 8 cells, etc. ## Visualisation Methods Now that we have some idea how to describe and build n-cubes, the next question is how do we draw them. 
There are numerous methods and I can’t explain them all in this post (such as slicing and stereographic projection, as well as other forms of projection (I’ll leave these for another blog article)). But another question is also what aspects do we draw and how do we highlight them. For instance it may seem trivial in two dimensions to ask do I place a dot at each vertex and use just 4 solid lines for the edges. But in higher dimensions we have to think about how do we show the different cells and n-faces. Firstly, how can we draw or project these n dimensional objects in a lower dimensional world (ultimately we want to see them in 2D or 3D as this is the only space we can draw in). This first method is basically the exact same approach that most people would have first learnt back in primary school. Although, I do not think it makes the most sense or makes visualisation easiest. Basically this method is just the take a dot and perform a series of stretches on it that I described earlier, although most people wouldn’t think this is what they were doing. Nor would we usually start with a dot, we would normally start with the square. Although we will, so we start with this. 0-cube. We would now draw a line along some axis from that dot, and place another dot at the end of this line. 1-cube, showing vertices and edges. Now from each of the dots we have, we would draw another line along some other axis and again draw a dot at the end of each of those two lines. We would then connect the newly formed dots. 2-cube, showing vertices and edges. Now, we just keep repeating this process where by each time we are drawing another dimension. So we take each of these four dots and draw lines from them in the direction of another axis, placing a dot at the end of each of these lines, and joining each of the dots that came from other dots that were adjacent, with a line. 3-cube, showing vertices and edges. 
Now for 4D and beyond we basically keep the process going, just choosing really anywhere from the new axis, so long as it passes though the origin. 4-cube, showing vertices and edges. If we do a little bit of work we can see that this map is given by the matrix, $\begin{pmatrix}1&0&r_1\cos \theta&-r_2\cos\phi\\ 0&1&r_1\sin\theta&r_2\sin\phi \\ 0&0&0&0 \\ 0&0&0&0 \end{pmatrix}$ where $\theta$ is the angle of the projected z axis from the x axis, and $\phi$ is the angle of the projected w axis from the negative x axis. Also r1 and r2 are the scales of the third and fourth respective receding axis (it makes it “look” more realistic when we use a number less than 1) This is just an extension of oblique projection for 3D to 2D. Now this method seems very primitive, and a much better approach is to use all the dimensions we have. We live in a three dimensional world, so why just constrict our drawings to two dimensions! Basically, an alternate approach to draw an n-cube in three dimensional space would be to draw n lines all passing though a single point. Although it is not necessary to make all these lines as spread out as possible, we will try to. (This actually presents another interesting idea of how do we equally distribute n points on a sphere. For instance we can try to make it so that all the angles between any two of the points and the origin are equal. But I will leave this for another blog article later.) We then treat each of these lines as one dimension from there we can easily draw, or at least represent an n-dimensional point in 3D space. Now obviously we can have two different points in 4D that map to the same 3D point, but that is always going to happen no matter what map we use. The following set of 4 vectors are the projected axis we will use as a basis. 
$\left \{ \mathbf{e_1}, \mathbf{e_2}, \mathbf{e_3}, \mathbf{e_4} \right \} = \left \{ \begin{pmatrix}1\\1\\1 \end{pmatrix}, \begin{pmatrix}-1\\-1\\1 \end{pmatrix}, \begin{pmatrix}-1\\1\\-1 \end{pmatrix}, \begin{pmatrix}1\\-1\\-1 \end{pmatrix} \right \}$ Now I won’t say how I got these (actually I took them from Wikipedia, they are just the vertices of a 3-simplex) but all of the vectors share a common angle between any two and the origin. Now if we draw in our tesseract, highlighting the cells with different colours (not this became problematic with some faces and edges as they are a common boundary for two different faces, so you cannot really make them one colour or the other) we get something like this, Tesseract projected onto R3. The cells are shown in different colours, the purple lines show the four axis. The projection matrix for this projection is then simply (from the vectors that each of the standard basis maps to), $\begin{pmatrix}1&-1&-1&1\\ 1&-1&1&-1\\ 1&1&-1&-1 \end{pmatrix}$ Now if we compare this to our original drawing (note I’m not talking about the projection used, but rather the presentation of the drawings, i.e. the colour.) I think you will see that the second one is clearer and try’s to show where the cells and faces are, not just the vertices and edges. Note also the second one is in 3D so you can rotate around it. Looking at the first one though, you will notice it doesn’t show where the faces or cells are. Remember that we have more than just vertices, edges and faces. We have cells, and n-faces. These are essentially just different groupings of the vertices. But how can we show these. Now the most mathematical way would be to just list all the different groupings. This is okay, but I like to see things in a visual sense. So another way would just show different elements. Like you draw all the vertices on one overhead, edges on another, and so on. 
Then when you put all these overheads on top of each other we get the full image, but we can also look at just one at a time to see things more clearly. This would be particularly more useful for the higher dimensional objects and higher dimensional elements. We can also use different colours to show the different elements. For example in the square, we can see that the line around surrounding it is 4 lines, but in higher dimensions its not so easy, so we can colour the different parts to the element differently. (When I say part I mean the 4 edges of a square are 4 different parts. Whereas the edges are all one element, but are a different element to the vertices.) ## Some Interesting Properties Once you start defining hypercubes there are many interesting properties that we can investigate. For this section lets just assume that we have the standard hypercube of side length 1. Now we can trivially see that the area, volume, etc. for the respective hypercube will always be 1. As described above each time we add another dimension and sweep the object out into that dimension we effectively multiply this hypervolume by 1. So for an n-cube, the hypervolume of it will be $1^n$. When I say hypervolume I mean the one that makes sense for that dimension. E.g. in 2D, area, in 3D, volume, and so on. The next obvious question to ask is what is the perimeter, surface area, cell volume, …, n-face hypervolume of the respective n-cube? It gets a little confusing as you have to think about what exactly you are finding. Is it a length, an area, a volume? Well it will just be an (n – 1) volume. Eg. in 2D we are finding a length (the perimeter), in 3D, an area (surface area), and so on so that each time we increase the dimension of the n-cube we increase the units we are measuring in. Well if we just start listing the sequence (starting with a square), 4, 6… we notice this is just the number of (n – 1) degree elements. Namely, the number of edge, faces, cells, etc. 
This leads me to the obvious question of how can I calculate the number of m-elements of the n-cube? Well instead of me just going to the formula, which you can find on Wikipedia anyway, I will go through my lines of thinking when I first tried to work this out. Number of vertices is easy, each component of the n-vector can be either a 0 or a 1. So for each component there is 2 possibilities, but we have n of them, so it is just 2x2x2… n times, or $2^n$. Now originally when I tried to work out the number of edges, I started listing them and saw that I could construct the recurrence… Although with the help of graph theory it is very simple. In graph theory the handshaking theorem says $\displaystyle 2\left |E \right| = \sum_{v \in V} \mbox{deg}(v).$ Where $\left | E \right |$ means the number of edges, and $\mbox{deg}(V)$ means the degree of vertex V, which means the number of edges connected to it. Now if we think of an edge being a group of two vertices where you only make one entry of the vector change to get from one vector to the other, then we can see that there are exactly n ways of doing this. We can either change the 1st entry of the vector, or the 2nd, or the …., or the nth. Thus each vertex of the n-cube graph will have degree n. So as we have $2^n$ vertices and each vertex has degree n, then the sum of the vertex degrees will be $n2^n$. Hence by the handshaking theorem, $|E| = \frac{1}{2} n 2^n = n 2^{n-1}.$ I am not exactly sure how to generalise this further. I will leave it for another article. However, the formula is $2^{n-m} \binom{n}{m}.$ (I shall try to write more at a later date.) References: Categories: mathematics
15,592
54,794
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 1, "img_math": 122, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.6875
4
CC-MAIN-2023-23
latest
en
0.941586
https://www.aqua-calc.com/calculate/volume-to-weight/substance/liquid-blank-barium
1,643,432,954,000,000,000
text/html
crawl-data/CC-MAIN-2022-05/segments/1642320299927.25/warc/CC-MAIN-20220129032406-20220129062406-00583.warc.gz
668,300,842
7,617
# Weight of Liquid barium ## liquid barium: convert volume to weight ### Weight of 1 cubic centimeter of liquid barium carat 16.69 ounce 0.12 gram 3.34 pound 0.01 kilogram 0 tonne 3.34 × 10-6 milligram 3 338 ### The entered volume of liquid barium in various units of volume centimeter³ 1 milliliter 1 foot³ 3.53 × 10-5 oil barrel 6.29 × 10-6 Imperial gallon 0 US cup 0 inch³ 0.06 US fluid ounce 0.03 liter 0 US gallon 0 meter³ 1 × 10-6 US pint 0 metric cup 0 US quart 0 metric tablespoon 0.07 US tablespoon 0.07 metric teaspoon 0.2 US teaspoon 0.2 • For instance, calculate how many ounces, pounds, milligrams, grams, kilograms or tonnes of a selected substance in a liter, gallon, fluid ounce, cubic centimeter or in a cubic inch. This page computes weight of the substance per given volume, and answers the question: How much the substance weighs per volume. #### Foods, Nutrients and Calories ASSORTED FINE CHOCOLATES, ASSORTED, UPC: 077260042156 contain(s) 478 calories per 100 grams (≈3.53 ounces)  [ price ] Brassicasterol in Oil, corn #### Gravels, Substances and Oils Gravel, Regular weighs 1 346 kg/m³ (84.02803 lb/ft³) with specific gravity of 1.346 relative to pure water.  Calculate how much of this gravel is required to attain a specific depth in a cylindricalquarter cylindrical  or in a rectangular shaped aquarium or pond  [ weight to volume | volume to weight | price ] white phosphorus vapor [P] weighs 4 420 kg/m³ (275.93159 lb/ft³)  [ weight to volume | volume to weight | price | mole to volume and weight | mass and molar concentration | density ] Volume to weightweight to volume and cost conversions for Refrigerant R-434A, liquid (R434A) with temperature in the range of -40°C (-40°F) to 60°C (140°F) #### Weights and Measurements The slug per US tablespoon density measurement unit is used to measure volume in US tablespoons in order to estimate weight or mass in slugs Torque can be defined as a turning or twisting action of the force F upon an object. 
Gm/min² to hm/s² conversion table, Gm/min² to hm/s² unit converter or convert between all units of acceleration measurement. #### Calculators Calculate volume of a dam and its surface area
599
2,192
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.921875
3
CC-MAIN-2022-05
latest
en
0.685786
http://mathhelpforum.com/differential-geometry/113415-lebesgue-integral-print.html
1,529,634,027,000,000,000
text/html
crawl-data/CC-MAIN-2018-26/segments/1529267864337.41/warc/CC-MAIN-20180622010629-20180622030629-00360.warc.gz
207,136,299
2,549
Lebesgue Integral Let $\displaystyle f(x)= \dfrac {1}{\sqrt{|x|}}$ when $\displaystyle |x|>1$ and $\displaystyle f(x)=1$ when $\displaystyle |x| \le 1$.Show that f is Lebesgue integrable on $\displaystyle \mathbb{R}$. On $\displaystyle |x| \le 1$,since $\displaystyle f(x)=1$,so $\displaystyle \int_{[-1,1]} f dm < \infty$ but when $\displaystyle |x|>1$,I don't think it is integrable as when $\displaystyle x>1$ , $\displaystyle \int_{(1,\infty)} f dm > \int_{[2,n]} f dm$ But $\displaystyle \int_{[2,n]} f dm = 2\sqrt{n} -2\sqrt{2}$,then as $\displaystyle n\rightarrow \infty , \int_{[2,n]} f dm \rightarrow \infty$ and this implies that $\displaystyle f$ in not integrable on $\displaystyle (1,\infty)$.
250
707
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.515625
4
CC-MAIN-2018-26
latest
en
0.529003
https://www.david-cook.org/what-is-the-unit-of-1-cm/
1,696,397,474,000,000,000
text/html
crawl-data/CC-MAIN-2023-40/segments/1695233511361.38/warc/CC-MAIN-20231004052258-20231004082258-00457.warc.gz
765,742,209
10,220
# What is the unit of 1 cm? ## What is the unit of 1 cm? Equivalence to other units of length 1 centimetre = 10 millimetres = 0.01 metres = 0.393700787401574803149606299212598425196850 inches (There are exactly 2.54 centimetres in one inch.) ## Does one unit mean 1 cm? A centimeter is a unit of length in the International System of Units (SI), the current form of the metric system. It is defined as 1/100 meters. It is denoted as cm. How many units make a inch? The inch (symbol: in or ″) is a unit of length in the British imperial and the United States customary systems of measurement. It is equal to 136 yard or 112 of a foot…. Inch 1 in in … … is equal to … Imperial/US units 136 yd or 112 ft Metric (SI) units 25.4 mm ### How do you convert measurement units? There are just two simple steps: 1. Find a conversion factor between the given units and the desired units, and write it as an equation. 2. Convert that equation to a fraction with the desired units on top and the given units on the bottom. ### How much is 1 unit in a scale? A unit scale is a ratio. It compares the dimensions of an actual object to the dimensions of a scale drawing or model. For example, 1 inch on a map of your town might actually represent a distance of 100 feet. We could write this ratio like this \begin{align*}\frac{1 \ inch} {100 \ ft}\end{align*}. What is the meaning of 2cm to 1 unit? It could denote and represent one unit on both axes and coordinates of the vertices are meeting at same line. This represents one unit on the t-axis across the paper and 2cm to represent in one unit. #### How do you convert units in math? To convert a smaller unit to a larger unit (eg to ), divide it by the number of smaller units which are needed to make larger unit. To convert from a larger unit to a smaller one, multiply. To convert from a smaller unit to a larger one, divide. How is scale measured in units? To tell whether the unit is measuring length, mass, or volume, you look at the base. 
Using this table as a reference, you can see the following: A kilogram is 1,000 times larger than one gram (so 1 kilogram = 1,000 grams). A centimeter is 100 times smaller than one meter (so 1 meter = 100 centimeters).
573
2,222
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4
4
CC-MAIN-2023-40
latest
en
0.900858
https://sustainability.stackexchange.com/questions/10040/which-global-fertility-rate-would-allow-the-human-population-to-adjust-to-earth
1,656,858,234,000,000,000
text/html
crawl-data/CC-MAIN-2022-27/segments/1656104244535.68/warc/CC-MAIN-20220703134535-20220703164535-00720.warc.gz
597,056,400
65,359
Which global fertility rate would allow the human population to adjust to Earth's carrying capacity while maintaining an acceptable dependency ratio? Humans currently consume the ecological resources of 1.75 Earths (https://www.footprintnetwork.org/). Assuming that global average consumption remains stable (less in the global North and more in the global South) this would imply that the human population would have to be 43% lower and that a fertility rate of 2.1 is unsustainable. But at the same time fertility rates under 1.0 seem to lead to socio-economic dislocations including potentially unsustainable dependency ratios as larger older generations have to be supported by smaller younger generations. Which fertility rate would allow the human population to adjust to Earth's carrying capacity as soon as possible while minimising socio-economic dislocations? • Wealth leads to a decline in reproduction rates, visible e.g. in the demographic pyramids of e.g. Germany or the UK. So the question should rather be "how much do we need to reduce our consumption rate". – Erik May 6, 2020 at 8:50 • @Erik: I’m trying to understand what the basic global aggregate fertility rate would need to be primarily to get a sense of magnitude. I think the dynamics of how to get to this rate would be a separate question, probably centred on the problem of decoupling: how to increase (or maintain) well-being sufficiently for birth rates to decrease (or remain low) while simultaneously lowering the consumption of natural resources. May 7, 2020 at 12:14 • You need to population to drop to about 60 %, so when you know the time frame, you can calculate this quite easily. Within one generation it'd be a fertility rate of 0.6, the longer you stretch you time frame, the higher the rate may be. Still, it'd couldn't be 1.0 or above. 
– Erik May 7, 2020 at 12:30 • If consumption is held stable I understand that the global fertility rate would have to drop to 1 / 1.75 (EF) = 0.57 x 2 (the fertility rate measures number of children per woman) = 1.14 for the population to shrink into the ecological footprint within a generation. Only a few industrialised territories are at or below this level but it is generally not considered sustainable socio-economically and it is usually compensated with immigration of working age people to increase the dependency ratio. On a global aggregate level immigration would of course not be possible. May 7, 2020 at 14:42 1 Answer I think this is question is difficult to answer for several reasons: 1. There is no agreed upon carrying capacity of the Earth. The Global Footprint Network organization does a nice job estimating the Earth's capacity in terms of land usage (biocapacity), but their ecological footprint is a very rough estimate and is not suitable for decision-making. For example, in their calculations the biocapacity of farmland is higher than that of forests, so according to this method you can increase the Earth's biocapacity by chopping down all forests and turning them into farmland! 2. The question assumes that there is an optimum where we can decrease human population and minimize socio-economic dislocations. I think it's far more likely that the quicker you want to achieve this, the bigger the socio-economic effects will be, so to me it seems more of a question how much problems are you willing to accept. 3. The question assumes that the impact of consumption is constant, but this is very unrealistic. Many countries signed the Paris climate agreement and started implementing measures to reduce their greenhouse gas emissions. Also many countries are looking to change their environmental impact when it comes to plastic pollution and use of finite resources. Perhaps the best is to strive for a global fertility rate of just under 2.1. 
This will reduce the global population slowly (and thus also the footprint of mankind) and at the same time have little other side-effects. This is assuming all countries take their equal share in this reduction compared to current fertility rates.
854
4,063
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3
3
CC-MAIN-2022-27
longest
en
0.935119
https://www.scribd.com/document/324276328/Biologi-Form-4-Pat-k3-Docx
1,563,657,736,000,000,000
text/html
crawl-data/CC-MAIN-2019-30/segments/1563195526670.1/warc/CC-MAIN-20190720194009-20190720220009-00275.warc.gz
810,963,359
60,632
You are on page 1of 8 # SULIT 4551/3 PAPER 3 Answer all questions/ Jawab semua soalan 1. A group of students carried out an experiment to study the anaerobic respiration in yeast. Diagram 1.1 shows the set-up of the experiment. Sekumpulan pelajar menjalankan eksperiment untuk mengetahui respirasi anaerobik dalam yis . Delivery tube Delivery tube Thermometers Stoppers Boiling tubes Test tube A Test tube B Paraffin oil Lime water Lime water Set A Set B ## 5 cm3 yeast suspension + 5 cm3 boiled glucose solution ## 5 cm3 distilled water + 5 cm3 boiled glucose solution Diagram 1.1/Rajah1.1 Diagram 1.2 shows the observations made on the lime water. The thermometer readings and the observations on the lime water were taken at 0 minute and 30 minutes. Rajah 1.2 menunjukkan pemerhatian terhadap air kapur. Bacaan temometer dan pemerhatian ke atas air kapur diambil pada masa 0 minit dan 30 minit. Lime water (clear) ## Diagram 1.2/Rajah 1.2 36 Lime water (chalky) SULIT 4551/3 Table 1.1 shows the result of the experiment at 0 minute and 30 minutes. Time: 0 minute Set A Set B 40 40 30 30 20 20 .. Time: 30 minutes Set A Set B 40 40 30 30 20 20 37 SULIT 4551/3 For examiners use (a) State the thermometer readings in the spaces provided in Table 1.1. [3 marks] 1 (a) (b) (i) Based on Table 1.1, state two observations that can be made on Set A in the experiment. [3 marks] Berdasarkan jadual 1.1, nyatakan dua pemerhatian yang boleh dibuat pada set A dalam eksperimen. Observation 1 on Set A: Pemerhatian 1 dalam set A ....... Observation 2 on Set A: Pemerhatian 2 dalam set A. ........ 1 (b) (i) .... (ii) State the inference for each observation made in (b) (i). [3 marks] ## Inference for observation 1 on Set A: Inferen untuk pemerhatian 1 set A. ...... .. .. Inference for observation 2 on Set A: Inferen untuk pemerhatian 2 set A 1 (b) (i) ... ......... . 38 SULIT 4551/3 (c) Complete Table 1.2 to show the variables involved in the experiment and how the variables are operated. 
[3 marks] ini dan bagaimana pembolehubah ini dikawal. Variables ## Operating the variables Manipulated variable: ## How to handle the manipulated variable: .... ... .. Responding variable: ## How to determine the responding variable: .... ... .. Controlled variable: .... ... ... For examiners use 1 (c) ## (d) State the hypothesis for this experiment. Nyatakan hipotesis eksperimen ini. [3 marks] 39 1 (d) 1 (d) (c) SULIT 4551/3 (e) (i) Construct a table and record all the data collected in this experiment. Bina satu jadual dan rekodkan semua data yang dikumpul dalam eksperimen ini. Your table should have the following titles. Jadual anda hendaklah mengandungi tajuk-tajuk berikut For examiners use [3 marks] ## Solution in the boiling tube Temperature Changes of lime water 1 (e) (i) (ii) Explain the relationship between the boiled glucose solution and paraffin oil used in the experiment of the anaerobic respiration process in yeast. Terangkan hubungan antara larutan glukos yang dididihkan dengan minyak paraffin yang digunakan dalam eksperimen respirasi anaerobic dalam yis. ....... 1 (e) (ii) [3 marks] (f) Following are some of the apparatus and materials used in the experiment. Yeast suspension, boiled glucose solution, distilled water, thermometers, lime water, and paraffin oil Categorise the apparatus and materials according to their roles in operating the variables in the experiment. 40 1 (f) SULIT 4551/3 For examiners use [3 marks] Kategorikan radas dan bahan mengikut penggunaan pembolehubah dalam eksperimen ini. Manipulated variables Responding variables Constant Variables (g) Based on the experiment, define operationally anaerobic respiration. Berdasarkan eksperimen, berikan definasi secara operasi tentang respirasi anaerobic. 1 (g) [3 marks] (h) The experiment is repeated by using Set C containing 10 cm3 of yeast suspension and 10 cm3 of boiled glucose solution. 
Eksperimen ini diulang dengan menggunakan Set C yang mengandungi 10 cm 3 ampaian yis dan10 cm3 larutan glukosa yang dididihkan. ## (i) Predict the readings on the thermometer used in the experiment. Ramalkan bacaan pada thermometer dalam eksperimen ini. Terangkan jawapan anda. ........ .... [3 marks] (ii) Explain the time taken for the lime water to change. Terangkan masa yang diambil untuk perubahan air kapur. 41 1 (h) (i) SULIT 4551/3 .... 1 (h) (ii) .... [3 marks] 2. Aquatic plants carry out photosynthesis to obtain food. In the process, oxygen bubbles are released to the aquatic environment. The rate of photosynthesis is affected by several environmental factors such as temperature, light intensity and concentration of carbon dioxide. Tumbuhan akuatik melakukan fotosintesis untukmendapatkan makanan. Semasa proses tersebut, Kadar fotosintesis dipengaruhi oleh beberapa factor persekitaran seperti suhu, keamatan cahaya, dan kepekatan karbon dioksida. ## Design an experiment to study the effect of carbon dioxide concentration on the rate of photosynthesis. Rancangkan satu eksperimen untuk mengkaji kesan kepekatan karbon dioksida keatas kadar fotosintesis. (17 marks) The planning of your experiment must include the following aspects: Perancangan eksperimen kamu hendaklah termasuk aspek-aspek berikut: ## Problem statement / Pernyataan masalah Hypothesis / hipotesis Variables / pembolehubah List of apparatus and materials / Senarai radas dan bahan Experimental procedure / Kaedah eksperimen Presentation of data / penyampaian data 42 SULIT 4551/3 Disediakan Oleh Mohd Shafiq Husin Tutor 019 770 7095 END OF QUESTION PAPER 43
1,608
5,663
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.53125
3
CC-MAIN-2019-30
latest
en
0.435232
https://www.gradesaver.com/textbooks/math/precalculus/precalculus-6th-edition/chapter-r-review-of-basic-concepts-test-page-85/4
1,575,556,499,000,000,000
text/html
crawl-data/CC-MAIN-2019-51/segments/1575540481076.11/warc/CC-MAIN-20191205141605-20191205165605-00536.warc.gz
724,022,951
12,220
## Precalculus (6th Edition) $(A' \cup C) \cap B'=\color{blue}{\left\{6, 7, 8\right\}}$ $A'$ is the set that contains the element/s of the universal set $U$ that are not elements of $A$. Thus, $A' = \left\{7, 8\right\}$ $A' \cup C$ is the set that contains the combined elements of $A'$ nd $C$. Thus, $A'\cup C=\left\{1, 6, 7, 8\right\}$ $B'$ is the set that contains the elements of the universal set $U$ that are not elements of $B$. Thus, $B' = \left\{2, 4, 6, 7, 8\right\}$ $(A' \cup C) \cap B'$ is the set the contains elements that are common to both $(A' \cup C)$ and $B'$. Note that the elements common to both sets are: 6, 7, 8 Therefore, $(A' \cup C) \cap B'=\color{blue}{\left\{6, 7, 8\right\}}$
272
707
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.375
4
CC-MAIN-2019-51
latest
en
0.838552
http://gmatclub.com/forum/to-install-cable-television-in-a-home-a-certain-cable-company-charges-115990.html?fl=similar
1,485,017,804,000,000,000
text/html
crawl-data/CC-MAIN-2017-04/segments/1484560281151.11/warc/CC-MAIN-20170116095121-00291-ip-10-171-10-70.ec2.internal.warc.gz
116,039,134
56,813
To install cable television in a home, a certain cable company charges : GMAT Data Sufficiency (DS) Check GMAT Club Decision Tracker for the Latest School Decision Releases http://gmatclub.com/AppTrack It is currently 21 Jan 2017, 08:56 ### GMAT Club Daily Prep #### Thank you for using the timer - this advanced tool can estimate your performance and suggest more practice questions. We have subscribed you to Daily Prep Questions via email. Customized for You we will pick new questions that match your level based on your Timer History Track every week, we’ll send you an estimated GMAT score based on your performance Practice Pays we will pick new questions that match your level based on your Timer History # Events & Promotions ###### Events & Promotions in June Open Detailed Calendar # To install cable television in a home, a certain cable company charges post reply Question banks Downloads My Bookmarks Reviews Important topics Author Message TAGS: ### Hide Tags Manager Status: I will not stop until i realise my goal which is my dream too Joined: 25 Feb 2010 Posts: 235 Schools: Johnson '15 Followers: 2 Kudos [?]: 50 [0], given: 16 To install cable television in a home, a certain cable company charges [#permalink] ### Show Tags 26 Jun 2011, 06:17 2 This post was BOOKMARKED 00:00 Difficulty: 45% (medium) Question Stats: 69% (01:47) correct 31% (01:04) wrong based on 51 sessions ### HideShow timer Statistics To install cable television in a home, a certain cable company charges a basic fee of $30 plus a fee of$20 for each cable outlet installed in the home. How much did the cable company charge the Horace family for installing cable television in their home? (1) The cable company installed three cable outlets in the Horace family home. (2) The amount that the cable company charged the Horace family for installing cable television in their home was equivalent to an average ( arithmetic mean) charge of the $30 per cable outlet installed. 
OPEN DISCUSSION OF THIS QUESTION IS HERE: to-instal-a-cable-television-in-a-home-a-certain-cable-comp-131751.html [Reveal] Spoiler: OA _________________ Regards, Harsha Note: Give me kudos if my approach is right , else help me understand where i am missing.. I want to bell the GMAT Cat Satyameva Jayate - Truth alone triumphs Senior Manager Joined: 03 Mar 2010 Posts: 440 Schools: Simon '16 (M) Followers: 5 Kudos [?]: 270 [1] , given: 22 Re: To install cable television in a home, a certain cable company charges [#permalink] ### Show Tags 26 Jun 2011, 11:50 1 This post received KUDOS Stmt1: the cable company installed three cable outlets in the Horace Family home. So 30 + 3(20)= 90. Sufficient, Stmt2: the amount that the cable company charged the Horace family for installing the Cable television in their home was equivalent to an average charge of$30 per cable installed. Let the cable company installed "x" cable outlets in the Horace Family home. So total= 30 +x(20) Average of total amount = 30+x(20) / x = 30 x=3 Total amount = 30 + 3(20) = 90 Sufficient, OA D. _________________ My dad once said to me: Son, nothing succeeds like success. GMAT Club Legend Joined: 09 Sep 2013 Posts: 13483 Followers: 576 Kudos [?]: 163 [0], given: 0 Re: To install cable television in a home, a certain cable company charges [#permalink] ### Show Tags 03 Jan 2015, 09:52 Hello from the GMAT Club BumpBot! Thanks to another GMAT Club member, I have just discovered this valuable topic, yet it had no discussion for over a year. I am now bumping it up - doing my job. I think you may find it valuable (esp those replies with Kudos). Want to see all other topics I dig out? Follow me (click follow button on profile). You will receive a summary of all topics I bump in your profile area as well as via email. 
_________________ Math Expert Joined: 02 Sep 2009 Posts: 36590 Followers: 7091 Kudos [?]: 93331 [0], given: 10557 Re: To install cable television in a home, a certain cable company charges [#permalink] ### Show Tags 05 Jan 2015, 03:24 To install cable television in a home, a certain cable company charges a basic fee of $30 plus a fee of$20 for each cable outlet installed in the home. How much did the cable company charge the Horace family for installing cable television in their home? (1) The cable company installed three cable outlets in the Horace family home. Total cost=$30+$20*3. Sufficient. (2) The amount that the cable company charged the Horace family for installing cable television in their home was equivalent to an average (arithmetic mean) charge of the $30 per cable outlet installed. Say the number of outlets is x, then Total cost=$30+$20*x and the average is ($30+$20*x)/x --> ($30+\$20*x)/x=30. We can solve for x. Sufficient. OPEN DISCUSSION OF THIS QUESTION IS HERE: to-instal-a-cable-television-in-a-home-a-certain-cable-comp-131751.html _________________ Re: To install cable television in a home, a certain cable company charges   [#permalink] 05 Jan 2015, 03:24 Similar topics Replies Last post Similar Topics: 2 At a certain hotel, every room has air conditioning or cable TV 3 20 Sep 2016, 12:54 1 An Internet supplier has 4 international cables. 
Each cable contains 4 3 19 Sep 2016, 04:31 3 For a certain car repair, the total charge consisted of a charge for p 4 21 Oct 2015, 20:39 12 For each order, a certain company charges a delivery fee d 10 09 Dec 2012, 23:33 6 To instal a cable television in a home, a certain cable comp 7 02 May 2012, 00:01 Display posts from previous: Sort by # To install cable television in a home, a certain cable company charges post reply Question banks Downloads My Bookmarks Reviews Important topics Powered by phpBB © phpBB Group and phpBB SEO Kindly note that the GMAT® test is a registered trademark of the Graduate Management Admission Council®, and this site has neither been reviewed nor endorsed by GMAC®.
1,510
5,902
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.765625
4
CC-MAIN-2017-04
latest
en
0.898866
https://mattermodeling.stackexchange.com/questions/6310/visualize-electron-density-using-pyscf?noredirect=1
1,709,637,317,000,000,000
text/html
crawl-data/CC-MAIN-2024-10/segments/1707948234904.99/warc/CC-MAIN-20240305092259-20240305122259-00663.warc.gz
404,602,297
27,829
# Visualize electron density using pyscf I found the following example of code which uses density functional theory to compute the electron density $$\rho$$: #!/usr/bin/env python # # Author: Qiming Sun <[email protected]> # import numpy from pyscf import lib from pyscf.dft import numint, gen_grid """ Gaussian cube file format """ def density(mol, outfile, dm, nx=80, ny=80, nz=80): coord = mol.atom_coords() box = numpy.max(coord, axis=0) - numpy.min(coord, axis=0) + 4 boxorig = numpy.min(coord, axis=0) - 2 xs = numpy.arange(nx) * (box[0] / nx) ys = numpy.arange(ny) * (box[1] / ny) zs = numpy.arange(nz) * (box[2] / nz) coords = lib.cartesian_prod([xs, ys, zs]) coords = numpy.asarray(coords, order="C") - (-boxorig) nao = mol.nao_nr() ngrids = nx * ny * nz blksize = min(200, ngrids) rho = numpy.empty(ngrids) for ip0, ip1 in gen_grid.prange(0, ngrids, blksize): ao = numint.eval_ao(mol, coords[ip0:ip1]) rho[ip0:ip1] = numint.eval_rho(mol, ao, dm) rho = rho.reshape(nx, ny, nz) with open(outfile, "w") as f: f.write("Density in real space\n") f.write("Comment line\n") f.write("]" % mol.natm) f.write(" .8f .8f .8f\n" % tuple(boxorig.tolist())) f.write("] .8f .8f .8f\n" % (nx, xs[1], 0, 0)) f.write("] .8f .8f .8f\n" % (ny, 0, ys[1], 0)) f.write("] .8f .8f .8f\n" % (nz, 0, 0, zs[1])) for ia in range(mol.natm): chg = mol.atom_charge(ia) f.write("%5d %f" % (chg, chg)) f.write(" .8f .8f .8f\n" % tuple(coord[ia])) fmt = " .8e" * nz + "\n" for ix in range(nx): for iy in range(ny): f.write(fmt % tuple(rho[ix, iy].tolist())) if __name__ == "__main__": from pyscf import gto, scf from pyscf.tools import cubegen mol = gto.M(atom="H 0 0 0; H 0 0 1") mf = scf.RHF(mol) mf.scf() cubegen.density(mol, "h2.cube", mf.make_rdm1()) I wonder however about the nature of this output rho. So if I was to visualize this electron density how would I plot it? Is anyone familiar with this package? It seems to be an array with three columns $$\rho$$, each of length 80. 
But what does one point then correspond to? • Welcome to the site! Just to clarify, is your question about how to the plot the density (ie how to do this in some programming language) or how to interpret the Cube file format? – Tyberius Jul 4, 2021 at 17:42 • @Tyberius yes, I wonder if there is a nice way to visualize this density in python. Jul 4, 2021 at 17:44 • Jul 6, 2021 at 15:15 • Multiwfn will visualize electron density as well as perform a bunch of other calculations Jul 11, 2021 at 23:11 This is just the Gaussian cube format. It's essentially a voxel dump of the wave function evaluated on a grid. You can find some documentation at http://paulbourke.net/dataformats/cube/ and https://h5cube-spec.readthedocs.io/en/latest/cubeformat.html . Most electronic structure programs are able to generate Gaussian cube files. Several molecular viewers can also do so. For instance, I think Avogadro, Jmol and IQmol all support visualizing from Gaussian cube format. Cube files can be parsed into a 3D scalar grid (i.e. a numpy array) directly. These can then by visualized by applying the marching cubes approach (scikit-image has an implementation) to generate a mesh, and this mesh can be shown in interactive 3d using meshplot. For example, to parse cube files, in the past I've used: def parse_cube(filename): #from: https://github.com/psi4/psi4numpy/blob/6ed03e715689ec82bf96fbb23c1855fbe7835b90/Tutorials/14_Visualization/vizualize.ipynb """ Parses a cube file, returning a dict of the information contained. The cubefile itself is stored in a numpy array. 
""" with open(filename) as fp: results = {} # skip over the title natoms = int(origin[0]) results['minx'] = minx = float(origin[1]) results['miny'] = miny = float(origin[2]) results['minz'] = minz = float(origin[3]) numx = int(infox[0]) incx = float(infox[1]) results['incx'] = incx results['numx'] = numx results['maxx'] = minx + incx * numx numy = int(infoy[0]) incy = float(infoy[2]) results['incy'] = incy results['numy'] = numy results['maxy'] = miny + incy * numy numz = int(infoz[0]) incz = float(infoz[3]) results['incz'] = incz results['numz'] = numz results['maxz'] = minz + incz * numz atnums = [] coords = [] for atom in range(natoms): atnums.append(int(coordinfo[0])) coords.append(list(map(float, coordinfo[2:]))) results['atom_numbers'] = np.array(atnums) results['atom_coords'] = np.array(coords) data = np.array([ float(entry) for line in fp for entry in line.split() ]) if len(data) != numx*numy*numz: raise Exception("Amount of parsed data is inconsistent with header in Cube file!") results['data'] = data.reshape((numx,numy,numz)) return results Now load up a cube file: cube = parse_cube('ESP.cube') print(cube['data'].min(), cube['data'].max()) I print the min/max because I want to know some range within which to set the cutoff for marching cubes. Here the cutoff is 0.07, and the spacing (i.e. gridsize) is 0.3 angstroms. Note that the spacing is hardcoded into the cube file - one would normally set it when calculating the ESP. from skimage import measure vert, faces, norm, values= measure.marching_cubes(cube['data'], 0.07, spacing=(0.3,0.3,0.3)) finally, draw this mesh with meshplot: import meshplot as mp vert = vert mp.plot(vert, faces) And the final result looks a bit like this: scikitimage: https://scikit-image.org/
1,630
5,326
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 2, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.890625
3
CC-MAIN-2024-10
latest
en
0.506382
https://www.coursehero.com/file/6590904/1-3-Evaluating-Limits-Analytically/
1,527,399,076,000,000,000
text/html
crawl-data/CC-MAIN-2018-22/segments/1526794868003.97/warc/CC-MAIN-20180527044401-20180527064401-00187.warc.gz
720,015,669
211,190
{[ promptMessage ]} Bookmark it {[ promptMessage ]} 1-3 Evaluating Limits Analytically 1-3 Evaluating Limits Analytically - Evaluating Limits... This preview shows pages 1–8. Sign up to view the full content. Evaluating Limits Analytically Section 1.3 This preview has intentionally blurred sections. Sign up to view the full version. View Full Document Thm. 1.1 Some Basic Limits Let a and c be real numbers and let n be a positive. b b c x = lim c x c x = lim n n c x c x = lim Thm. 1.2 Properties of Limits Let b and c be real #s, let n be a positive integer, and let f and g be functions with the following limits. 1. Scalar Multiple: L x f c x = ) ( lim K x g c x = ) ( lim . 1 [ ] bL x f b c x = ) ( lim This preview has intentionally blurred sections. Sign up to view the full version. View Full Document 2. Sum or Difference: 3. Product: 4. Quotient: [ ] K L x g x f c x ± = ± ) ( ) ( lim [ ] LK x g x f c x = ) ( ) ( lim 0 , ) ( ) ( lim = K K L x g x f c x 5. Power: [ ] n n c x L x f = ) ( lim This preview has intentionally blurred sections. Sign up to view the full version. View Full Document Thm. 1.3 Limits of Polynomial and Rational Functions If p is a polynomial function and c is a real number, then If r is a rational function given by r(x) = p(x)/q(x) and q(c) 0 Ex. 3 ) ( ) ( lim c p x p c x = ) ( ) ( ) ( ) ( lim c q c p c r x r c x = = Thm. 1.4 The Limit of a Function Involving a Radical Let n be a positvie integer. The following limit is valid for al c if n is odd, and is valid for c > 0 if n is even. This preview has intentionally blurred sections. Sign up to view the full version. View Full Document This is the end of the preview. Sign up to access the rest of the document. 
{[ snackBarMessage ]} What students are saying • As a current student on this bumpy collegiate pathway, I stumbled upon Course Hero, where I can find study resources for nearly all my courses, get online help from tutors 24/7, and even share my old projects, papers, and lecture notes with other students. Kiran Temple University Fox School of Business ‘17, Course Hero Intern • I cannot even describe how much Course Hero helped me this summer. It’s truly become something I can always rely on and help me. In the end, I was not only able to survive summer classes, but I was able to thrive thanks to Course Hero. Dana University of Pennsylvania ‘17, Course Hero Intern • The ability to access any university’s resources through Course Hero proved invaluable in my case. I was behind on Tulane coursework and actually used UCLA’s materials to help me move forward and get everything together on time. Jill Tulane University ‘16, Course Hero Intern
718
2,677
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.53125
5
CC-MAIN-2018-22
latest
en
0.831677
https://www.topperlearning.com/doubts-solutions/icse-class-7-mathematics/algebraic-expressions/operations-on-algebraic-expressions
1,652,770,571,000,000,000
text/html
crawl-data/CC-MAIN-2022-21/segments/1652662517018.29/warc/CC-MAIN-20220517063528-20220517093528-00571.warc.gz
1,233,559,996
62,175
Contact For Study plan details 10:00 AM to 7:00 PM IST all days. For Franchisee Enquiry OR or Thanks, You will receive a call shortly. Customer Support You are very important to us 9321924448 Mon to Sat - 10 AM to 7 PM Ask Operations On Algebraic Expressions question free × Queries asked on Sunday and after 7 pm from Monday to Saturday will be answered after 12 pm the next working day. • ICSE× • Class 7× • Mathematics× • Algebraic Expressions× • Operations On Algebraic Expressions× # Operations On Algebraic Expressions Free Doubts and Solutions ## where is fundamental concept of algebra chapter? please put that chapter also Asked by jyovi1981 20th July 2021, 12:16 PM × ## There are 10a+13a passengers in a train. If 4a-3b and 5b-2a passengers board at two consecutive station, what is the number of passengers in the train now Asked by akashsagar102460 29th March 2021, 8:55 AM × ## what is like and unlike term Asked by sumandubey 25th January 2021, 10:17 PM × ## iam want the answer of the main Asked by mksowmya8 25th September 2020, 8:46 PM × ## answer for this 4x from 8-x Asked by sowbhagyapranavthakshak 17th September 2020, 1:47 PM × × ## (x +3)/7 - 2(x-4) /3 =1  Answer=? Asked by kavyapatel7200.7sdatl 21st June 2020, 12:02 PM × ## Find the value of  1. 2x+3y, when x=3 and y=4 Asked by singhaprajita7144.7sdatl 27th May 2020, 8:16 PM × ## x-x÷4-1÷3=2+x÷4 Asked by dastina776 18th April 2020, 12:22 PM × ## 11x-2y-13z and 7x+8y-9y subtract Asked by Varinder0050 5th April 2020, 4:06 PM × ## evaluate:3p/4-4p/5 Asked by charymahesh15 5th December 2019, 7:01 PM × ## Subtract -5a2 -3a +1 from the sum of 4a2 +3 -8a and 9a -7 Asked by mohitchauhan 20th October 2019, 7:24 AM × ## ¾(2x-5) Asked by andrewglarix 24th August 2019, 7:23 AM × ## A2-b2 subtract from a2+b2 Asked by karuna6895 11th June 2019, 2:26 PM × ## The wages of 10 workers for a six days week are ₹ 1200. What are the one day wages :  ¡ ) of one worker ? ¡¡) of 4 workers ? 
Asked by garganimesh786 29th April 2019, 5:49 PM × ## 1ab 2ac 3ab 4ac 5ab 6ac 7ab 8ac 9ab ac ab 5xy-7xy 3fb -3ab = Asked by jayakumar.narasimhan 17th October 2017, 8:29 AM × ## 77a+22b-90a-= Asked by jayakumar.narasimhan 16th October 2017, 10:14 AM × ## 4y7y5y96=-09090909 Asked by jayakumar.narasimhan 19th September 2017, 5:28 PM × ## please send me all the 7 identities Asked by kanav2001 17th September 2017, 8:02 PM × ## If x =  , then the value of   is  0 3 4 5 Asked by chauhanayush706 1st June 2017, 7:16 AM × ## Please provide solution to this question Asked by Topperlearning User 16th September 2016, 3:27 PM × ## Please provide solution to this question Asked by Topperlearning User 16th September 2016, 3:22 PM
1,028
2,756
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.796875
3
CC-MAIN-2022-21
latest
en
0.934545
https://uk.mathworks.com/matlabcentral/cody/problems/2918-matlab-basics-ii-extract-last-3-elements-of-a-vector/solutions/1133469
1,579,697,186,000,000,000
text/html
crawl-data/CC-MAIN-2020-05/segments/1579250606975.49/warc/CC-MAIN-20200122101729-20200122130729-00499.warc.gz
719,581,592
15,536
Cody # Problem 2918. Matlab Basics II - Extract last 3 elements of a vector Solution 1133469 Submitted on 4 Mar 2017 by nag This solution is locked. To view this solution, you need to provide a solution of the same size or smaller. ### Test Suite Test Status Code Input and Output 1   Pass x = [1 2 3 4]; y_correct = [2 3 4]; assert(isequal(last_three(x),y_correct)) 2   Pass x = [5;2;8;6;7;2;3]; y_correct = [7;2;3]; assert(isequal(last_three(x),y_correct)) 3   Pass x = [1.2 3.6 2.5 2.7 2.8 8.6 9.2 2.7 3.7 -4.9]; y_correct = [2.7 3.7 -4.9]; assert(isequal(last_three(x),y_correct))
226
591
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.71875
3
CC-MAIN-2020-05
latest
en
0.632416
http://www.neverendingbooks.org/tag/games/page/2
1,566,165,669,000,000,000
text/html
crawl-data/CC-MAIN-2019-35/segments/1566027314130.7/warc/CC-MAIN-20190818205919-20190818231919-00527.warc.gz
295,008,086
18,909
Skip to content → # Tag: games Here’s a tiny problem illustrating our limited knowledge of finite fields : “Imagine an infinite queue of Knights ${ K_1,K_2,K_3,\ldots }$, waiting to be seated at the unit-circular table. The master of ceremony (that is, you) must give Knights $K_a$ and $K_b$ a place at an odd root of unity, say $\omega_a$ and $\omega_b$, such that the seat at the odd root of unity $\omega_a \times \omega_b$ must be given to the Knight $K_{a \otimes b}$, where $a \otimes b$ is the Nim-multiplication of $a$ and $b$. Which place would you offer to Knight $K_{16}$, or Knight $K_n$, or, if you’re into ordinals, Knight $K_{\omega}$?” What does this have to do with finite fields? Well, consider the simplest of all finite field $\mathbb{F}_2 = { 0,1 }$ and consider its algebraic closure $\overline{\mathbb{F}_2}$. Last year, we’ve run a series starting here, identifying the field $\overline{\mathbb{F}_2}$, following John H. Conway in ONAG, with the set of all ordinals smaller than $\omega^{\omega^{\omega}}$, given the Nim addition and multiplication. I know that ordinal numbers may be intimidating at first, so let’s just restrict to ordinary natural numbers for now. The Nim-addition of two numbers $n \oplus m$ can be calculated by writing the numbers n and m in binary form and add them without carrying. For example, $9 \oplus 1 = 1001+1 = 1000 = 8$. Nim-multiplication is slightly more complicated and is best expressed using the so-called Fermat-powers $F_n = 2^{2^n}$. We then demand that $F_n \otimes m = F_n \times m$ whenever $m < F_n$ and $F_n \otimes F_n = \frac{3}{2}F_n$. Distributivity wrt. $\oplus$ can then be used to calculate arbitrary Nim-products. For example, $8 \otimes 3 = (4 \otimes 2) \otimes (2 \oplus 1) = (4 \otimes 3) \oplus (4 \otimes 2) = 12 \oplus 8 = 4$. Conway’s remarkable result asserts that the ordinal numbers, equipped with Nim addition and multiplication, form an algebraically closed field of characteristic two. 
The closure $\overline{\mathbb{F}_2}$ is identified with the subfield of all ordinals smaller than $\omega^{\omega^{\omega}}$. For those of you who don’t feel like going transfinite, the subfield $~(\mathbb{N},\oplus,\otimes)$ is identified with the quadratic closure of $\mathbb{F}_2$. The connection between $\overline{\mathbb{F}_2}$ and the odd roots of unity has been advocated by Alain Connes in his talk before a general public at the IHES : “L’ange de la géométrie, le diable de l’algèbre et le corps à un élément” (the angel of geometry, the devil of algebra and the field with one element). He describes its content briefly in this YouTube-video At first it was unclear to me which ‘coupling-problem’ Alain meant, but this has been clarified in his paper together with Caterina Consani Characteristic one, entropy and the absolute point. The non-zero elements of $\overline{\mathbb{F}_2}$ can be identified with the set of all odd roots of unity. For, if x is such a unit, it belongs to a finite subfield of the form $\mathbb{F}_{2^n}$ for some n, and, as the group of units of any finite field is cyclic, x is an element of order $2^n-1$. Hence, $\mathbb{F}_{2^n}- { 0 }$ can be identified with the set of $2^n-1$-roots of unity, with $e^{2 \pi i/n}$ corresponding to a generator of the unit-group. So, all elements of $\overline{\mathbb{F}_2}$ correspond to an odd root of unity. The observation that we get indeed all odd roots of unity may take you a couple of seconds (( If m is odd, then (2,m)=1 and so 2 is a unit in the finite cyclic group $~(\mathbb{Z}/m\mathbb{Z})^*$ whence $2^n = 1 (mod~m)$, so the m-roots of unity lie within those of order $2^n-1$ )). Assuming we succeed in fixing a one-to-one correspondence between the non-zero elements of $\overline{\mathbb{F}_2}$ and the odd roots of unity $\mu_{odd}$ respecting multiplication, how can we recover the addition on $\overline{\mathbb{F}_2}$? 
Well, here’s Alain’s coupling function, he ties up an element x of the algebraic closure to the element s(x)=x+1 (and as we are in characteristic two, this is an involution, so also the element tied up to x+1 is s(x+1)=(x+1)+1=x. The clue being that multiplication together with the coupling map s allows us to compute any sum of two elements as $x+y=x \times s(\frac{y}{x}) = x \times (\frac{y}{x}+1)$. For example, all information about the finite field $\mathbb{F}_{2^4}$ is encoded in this identification with the 15-th roots of unity, together with the pairing s depicted as Okay, we now have two identifications of the algebraic closure $\overline{\mathbb{F}_2}$ : the smaller ordinals equipped with Nim addition and Nim multiplication and the odd roots of unity with complex-multiplication and the Connes-coupling s. The question we started from asks for a general recipe to identify these two approaches. To those of you who are convinced that finite fields (LOL, even characteristic two!) are objects far too trivial to bother thinking about : as far as I know, NOBODY knows how to do this explicitly, even restricting the ordinals to merely the natural numbers! Please feel challenged! To get you started, I’ll show you how to place the first 15 Knights and give you a procedure (though far from explicit) to continue. Here’s the Nim-picture compatible with that above To verify this, and to illustrate the general strategy, I’d better hand you the Nim-tables of the first 16 numbers. Here they are It is known that the finite subfields of $~(\mathbb{N},\oplus,\otimes)$ are precisely the sets of numbers smaller than the Fermat-powers $F_n$. So, the first one is all numbers smaller than $F_1=4$ (check!). The smallest generator of the multiplicative group (of order 3) is 2, so we take this to correspond to the unit-root $e^{2 \pi i/3}$. The next subfield are all numbers smaller than $F_2 = 16$ and its multiplicative group has order 15. 
Now, choose the smallest integer k which generates this group, compatible with the condition that $k^{\otimes 5}=2$. Verify that this number is 4 and that this forces the identification and coupling given above. The next finite subfield would consist of all natural numbers smaller than $F_3=256$. Hence, in this field we are looking for the smallest number k generating the multiplicative group of order 255 satisfying the extra condition that $k^{\otimes 17}=4$ which would fix an identification at that level. Then, the next level would be all numbers smaller than $F_4=65536$ and again we would like to find the smallest number generating the multiplicative group and such that the appropriate power is equal to the aforementioned k, etc. etc. Can you give explicit (even inductive) formulae to achieve this? I guess even the problem of placing Knight 16 will give you a couple of hours to think about… (to be continued). To mark the end of 2009 and 6 years of blogging, two musical compositions with a mathematical touch to them. I wish you all a better 2010! Remember from last time that we identified Olivier Messiaen as the ‘Monsieur Modulo’ playing the musical organ at the Bourbaki wedding. This was based on the fact that his “modes à transposition limitée” are really about epimorphisms between modulo rings Z/12Z→Z/3Z and Z/12Z→Z/4Z. However, Messiaen had more serious mathematical tricks up his sleeve. In two of his compositions he did discover (or at least used) one of the smaller sporadic groups, the Mathieu group $M_{12}$ of order 95040 on which we have based a whole series of Mathieu games two and a half years ago. Messiaen’s ‘Ile de fey 2’ composition for piano (part of Quatre études de rythme (“Four studies in rhythm”), piano (1949–50)) is based on two concurrent permutations. The first is shown below, with the underlying motive rotational permutation shown. This gives the permutation (1,7,10,2,6,4,5,9,11,12)(3,8). 
A second concurrent permutation is based on the permutation (1,6,9,2,7,3,5,4,8,10,11) and both of them generate the Mathieu group $M_{12}$. This can be seen by realizing the two permutations as the rotational permutations and identifying them with the Mongean shuffles generating $M_{12}$. See for example, Dave Benson’s book “Music: A Mathematical Offering”, freely available online. Clearly, Messiaen doesn’t use all of its 95040 permutations in his piece! Here’s how it sounds. The piece starts 2 minutes into the clip. The second piece is “Les Yeux dans les Roues” (The Eyes in the Wheels), sixth piece from the “Livre d’Orgue” (1950/51). According to Hauptwerk, the piece consists of a melody/theme in the pedal, accompanied by two fast-paced homorhythmic lines in the manuals. The pedal presents a sons-durées theme which is repeated six times, in different permutations. Initially it is presented in its natural form. Afterwards, it is presented alternatively picking notes from each end of the original form. Similar transformations are applied each time until the sixth, which is the retrograde of the first. The entire twelve-tone analysis (pitch only, not rhythm) of the pedal is shown below: That is we get the following five permutations which again generate Mathieu 12 : • a=(2,3,5,9,8,10,6,11,4,7,12) • b=(1,2,4,8,9,7,11,3,6,12)(5,10)=e*a • c=(1,12,11,9,5,4,6,2,10,7)(3,8)=e*d • d=(1,11,10,8,4,5,3,7,2,9,6) • e=(1,12)(2,11)(3,10)(4,9)(5,8)(6,7) Here’s the piece performed on organ : Considering the permutations $X=d.a^{-1}$ and $Y=(a.d^2.a.d^3)^{-1}$ one obtains canonical generators of $M_{12}$, that is, generators satisfying the defining equations of this sporadic group $X^2=Y^3=(XY)^{11}=[X,Y]^6=(XYXYXY^{-1})^6=1$ I leave you to work out the corresponding dessin d’enfant tonight after a couple of glasses of champagne! It sure has a nice form. Once again, a better 2010! 
Over the week-end I read The artist and the mathematician (subtitle : The story of Nicolas Bourbaki, the genius mathematician who never existed) by Amir D. Aczel. Whereas the central character of the book should be Bourbaki, it focusses more on two of Bourbaki’s most colorful members, André Weil and Alexander Grothendieck, and the many stories and myths surrounding them. The opening chapter (‘The Disappearance’) describes the Grothendieck’s early years (based on the excellent paper by Allyn Jackson Comme Appelé du Néant ) and his disappearance in the Pyrenees in the final years of last century. The next chapter (‘An Arrest in Finland’) recount the pre-WW2 years of Weil and the myth of his arrest in Finland and his near escape from execution (based on Weil’s memoires The Apprenticeship of a Mathematician). Chapter seven (‘The Café’) describes the first 10 proto-Bourbaki meetings following closely the study ‘A Parisian Café and Ten Proto-Bourbaki Meetings (1934-1935)‘ by Liliane Beaulieu. Etc. etc. All the good ‘Bourbaki’-stories get a place in this book, not always historically correct. For example, on page 90 it is suggested that all of the following jokes were pulled at the Besse-conference, July 1935 : the baptizing of Nicolas, the writing of the Comptes-Rendus paper, the invention of the Bourbaki-daughter Betti and the printing of the wedding invitation card. In reality, all of these date from much later, the first two from the autumn of 1935, the final two no sooner than april 1939… One thing I like about this book is the connection it makes with other disciplines, showing the influence of Bourbaki’s insistence on ‘structuralism’ in fields as different as philosophy, linguistics, anthropology and literary criticism. One example being Weil’s group-theoretic solution to the marriage-rules problem in tribes of Australian aborigines studied by Claude Lévi-Strauss, another the literary group Oulipo copying Bourbaki’s work-method. 
Another interesting part is Aczel’s analysis of Bourbaki’s end. In the late 50ties, Grothendieck tried to convince his fellow Bourbakis to redo their work on the foundations of mathematics, changing these from set theory to category theory. He failed as others felt that the foundations had already been laid and there was no going back. Grothendieck left, and Bourbaki would gradually decline following its refusal to accept new methods. In Grothendieck’s own words (in “Promenade” 63, n. 78, as translated by Aczel) : “Additionally, since the 1950s, the idea of structure has become passé, superseded by the influx of new ‘categorical’ methods in certain of the most dynamical areas of mathematics, such as topology or algebraic geometry. (Thus, the notion of ‘topos’ refuses to enter into the ‘Bourbaki sack’ os structures, decidedly already too full!) In making this decision, in full cognizance, not to engage in this revision, Bourbaki has itself renounced its initial ambition, which has been to furnish both the foundations and the basic language for all of modern mathematics.” Finally, it is interesting to watch Aczel’s own transformation throughout the book, from slavishly copying the existing Weil-myths and pranks at the beginning of the book, to the following harsh criticism on Weil, towards the end (p. 209) : “From other information in his autobiography, one gets the distinct impression that Weil was infatuated with the childish pranks of ‘inventing’ a person who never existed, creating for him false papers and a false identity, complete with a daughter, Betti, who even gets married, parents and relatives, and membership in a nonexistent Academy of Sciences of the nonexistent nation of Polvedia (sic). Weil was so taken with these activities that he even listed, as his only honor by the time of his death ‘Member, Poldevian Academy of Sciences’. 
It seems that Weil could simply not go beyond these games: he could not grasp the deep significance and power of the organization he helped found. He was too close, and thus unable to see the great achievements Bourbaki was producing and to acknowledge and promote these achievements. Bourbaki changed the way we do mathematics, but Weil really saw only the pranks and the creation of a nonexistent person.” Judging from my own reluctance to continue with the series on the Bourbaki code, an overdose reading about Weil’s life appears to have this effect on people… Last time we did recall Cantor’s addition and multiplication on ordinal numbers. Note that we can identify an ordinal number $\alpha$ with (the order type of) the set of all strictly smaller ordinals, that is, $\alpha = { \alpha’~:~\alpha’ < \alpha }$. Given two ordinals $\alpha$ and $\beta$ we will denote their Cantor-sums and products as $[ \alpha + \beta]$ and $[\alpha . \beta]$. The reason for these square brackets is that John Conway constructed a well behaved nim-addition and nim-multiplication on all ordinals $\mathbf{On}_2$ by imposing the ‘simplest’ rules which make $\mathbf{On}_2$ into a field. By this we mean that, in order to define the addition $\alpha + \beta$ we must have constructed before all sums $\alpha’ + \beta$ and $\alpha + \beta’$ with $\alpha’ < \alpha$ and $\beta’ < \beta$. If + is going to be a well-defined addition on $\mathbf{On}_2$ clearly $\alpha + \beta$ cannot be equal to one of these previously constructed sums and the ‘simplicity rule’ asserts that we should take $\alpha+\beta$ the least ordinal different from all these sums $\alpha’+\beta$ and $\alpha+\beta’$. In symbols, we define $\alpha+ \beta = \mathbf{mex} { \alpha’+\beta,\alpha+ \beta’~|~\alpha’ < \alpha, \beta’ < \beta }$ where $\mathbf{mex}$ stands for ‘minimal excluded value’. 
If you’d ever played the game of Nim you will recognize this as the Nim-addition, at least when $\alpha$ and $\beta$ are finite ordinals (that is, natural numbers) (to nim-add two numbers n and m write them out in binary digits and add without carrying). Alternatively, the nim-sum n+m can be found applying the following two rules : • the nim-sum of a number of distinct 2-powers is their ordinary sum (e.g. $8+4+1=13$, and, • the nim-sum of two equal numbers is 0. So, all we have to do is to write numbers n and m as sums of two powers, scratch equal terms and add normally. For example, $13+7=(8+4+1)+(4+2+1)=8+2=10$ (of course this is just digital sum without carry in disguise). Here’s the beginning of the nim-addition table on ordinals. For example, to define $13+7$ we have to look at all values in the first 7 entries of the row of 13 (that is, ${ 13,12,15,14,9,8,11 }$) and the first 13 entries in the column of 7 (that is, ${ 7,6,5,4,3,2,1,0,15,14,13,12,11 }$) and find the first number not included in these two sets (which is indeed $10$). In fact, the above two rules allow us to compute the nim-sum of any two ordinals. Recall from last time that every ordinal can be written uniquely as as a finite sum of (ordinal) 2-powers : $\alpha = [2^{\alpha_0} + 2^{\alpha_1} + \ldots + 2^{\alpha_k}]$, so to determine the nim-sum $\alpha+\beta$ we write both ordinals as sums of ordinal 2-powers, delete powers appearing twice and take the Cantor ordinal sum of the remaining sum. Nim-multiplication of ordinals is a bit more complicated. Here’s the definition as a minimal excluded value $\alpha.\beta = \mathbf{mex} { \alpha’.\beta + \alpha.\beta’ – \alpha’.\beta’ }$ for all $\alpha’ < \alpha, \beta’ < \beta$. 
The rationale behind this being that both $\alpha-\alpha’$ and $\beta – \beta’$ are non-zero elements, so if $\mathbf{On}_2$ is going to be a field under nim-multiplication, their product should be non-zero (and hence strictly greater than 0), that is, $~(\alpha-\alpha’).(\beta-\beta’) > 0$. Rewriting this we get $\alpha.\beta > \alpha’.\beta+\alpha.\beta’-\alpha’.\beta’$ and again the ‘simplicity rule’ asserts that $\alpha.\beta$ should be the least ordinal satisfying all these inequalities, leading to the $\mathbf{mex}$-definition above. The table gives the beginning of the nim-multiplication table for ordinals. For finite ordinals n and m there is a simple 2 line procedure to compute their nim-product, similar to the addition-rules mentioned before : • the nim-product of a number of distinct Fermat 2-powers (that is, numbers of the form $2^{2^n}$) is their ordinary product (for example, $16.4.2=128$), and, • the square of a Fermat 2-power is its sesquimultiple (that is, the number obtained by multiplying with $1\frac{1}{2}$ in the ordinary sense). That is, $2^2=3,4^2=6,16^2=24,…$ Using these rules, associativity and distributivity and our addition rules it is now easy to work out the nim-multiplication $n.m$ : write out n and m as sums of (multiplications by 2-powers) of Fermat 2-powers and apply the rules. Here’s an example $5.9=(4+1).(4.2+1)=4^2.2+4.2+4+1=6.2+8+4+1=(4+2).2+13=4.2+2^2+13=8+3+13=6$ Clearly, we’d love to have a similar procedure to calculate the nim-product $\alpha.\beta$ of arbitrary ordinals, or at least those smaller than $\omega^{\omega^{\omega}}$ (recall that Conway proved that this ordinal is isomorphic to the algebraic closure $\overline{\mathbb{F}}_2$ of the field of two elements). 
From now on we restrict to such ‘small’ ordinals and we introduce the following special elements : $\kappa_{2^n} = [2^{2^{n-1}}]$ (these are the Fermat 2-powers) and for all primes $p > 2$ we define $\kappa_{p^n} = [\omega^{\omega^{k-1}.p^{n-1}}]$ where $k$ is the number of primes strictly smaller than $p$ (that is, for p=3 we have k=1, for p=5, k=2 etc.). Again by associativity and distributivity we will be able to multiply two ordinals $< \omega^{\omega^{\omega}}$ if we know how to multiply a product $[\omega^{\alpha}.2^{n_0}].[\omega^{\beta}.2^{m_0}]$ with $\alpha,\beta < [\omega^{\omega}]$ and $n_0,m_0 \in \mathbb{N}$. Now, $\alpha$ can be written uniquely as $[\omega^t.n_t+\omega^{t-1}.n_{t-1}+\ldots+\omega.n_2 + n_1]$ with t and all $n_i$ natural numbers. Write each $n_k$ in base $p$ where $p$ is the $k+1$-th prime number, that is, we have for $n_0,n_1,\ldots,n_t$ an expression $n_k=[\sum_j p^j.m(j,k)]$ with $0 \leq m(j,k) < p$ The point of all this is that any of the special elements we want to multiply can be written as a unique expression as a decreasing product $[\omega^{\alpha}.2^{n_0}] = [ \prod_q \kappa_q^m(q) ]$ where $q$ runs over all prime powers. The crucial fact now is that for this decreasing product we have a rule similar to addition of 2-powers, that is Conway-products coincide with the Cantor-products $[ \prod_q \kappa_q^m(q) ] = \prod_q \kappa_q^m(q)$ But then, using associativity and commutativity of the Conway-product we can ‘nearly’ describe all products $[\omega^{\alpha}.2^{n_0}].[\omega^{\beta}.2^{m_0}]$. The remaining problem being that it may happen that for some q we will end up with an exponent $m(q)+m(q’)>p$. But this can be solved if we know how to take p-powers. 
The rules for this are as follows $~(\kappa_{2^n})^2 = \kappa_{2^n} + \prod_{1 \leq i < n} \kappa_{2^i}$, for 2-powers, and, $~(\kappa_{p^n})^p = \kappa_{p^{n-1}}$ for a prime $p > 2$ and for $n \geq 2$, and finally $~(\kappa_p)^p = \alpha_p$ for a prime $p > 2$, where $\alpha_p$ is the smallest ordinal $< \kappa_p$ which cannot be written as a p-power $\beta^p$ with $\beta < \kappa_p$. Summarizing : if we will be able to find these mysterious elements $\alpha_p$ for all prime numbers p, we are able to multiply in $[\omega^{\omega^{\omega}}]=\overline{\mathbb{F}}_2$. Let us determine the first one. We have that $\kappa_3 = \omega$ so we are looking for the smallest natural number $n < \omega$ which cannot be written in num-multiplication as $n=m^3$ for $m < \omega$ (that is, also $m$ a natural number). Clearly $1=1^3$ but what about 2? Can 2 be a third root of a natural number wrt. nim-multiplication? From the tabel above we see that 2 has order 3 whence its cube root must be an element of order 9. Now, the only finite ordinals that are subfields of $\mathbf{On}_2$ are precisely the Fermat 2-powers, so if there is a finite cube root of 2, it must be contained in one of the finite fields $[2^{2^n}]$ (of which the mutiplicative group has order $2^{2^n}-1$ and one easily shows that 9 cannot be a divisor of any of the numbers $2^{2^n}-1$, that is, 2 doesn’t have a finte 3-th root in nim! Phrased differently, we found our first mystery number $\alpha_3 = 2$. That is, we have the marvelous identity in nim-arithmetic $\omega^3 = 2$ Okay, so what is $\alpha_5$? Well, we have $\kappa_5 = [\omega^{\omega}]$ and we have to look for the smallest ordinal which cannot be written as a 5-th root. By inspection of the finite nim-table we see that 1,2 and 3 have 5-th roots in $\omega$ but 4 does not! The reason being that 4 has order 15 (check in the finite field [16]) and 25 cannot divide any number of the form $2^{2^n}-1$. 
That is, $\alpha_5=4$ giving another crazy nim-identity $~(\omega^{\omega})^5 = 4$ And, surprises continue to pop up… Conway showed that $\alpha_7 = \omega+1$ giving the nim-identity $~(\omega^{\omega^2})^7 = \omega+1$. The proof of this already uses some clever finite field arguments. Because 7 doesn’t divide any number $2^{2^n}-1$, none of the finite subfields $[2^{2^n}]$ contains a 7-th root of unity, so the 7-power map is injective whence surjective, so all finite ordinal have finite 7-th roots! That is, $\alpha_7 \geq \omega$. Because $\omega$ lies in a cubic extension of the finite field [4], the field generated by $\omega$ has 64 elements and so its multiplicative group is cyclic of order 63 and as $\omega$ has order 9, it must be a 7-th power in this field. But, as the only 7th powers in that field are precisely the powers of $\omega$ and by inspection $\omega+1$ is not a 7-th power in that field (and hence also not in any field extension obtained by adjoining square, cube and fifth roots) so $\alpha_7=\omega +1$. Conway did stop at $\alpha_7$ but I’ve always been intrigued by that one line in ONAG p.61 : “Hendrik Lenstra has computed $\alpha_p$ for $p \leq 43$”. Next time we will see how Lenstra managed to do this and we will use sage to extend his list a bit further, including the first open case : $\alpha_{47}= \omega^{\omega^7}+1$. For an enjoyable video on all of this, see Conway’s MSRI lecture on Infinite Games. The nim-arithmetic part is towards the end of the lecture but watching the whole video is a genuine treat! In ONAG, John Conway proves that the symmetric version of his recursive definition of addition and multiplcation on the surreal numbers make the class On of all Cantor’s ordinal numbers into an algebraically closed Field of characteristic two : On2 (pronounced ‘Onto’), and, in particular, he identifies a subfield with the algebraic closure of the field of two elements. 
What makes all of this somewhat confusing is that Cantor had already defined a (badly behaving) addition, multiplication and exponentiation on ordinal numbers. Over the last week I’ve been playing a bit with sage to prove a few exotic identities involving ordinal numbers. Here’s one of them ($\omega$ is the first infinite ordinal number, that is, $\omega={ 0,1,2,\ldots }$), $~(\omega^{\omega^{13}})^{47} = \omega^{\omega^7} + 1$ answering a question in Hendrik Lenstra’s paper Nim multiplication. However, it will take us a couple of posts before we get there. Let’s begin by trying to explain what brought this on. On september 24th 2008 there was a meeting, intended for a general public, called a la rencontre des dechiffeurs, celebrating the 50th birthday of the IHES. One of the speakers was Alain Connes and the official title of his talk was “L’ange de la géométrie, le diable de l’algèbre et le corps à un élément” (the angel of geometry, the devil of algebra and the field with one element). Instead, he talked about a seemingly trivial problem : what is the algebraic closure of $\mathbb{F}_2$, the field with two elements? My only information about the actual content of the talk comes from the following YouTube-blurb Alain argues that we do not have a satisfactory description of $\overline{\mathbb{F}}_2$, the algebraic closure of $\mathbb{F}_2$. Naturally, it is the union (or rather, limit) of all finite fields $\mathbb{F}_{2^n}$, but, there are too many non-canonical choices to make here. Recall that $\mathbb{F}_{2^k}$ is a subfield of $\mathbb{F}_{2^l}$ if and only if $k$ is a divisor of $l$ and so we would have to take the direct limit over the integers with respect to the divisibility relation… Of course, we can replace this by an increasing sequence of a selection of cofinal fields such as $\mathbb{F}_{2^{1!}} \subset \mathbb{F}_{2^{2!}} \subset \mathbb{F}_{2^{3!}} \subset \ldots$ But then, there are several such suitable sequences! 
Another ambiguity comes from the description of $\mathbb{F}_{2^n}$. Clearly it is of the form $\mathbb{F}_2[x]/(f(x))$ where $f(x)$ is a monic irreducible polynomial of degree $n$, but again, there are several such polynomials. An attempt to make a canonical choice of polynomial is to take the ‘first’ suitable one with respect to some natural ordering on the polynomials. This leads to the so called Conway polynomials. Conway polynomials for the prime $2$ have only been determined up to degree 400-something, so in the increasing sequence above we would already be stuck at the sixth term $\mathbb{F}_{2^{6!}}$… So, what Alain Connes sets as a problem is to find another, more canonical, description of $\overline{\mathbb{F}}_2$. The problem is not without real-life interest as most finite fields appearing in cryptography or coding theory are subfields of $\overline{\mathbb{F}}_2$. (My guess is that Alain originally wanted to talk about the action of the Galois group on the roots of unity, which would be the corresponding problem over the field with one element and would explain the title of the talk, but decided against it. If anyone knows what ‘coupling-problem’ he is referring to, please drop a comment.) Surely, Connes is aware of the fact that there exists a nice canonical recursive construction of $\overline{\mathbb{F}}_2$ due to John Conway, using Georg Cantor’s ordinal numbers. In fact, in chapter 6 of his book On Numbers And Games, John Conway proves that the symmetric version of his recursive definition of addition and multiplcation on the surreal numbers make the class $\mathbf{On}$ of all Cantor’s ordinal numbers into an algebraically closed Field of characteristic two : $\mathbf{On}_2$ (pronounced ‘Onto’), and, in particular, he identifies a subfield $\overline{\mathbb{F}}_2 \simeq [ \omega^{\omega^{\omega}} ]$ with the algebraic closure of $\mathbb{F}_2$. 
What makes all of this somewhat confusing is that Cantor had already defined a (badly behaving) addition, multiplication and exponentiation on ordinal numbers. To distinguish between the Cantor/Conway arithmetics, Conway (and later Lenstra) adopt the convention that any expression between square brackets refers to Cantor-arithmetic and un-squared ones to Conway’s. So, in the description of the algebraic closure just given $[ \omega^{\omega^{\omega}} ]$ is the ordinal defined by Cantor-exponentiation, whereas the exotic identity we started out with refers to Conway’s arithmetic on ordinal numbers. Let’s recall briefly Cantor’s ordinal arithmetic. An ordinal number $\alpha$ is the order-type of a totally ordered set, that is, if there is an order preserving bijection between two totally ordered sets then they have the same ordinal number (or you might view $\alpha$ itself as a totally ordered set, namely the set of all strictly smaller ordinal numbers, so e.g. $0=\emptyset,\ 1=\{ 0 \},\ 2=\{ 0,1 \},\ldots$). For two ordinals $\alpha$ and $\beta$, the addition $[\alpha + \beta ]$ is the order-type of the totally ordered set $\alpha \sqcup \beta$ (the disjoint union) ordered compatibly with the total orders in $\alpha$ and $\beta$ and such that every element of $\beta$ is strictly greater than any element from $\alpha$. Observe that this definition depends on the order of the two factors. For example, $[1 + \omega] = \omega$ as there is an order preserving bijection $\{ \tilde{0},0,1,2,\ldots \} \rightarrow \{ 0,1,2,3,\ldots \}$ by $\tilde{0} \mapsto 0,n \mapsto n+1$. However, $\omega \not= [\omega + 1]$ as there can be no order preserving bijection $\{ 0,1,2,\ldots \} \rightarrow \{ 0,1,2,\ldots,0_{max} \}$ as the first set has no maximal element whereas the second one does. So, Cantor’s addition has the bad property that it may be that $[\alpha + \beta] \not= [\beta + \alpha]$. The Cantor-multiplication $\alpha . 
\beta$ is the order-type of the product-set $\alpha \times \beta$ ordered via the last differing coordinate. Again, this product has the bad property that it may happen that $[\alpha . \beta] \not= [\beta . \alpha]$ (for example $[2 . \omega ] \not=[ \omega . 2 ]$). Finally, the exponential $\beta^{\alpha}$ is the order type of the set of all maps $f~:~\alpha \rightarrow \beta$ such that $f(a) \not=0$ for only finitely many $a \in \alpha$, and ordered via the last differing function-value. Cantor’s arithmetic allows normal-forms for ordinal numbers. More precisely, with respect to any ordinal number $\gamma \geq 2$, every ordinal number $\alpha \geq 1$ has a unique expression as $\alpha = [ \gamma^{\alpha_0}.\eta_0 + \gamma^{\alpha_1}.\eta_1 + \ldots + \gamma^{\alpha_m}.\eta_m]$ for some natural number $m$ and such that $\alpha \geq \alpha_0 > \alpha_1 > \ldots > \alpha_m \geq 0$ and all $1 \leq \eta_i < \gamma$. In particular, taking the special cases $\gamma = 2$ and $\gamma = \omega$, we have the following two canonical forms for any ordinal number $\alpha$ $[ 2^{\alpha_0} + 2^{\alpha_1} + \ldots + 2^{\alpha_m}] = \alpha = [ \omega^{\beta_0}.n_0 + \omega^{\beta_1}.n_1 + \ldots + \omega^{\beta_k}.n_k]$ with $m,k,n_i$ natural numbers and $\alpha \geq \alpha_0 > \alpha_1 > \ldots > \alpha_m \geq 0$ and $\alpha \geq \beta_0 > \beta_1 > \ldots > \beta_k \geq 0$. Both canonical forms will be important when we consider the (better behaved) Conway-arithmetic on $\mathbf{On}_2$, next time.
8,681
31,839
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.734375
4
CC-MAIN-2019-35
latest
en
0.849574
https://www.storyofmathematics.com/consider-the-function-below-if-an-answer-does-not-exist-enter-dne-c-x-equal-x-frac-1-5-x-plus-6-a-find-the/
1,708,817,262,000,000,000
text/html
crawl-data/CC-MAIN-2024-10/segments/1707947474569.64/warc/CC-MAIN-20240224212113-20240225002113-00219.warc.gz
1,004,118,008
41,025
# Consider the function below. C(x) = x^{1/5}(x + 6). (If an answer does not exist, enter DNE). This question aims to find the interval of increase or interval of decrease of the given function by finding its critical points first. The interval of increase and decrease is the interval in which the value of the real function increases or decreases as the independent variable increases. The increase or decrease on an interval can be found by checking the value of the first derivative of the given function. If the derivative is positive, this means that the interval is increasing. It implies the increase of the function with the independent variable $x$. If the derivative is negative, this means that the interval is decreasing. It implies the decrease of the function with the independent variable  x . Let the function be: $f(x) = x ^\frac{1}{5} ( x + 6 )$ Taking first derivative of the function $f (x)$: $f’ (x) =\frac{1}{5} x ^ \frac{-4}{5} ( x + 6 ) + x^ \frac{1}{5}$ $=\frac{x + 6}{5x ^ {\frac{4}{5}}} + x ^\frac{1}{5}$ $=\frac{ x + 6 + 5x ^ {\frac{1}{5}+ \frac{4}{5}}}{ 5x^{\frac{4}{5}} }$ $=\frac{6x + 6}{5x^{\frac{4}{5}}}$ Taking $6$ common, we get: $=\frac{6 (x + 1) }{ 5x ^ {\frac{4}{5}}}$ To find critical points, we will put the first derivative equal to $0$: $f’ (x) = 0$ $\frac{ 6 (x + 1) }{ 5x ^ {\frac{4}{5}} } = 0$ $x + 1 = 0$ $x = – 1$ The critical points are $x = – 1$ (where $f’(x) = 0$) and $x = 0$ (where $f’(x)$ is undefined) The intervals are then: $(- \infty , – 1 ) , (- 1 , 0) , (0 , \infty)$ ## Numerical Solution In the given interval $( – \infty , – 1 )$,  put $x = -2$ $\frac{ 6 (- 2 + 1) }{ 5( – 2) ^ {\frac{4}{5}} } = – 0 . 68 < 0$ Thus, $f (x)$ is decreasing in the interval $(- \infty , – 1)$. Take the interval $( -1 , 0 )$ and put $x = – 0.5$: $f’ (x) = \frac{ 6 ( – 0.5 + 1) }{ 5( – 0.5 ) ^ {\frac{4}{5}} } = 1.04 > 0$ So $f (x)$ is increasing in the interval $( – 1 , 0 )$. In the interval $(0 , \infty)$, put $x = 1$: $f’ (x) =\frac{6 ( 1 + 1) }{5( 1) ^ {\frac{4}{5}}} = 2.4 > 0$ So $f(x)$ is increasing in the interval $(0 , \infty)$. 
## Example Find the increasing and decreasing intervals of the function $f(x)= -x^3 + 3x^2 +9$. $f’(x) = -3x^2 + 6x$ $f’(x) = -3x (x – 2)$ To find critical points: $-3x (x – 2) = 0$ $x = 0$ or $x = 2$ The intervals are $(- \infty, 0)$ , $(0, 2)$ and $(2, \infty)$. For interval $(- \infty , 0 )$,  put $x = -1$: $f’ (x) = -9 < 0$ It is a decreasing function. For interval $(0, 2)$,  put $x =1$: $f’ (x) = 3 > 0$ It is an increasing function. For interval $(2, \infty)$,  put $x =4$: $f’ (x) = -24 < 0$ It is a decreasing function. Image/Mathematical drawings are created in Geogebra.
970
2,608
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.625
5
CC-MAIN-2024-10
latest
en
0.693196
http://kth.diva-portal.org/smash/record.jsf?pid=diva2:953324
1,513,293,424,000,000,000
text/html
crawl-data/CC-MAIN-2017-51/segments/1512948551162.54/warc/CC-MAIN-20171214222204-20171215002204-00356.warc.gz
161,683,807
17,208
Change search Cite Citation style • apa • harvard1 • ieee • modern-language-association-8th-edition • vancouver • Other style More styles Language • de-DE • en-GB • en-US • fi-FI • nn-NO • nn-NB • sv-SE • Other locale More languages Output format • html • text • asciidoc • rtf Autonomous Rubik's Cube Solver KTH, School of Industrial Engineering and Management (ITM), Machine Design (Dept.), Mechatronics. KTH, School of Industrial Engineering and Management (ITM), Machine Design (Dept.), Mechatronics. 2016 (English)Independent thesis Basic level (degree of Bachelor), 10 credits / 15 HE creditsStudent thesisAlternative title Autonom Rubiks Kub Lösare (Swedish) ##### Abstract [en] Today the industry is constantly getting more automated. Every machine needs an algorithm to run and some even need to make decisions in how to perform their tasks in the best way. These problems can be as large as transportation systems, or as small as solving a puzzle. The 3D-puzzle known as Rubik’s cube has enticed millions of people since its release in the mid 1970s. With 43 quintillion possible combinations the Rubik’s cube presents a complex problem that requires both logical thinking and memorization. Recently the Rubik’s Cube community has seen a rise of robots that can automatically solve a Rubik’s Cube. The goal of this Bachelor’s Thesis is to investigate the effect of different solving algorithms on the energy consumption of a mechanical system. The Demonstrator system we will use is a Rubik’s cube solver that can solve any scrambled cube. This is done by optically scanning the cube and building a virtual image of it. Two different solving algorithms will be run in order to solve the cube and calculate the required permutations. The energy consumption in the two cases of the algorithms will be calculated and compared. From this data, conclusions regarding choice of algorithm will be made. 
A more complex algorithm that results in fewer moves will be more energy efficient, at least under the circumstances that the computation cost is ignored. When compared, the optimized algorithm only used 8% of the energy of the simpler version. ##### Abstract [sv] Dagens industrier blir allt mer och mer automatiserade. Varje maskin behöver en algoritm för att fungera, och vissa maskiner måste även ta beslut om hur de ska utföra sin uppgift på bästa vis. Dessa problem kan vara allt från stora transportproblem, till att lösa små pussel. 3D-pusslet Rubiks Kub har fängslat miljoner människor sedan det skapades i mitten av 1970-talet. Med 43 kvintiljoner möjliga kombinationer är Rubiks Kub ett komplicerat problem som kräver både logiskt tänkande och memorisering. Senaste trenden är att skapa robotar som löser kuben automatiskt. Målet med detta kandidatprojekt är att utreda kopplingen mellan val av lösningsalgoritm och energiåtgången för ett mekaniskt system. För att genomföra utredningen ska vi använda oss av en Rubiks kub-lösare som kan automatiskt lösa en kub från alla positioner. Den fungerar genom att kuben läses in optiskt och så skapas en virtuell kopia. Två olika lösningsalgoritmer kommer att köras för att lösa kuben och deras resultat i energiåtgång kommer att jämföras. Från denna data kommer slutsatser att dras om de två algoritmerna. En mer komplicerad algoritm som kräver färre drag kommer att vara mer energieffektiv, åtminstone när man ignorerar beräkningskostnad och dess energi. Vi kom fram till att en optimerad algoritm använde bara 8% av energin jämfört med en enklare algoritm. 2016. , 64 p. 
##### National Category Mechanical Engineering ##### Identifiers OAI: oai:DiVA.org:kth-190859DiVA: diva2:953324 ##### Examiners Available from: 2016-08-17 Created: 2016-08-17 Last updated: 2016-08-17Bibliographically approved #### Open Access in DiVA ##### File information File name FULLTEXT01.pdfFile size 2191 kBChecksum SHA-512 Type fulltextMimetype application/pdf Mechatronics ##### On the subject Mechanical Engineering #### Search outside of DiVA The number of downloads is the sum of all downloads of full texts. It may include eg previous versions that are now no longer available urn-nbn #### Altmetric score urn-nbn Total: 343 hits Cite Citation style • apa • harvard1 • ieee • modern-language-association-8th-edition • vancouver • Other style More styles Language • de-DE • en-GB • en-US • fi-FI • nn-NO • nn-NB • sv-SE • Other locale More languages Output format • html • text • asciidoc • rtf v. 2.29.1 | | | |
1,224
4,446
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.515625
3
CC-MAIN-2017-51
longest
en
0.89136
https://visp-doc.inria.fr/doxygen/visp-3.0.1/tutorial-boost-vs.html
1,709,031,612,000,000,000
text/html
crawl-data/CC-MAIN-2024-10/segments/1707947474674.35/warc/CC-MAIN-20240227085429-20240227115429-00633.warc.gz
597,928,332
5,461
Visual Servoing Platform  version 3.0.1 Tutorial: How to boost your visual servo control law # Introduction This tutorial gives some hints to boost your visual servo control law in order to speed up the time to convergence. Note that all the material (source code and image) described in this tutorial is part of ViSP source code and could be downloaded using the following command: To illustrate this tutorial let us consider the example tutorial-ibvs-4pts-plotter.cpp introduced in Tutorial: Image-based visual servo. This example consider an image based visual servoing using four points as visual features. In the general case, considering as the input velocities to the robot controller, the control laws provided in vpServo class lead to the following control law where the sign is negative for an eye in hand servo and positive for an eye to hand servo, is a constant gain, is the task Jacobian and is the error to regulate to zero. As described in [2], this control law ensure an exponential decoupled decrease of the error . This behavior is illustrated with the next figure, where we see the exponential decrease of the eight visual features (x and y for each point) and the corresponding six velocities that are applied to the robot controller. As a consequence, velocities are high when the error is important, and very low when the error is small near the convergence. At the beginning, we can also notice velocity discontinuities with velocities varying from zero to high values in one iteration. Convergence in 191 iterations with a constant gain. This behavior can be reproduced running tutorial-ibvs-4pts-plotter.cpp example. Here after we recall the important lines of code used to compute the control law: task.setLambda(0.5); // Set the constant gain value for (unsigned int i = 0 ; i < 4 ; i++) { ... } while(1) { for (unsigned int i = 0 ; i < 4 ; i++) { ... 
vpFeatureBuilder::create(p[i], point[i]); // Update the visual features used in the task } vpColVector v = task.computeControlLaw(); // Compute the control law } As implemented in tutorial-ibvs-4pts-plotter-gain-adaptive.cpp it is possible to adapt the gain in order to depend on the infinity norm of the task Jacobian. The usage of an adaptive gain rather than a constant gain allows to reduce the convergence time. In that case the gain becomes: where , and are constant parameters and is the infinity norm of the task Jacobian to consider. The parameters are not set directly. They are computed from three other parameters that are more intuitive to tune: Here represents the gain when , represents the gain when and represents the slope of when . The impact of the adaptive gain is illustrated in the next figure. During the servo, velocities applied to the controller are higher, especially when the visual error is small. But as in the previous section, using an adaptive gain doesn't insure continuous velocities especially at the first iteration. Convergence in 91 iterations with an adaptive gain. This behavior can be reproduced running tutorial-ibvs-4pts-plotter-gain-adaptive.cpp example. Compared to the previous code given in Introduction and available in tutorial-ibvs-4pts-plotter.cpp, here after we give the new lines of code that were introduced to use an adaptive gain: vpAdaptiveGain lambda(4, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda_dot(0)=30 # Continuous sequencing As implemented in tutorial-ibvs-4pts-plotter-continuous-gain-adaptive.cpp it is also possible to ensure continuous sequencing to avoid velocity discontinuities. This behavior is achieved by introducing an additional term to the general form of the control law. This additional term comes from the task sequencing approach described in [21] equation (17). It allows to compute continuous velocities by avoiding abrupt changes in the command. 
The form of the control law considered here is the following: where : • is the resulting continuous velocity command to apply to the robot controller. • the sign of the control law depends on the eye in hand or eye to hand configuration. • is the Jacobian of the task. • is the error to regulate. • is the time. • is a gain. We recommend to set this value to 4. • is the value of when . The effect of continuous sequencing is illustrated in the next figure where during the first iterations velocities are starting from zero. Convergence in 98 iterations with an adaptive gain and continuous sequencing. This behavior can be reproduced running tutorial-ibvs-4pts-plotter-continuous-gain-adaptive.cpp example. Compared to the previous code given in Using an adaptive gain and available in tutorial-ibvs-4pts-plotter-gain-adaptive.cpp, here after we give the new line of code that were introduced to ensure continuous sequencing:
1,025
4,773
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.609375
3
CC-MAIN-2024-10
latest
en
0.840375
https://www.physicskey.com/41/heat-and-temperature
1,701,356,874,000,000,000
text/html
crawl-data/CC-MAIN-2023-50/segments/1700679100227.61/warc/CC-MAIN-20231130130218-20231130160218-00418.warc.gz
1,037,076,120
6,179
The term heat defines a quantity of energy (this quantity of energy has a special property) transferred and the temperature is generally defined as the sensation of heat, however the definition of temperature based on sensation only is not a very good approach. You can see the direct relationship of the kinetic energy of an ideal gas and the temperature in the kinetic theory of an ideal gas. The quantity of energy as heat has a special property which is it always flows from the hotter region of a body to the colder region of the same body or another body in contact. Therefore, the heat has the flowing property - we can not say that a particular quantity of heat is contained in a body but instead the heat is defined as the energy transferred from hotter body or region to colder body or region due to temperature difference. Thus, heat is the energy in transit. You may think the temperature and heat are the same things in everyday language, but it's not true. Since heat is transferred from hotter region towards colder region, you can not say that a particular heat is contained in a body. So consider a particular amount of heat is transferred to a body and all parts of the body reach thermal equilibrium. When you cut the body into halves, both halves will have the same temperature but not the same amount of heat. When two bodies are at the same temperature, they are said to be in thermal equilibrium with each other. In Figure 1, if body $A$ is in thermal equilibrium with body $B$ and body $B$ is in thermal equilibrium with body $C$, body $A$ and body $C$ will also be in thermal equilibrium and this is called the zeroth law of thermodynamics. ## Measuring Temperature The heat and temperature are entirely different things but we can define the hotness or coldness of a body or material in terms of its temperature. This is very familiar to you that when a body is heated (supplied heat to it), its temperature and volume increase. 
So the temperature of a material can be measured in terms of the expansion of a particular material arranged in a way which can be calibrated on a scale. One example is the liquid thermometer. In liquid thermometer, usually mercury or alcohol are taken inside a tube. When the tube with liquid is placed in thermal contact with a hot body, the liquid level rises in the tube (see Figure 2). The tube is calibrated and you can measure the temperature of the body. ### Celsius and Fahrenheit Temperature Scales The most commonly used temperature scales are Celsius and Fahrenheit temperature scales. We ponder an example of a thermometer shown in Figure 2. In Celsius scale the zero of the thermometer is calibrated at the point where the liquid in the thermometer is in thermal equilibrium with the constant temperature of the mixture of ice and water (freezing point). Similarly, the 100 of the thermometer is the point where the liquid rises in the capillary tube and comes in thermal equilibrium with the temperature of the mixture of water and steam (boiling point). The length between these two points is divided into 100 equal parts and the smallest part is called one degree Celsius written as $1{{\kern 1pt} ^ \circ }{\rm{C}}$. There are 100 degrees between freezing and boiling temperatures of water in Celsius scale. In Fahrenheit scale the temperature of the freezing point of water is $32{{\kern 1pt} ^ \circ }{\rm{F}}$ (thirty-two degrees Fahrenheit) and the temperature of the boiling point is $212{{\kern 1pt} ^ \circ }{\rm{F}}$. Note that in Fahrenheit scale, the smallest value is called one degree Fahrenheit written as $1{{\kern 1pt} ^ \circ }{\rm{F}}$. 
There are 180 degrees between freezing and boiling temperatures of water in Fahrenheit scale, so you can convert the temperature in Celsius scale into Fahrenheit scale by using the expression below: ${T_F} = 32{}^ \circ {\rm{F}} + \frac{9}{5}{T_C} \tag{1} \label{1}$ where $T_F$ and $T_C$ represent the temperatures in Fahrenheit and Celsius scales respectively. You can also convert the temperature in Fahrenheit scale in Celsius scale by solving for $T_C$ in the above equation: ${T_C} = ({T_F} - 32{}^ \circ {\rm{F)}}\frac{5}{9} \tag{2} \label{2}$ There are different kinds of thermometers which depend on the material used. Material dependent thermometers always have discrepancies. For example, mercury and alcohol have different expansion properties. Therefore, mercury and alcohol thermometers can give the same temperature at the calibration points but give slightly different temperatures at intermediate points. The variations again increase when you measure the temperature beyond the calibration points. All gases behave in the same way and for more precise measurement we need such a scale which is material independent and this can be achieved by a constant volume gas thermometer discussed below. ### Constant Volume Gas Thermometer and Kelvin Scale The constant volume gas thermometer is based on the relationship between pressure and temperature at constant volume; as the temperature increases, the pressure of the gas increases. The constant volume gas thermometer is calibrated by finding the pressure of the gas at the temperature of freezing point of water (mixture of ice and water or ice point) and the pressure at the temperature of boiling point of water (mixture of water and steam or steam point). In Figure 3, the pressures of gas at $0{{\kern 1pt} ^ \circ }{\rm{C}}$ and $100{{\kern 1pt} ^ \circ }{\rm{C}}$ are measured. 
The point $a$ where pressure is measured at $0{{\kern 1pt} ^ \circ }{\rm{C}}$ is joined with with the point $b$ where pressure is measured at $100{{\kern 1pt} ^ \circ }{\rm{C}}$. The straight line $ab$ is produced backwards as shown by the dashed line in Figure 3 which meets the temperature axis at the temperature of $-273.15{{\kern 1pt} ^ \circ }{\rm{C}}$. At this temperature the pressure of the gas is zero as shown by the graph and the temperature is called absolute zero. The zero pressure is not practically observed. The gases turn into liquids or solids in lower temperatures and the direct relationship between pressure and temperature is no longer valid. The new temperature scale obtained by the graph in Figure 3 with its zero at $-273.15{{\kern 1pt} ^ \circ }{\rm{C}}$ is called Kelvin scale of temperature; it means the zero Kelvin written as 0K not $0{{\kern 1pt} ^ \circ }{\rm{K}}$ is equal to $-273.15{{\kern 1pt} ^ \circ }{\rm{C}}$ and 273.15K is equal to $0{{\kern 1pt} ^ \circ }{\rm{C}}$. Remember that ∘ is not used in Kelvin scale. You can convert any temperature in Celsius scale into Kelvin scale by ${T_K} = 273.15 + {T_C} \tag{3} \label{3}$ where $T_K$ is the temperature in Kelvin scale. The SI unit of temperature is the Kelvin temperature from Kelvin scale. The material dependent temperature scales can only measure the limited range of temperatures such as the mercury thermometer can not measure the temperature below freezing point of mercury. And material dependent thermometers are based on a particular planet or a particular material. The Kelvin scale of temperature can measure all possible temperatures in the universe and is the universal temperature scale.
1,624
7,212
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.4375
3
CC-MAIN-2023-50
latest
en
0.939951
https://small-s.science/tag/moe/
1,716,581,756,000,000,000
text/html
crawl-data/CC-MAIN-2024-22/segments/1715971058736.10/warc/CC-MAIN-20240524183358-20240524213358-00213.warc.gz
454,140,700
58,118
## Planning with assurance, with assurance Planning for precision requires that we choose a target Margin of Error (MoE; see this post for an introduction to the basic concepts) and a value for assurance, the probability that MoE will not exceed our target MoE.  What your exact target MoE will be depends on your research goals, of course. Cumming and Calin-Jageman (2017, p. 277) propose a strategy for determining target MoE. You can use this strategy if your research goal is to provide strong evidence that the effect size is non-zero. The strategy is to divide the expected value of the difference by two, and to use that result as your target MoE. Let’s restrict our attention to the comparison of two means. If the expected difference between the two means is Cohens’s d = .80, the proposed strategy is to set your target MoE at f = .40, which means that your target MoE is set at .40 standard deviations. If you plan for this value of target MoE with 80% assurance, the recommended sample size is n = 55 participants per group. These results are guaranteed to be true, if it is known for a fact that Cohen’s d is .80 and all statistical assumptions apply. But it is generally not known for a fact that Cohen’s d has a particular value and so we need to answer a non-trivial question: what effect size can we reasonably expect? And, how can we have assurance that the MoE will not exceed half the unknown true effect size? One of the many options we have for answering this question is to conduct a pilot study, estimate the plausible values of the effect size and use these values for sample size planning.  I will describe a strategy that basically mirrors the sample size planning for power approach described by Anderson, Kelley, and Maxwell (2017). The procedure is as follows. 
In order to plan with approximately 80% assurance, estimate on the basis of your pilot the 80% confidence interval for the population effect size and use half the value of the lower limit for sample size planning with 90% assurance. This will give you 81% assurance that assurance MoE is no larger than half the unknown true effect size. ## The logic of planning with assurance, with assurance There are two “problems” we need to consider when estimating the true effect size. The first problem is that there is at least 50% probability of obtaining an overestimate of the true effect size. If that happens, and we take the point estimate of the effect size as input for sample size planning, what we “believe” to be a sample size sufficient for 80% assurance will be a sample size that has less than 80% assurance at least 50% of the times. So, using the point estimate gives assurance MoE for the unknown effect size with less than 50% assurance. To make it more concrete: suppose the true effect equals .80, and we use n = 25 participants in both groups of the pilot study, the probability is  approximately 50% that the point estimate is above .80. This implies, of course, that we will plan for a value of f > .40, approximately 50% of the times, and so the sample we get will only give us 80% assurance 50% of the times. The second problem is that the small sample sizes we normally use for pilot studies may give highly imprecise estimates. For instance, with n = 25 participants per group, the expected MoE is f = 0.5687. So, even if we accept 50% assurance, it is highly likely that the point estimate is rather imprecise. Since we are considering a pilot study,  one of the obvious solutions, increasing the sample size so that expected MoE is likely to be small, is not really an option. But what we can do is to use an estimate that is unlikely to be an overestimate of the true effect size. 
In particular, we can use as our estimate the lower limit of a confidence interval for the effect size. Let me explain, by considering the 80% CI  of the effect size estimate. From basic theory it follows that the “true” value of the effect size will be smaller than the lower limit of the 80% confidence interval with probability  equal to 10%. That is, if we calculate a huge number of 80% confidence intervals, each time on the basis of new random samples from the population, the true value of the effect size will be below the lower limit in 10% of the cases. This also means that the lower limit of the interval has 90% probability to not overestimate the true effect size. This means that  if we take the lower limit of the 80% CI of the pilot estimate as input for our sample size calculations, and if we plan with assurance of .90, we will have 90%*90% = 81% assurance that using the sample size we get from our calculations will have  MoE  no larger than half the true effect size. (Note that for 80% CI’s with negative limits you should choose the upper limit). ## Sample Size planning based on a pilot study Student of mine recently did a pilot study.  This was a pilot for an experiment investigating the size of the effect of fluency of delivery of a spoken message in a video on Comprehensibility, Persuasiveness and viewers’ Appreciation of the video. The pilot study used two groups of size n = 10, one group watched the fluent video (without ‘eh’) and the other group watched the disfluent video where the speaker used ‘eh’ a lot. The dependent variables were measured on 7-point scales. Let’s look at the results for the Appreciation variable. The (biased) estimate of Cohen’s d (based on the pooled standard deviation) equals 1.09, 80% CI [0.46, 1.69] (I’ve calculated this using the ci.smd function from the MBESS-package. According to the rules-of-thumb for interpreting Cohen’s d, this can be considered a large effect. 
(For communication effect studies it can be considered an insanely large effect). However, the CI shows the large imprecision of the result, which is of course what we can expect with sample sizes of n = 10. (Average MoE equals f = 0.95, and according to my rules-of-thumb that is well below what I consider to be borderline precise). If we use the lower limit of the interval (d = 0.46),  sample size planning with 90% assurance for half that effect (f = 0.23) gives us a sample size equal to n = 162. (Technical note: I planned  for the half-width of the standardized CI of the unstandardized effect size, not for the CI of the standardized effect size; I used my Shiny App for planning assuming an independent groups design with two groups).  As explained, since we used the lower limit of the 80% CI of the pilot and used 90% assurance in planning the sample size, the assurance that MoE will not exceed half the unknown true effect size equals 81%. ## A rule of thumb for setting target MOE One of the most difficult aspects of sample size planning for precision is the specification of a target Margin of Error (MoE). Here, I would like to introduce a simple rule of thumb, in the hope that it helps you in determining a reasonable target MoE. Here, the rule of thumb is applied to obtaining an estimate of the difference between two independent group means, where the two populations are normally distributed with equal variances. ## Goal 1: Assessing the direction of an effect Sample size planning starts with formulating a goal for the research. A very common goal is to try to determine the direction of an effect. For the goal of assessing the direction of an effect, it helps if the confidence interval of the difference contains only positive or negative values. That is, you want a confidence interval that exludes the value 0, for if that value is included, you would probably conclude that the estimate is consistent with both positive and negative effects. 
Thus, our first goal is to obtain a confidence interval of the mean difference that excludes the value 0. Now, a confidence interval excludes 0, if obtained MOE is at most equal to the obtained effect size estimate. Suppose that the estimate equals the true effect of say, 0.50, we want MOE to be at most very close to 0.50, otherwise 0 will be included in the interval. But if our estimate underestimates the true effect, say the estimate equals 0.30, we want MOE to be at most very close to 0.30. Likewise, if we overestimate the effect, MOE can be larger than 0.50. This means that we cannot say, for instance, we expect that the true effect is .50, so let’s plan for a target MOE that with 80% assurance is at most .50, because this target MOE may be too large for underestimates of the true effect, depending on the extent to which the effect is underestimated. So, in specifying target MOE, we should take into account that underestimates of the effect size occur. (Actually, these underestimates occur with a relative frequency of 50% in a huge collection of direct replications). We can say that we do not only want to exclude zero from the interval, but also that we want that to occur in a large proportion of direct replications. This will be our second goal. I will call the probability associated with our second goal, the probability of exclusion (PE). The rule of thumb is that if we want 80% probability that a random confidence interval excludes zero, we should plan for an expected MOE equal to f = d / √2. (the square root sign is unreadable in my browser; so in words: the effect size divided by the square root of 2; with mathjax: $f = d / \sqrt{2}$). Since there is 50% probability that obtained MOE will be larger than expected MOE, this is equal to planning for target MOE = f = d / √2, with 50% assurance or simply without assurance. You can do this in the ESCI-software, but also with the R-functions provided below.
The first example in the code below, is an illustration of planning for assessing the direction of the effect, with true effect size d = .50. If we want 80% assurance to have only positive values in our confidence interval, we should plan for a target MoE = expected MoE = f = d / √2 = 0.3535. Using the SampleSize-function below, this gives a sample size n = 63, or total sample size = N = 2*63 = 126. The probability that the confidence interval excludes 0 equals approximately 80% (p = 0.7951). So, the rule of thumb of planning for d / √2, seems to work pretty well. ## Goal 2: distinguishing between effect sizes If your research goal is to estimate the value of the effect size instead of its direction, the rule of thumb can be used as follows. Suppose we do not know the true effect size, but want to have 80% assurance that we have a high probability to be able to distinguish between small (d = .20) and large effects (d = .80). That is, if the true effect is .20 we want the value .80 to be excluded from the confidence interval and if the true effect is .80, we want the value .20 to be excluded from the confidence interval. We can proceed as follows, the difference between the effect sizes is .80 – .20 = .60. We use this value to determine target MOE. Thus, if we now plan for a target MoE = expected MoE = d / √2, we should have approximately 80% PE that the obtained confidence interval will exclude 0.80 if the true effect is 0.20 and vice versa. The functions below give sample size n = 44, and the probability of exclusion equals .7947. So, our rule of thumb, seems to work pretty well again. See example 2 in the code below. Alternatively, we could take the region of practical equivalence (ROPE) into account. Suppose, our equivalence range equals .10 sigma. If we want to have enough precision to distinguish large from small effects, we should plan as follows.
We take the difference between a large effect and the upper equivalence value of a small effect or, equivalently, the difference between a small effect and the lower equivalence value of a large effect, i.e. .50, and plan for f = .50 / √2. If the effect is large we expect a confidence interval that excludes the equivalence range for the small effect (and vice versa), with 80% probability of exclusion. But we could also take the difference between the lower equivalence value of a large effect and the upper equivalence value of a small effect, i.e. .40, and plan for f = .40/√2. (See the third example in the code below.) This will give us 80% PE that any true value within the ROPE of the one effect will exclude values in the ROPE of the other. For example, if the true effect is .70, and expected MOE equals .40/√2 = .2828, there is approximately 80% probability that the 95% CI excludes .30, which is in the ROPE of a small effect. The expected CI will be .70 +/- .2828 = [0.4172, 0.9828]. Note that the lower limit is larger than the upper limit of the ROPE for d = .20, as we want it to be. Note, however, that if the true effect is small (d = .20), the CI will exclude effects equivalent to large effects, which is consistent with our research goal, but it will not exclude the value 0 or effects equivalent to a medium effect. Indeed, the expected CI will be [-0.0828, 0.4828]. (This is not a problem, of course, since this was not the purpose of our research.) As a final example, suppose we want sufficient precision to distinguish small from medium effects (or large from medium effects). If we take the ROPE perspective, with an equivalence range of +/- .10 sigma, the lower equivalence value of the medium effect equals .50 – .10 = .40 and the upper limit of the small effect equals .30. If we want 80% assurance that the CI will be small enough to distinguish small from medium effects, we should plan for expected MOE f = (.40 – .30)/√2 = 0.0707.
Using the functions below, this requires a sample size n = 1538. (See the final example in the code below). ## Setting target MOE: conclusion In summary, the rule of thumb is to divide the effect size d by √2 and plan for an expected MoE equal to this value. This will give you a sample size that gives approximately 80% assurance that the CI will not contain 0. In the case of distinguishing effect sizes, one option is to divide the difference between the lower equivalence value of the larger effect and the upper equivalence value of the smaller effect by the square root of 2 and plan for an expected MoE equal to this value. This will give you a sample size that gives approximately 80% PE that the CI of the estimated true value of one effect excludes the values in the ROPE of the other effect. Do you want at least 90% PE? Use the square root of three, in stead of the square root of two, in determining target MoE. eMoe = function(n) { eMoe = qt(.975, 2*(n - 1))*sqrt(2/n) return(eMoe) } cost <- function(n, tMoe) { (eMoe(n) - tMoe)^2 } sampleSize <- function(tMoe) { optimize(cost, interval=c(10, 5000), tMoe = tMoe)$minimum } # FIRST EXAMPLE # plan for 80% assurance of excluding 0 # i.e. 
estimate the direction if true effect # equals .50 d = .50 #application of rule of thumb: f = .50 / sqrt(2) #sampleSize (uses ceiling() to round up): n = ceiling(sampleSize(f)) n ## [1] 63 # Probabiity of Exclusion (here taken to be equivalent to # power for two-sided t-test (since true direction is unknown)) df = 2*(n - 1) ncp = f / sqrt(1/n) #or ncp = d / sqrt(2/n) pt(qt(.025, df), df, ncp) + 1 - pt(qt(.975, df), df, ncp) ## [1] 0.7951683 # SECOND EXAMPLE: # distinguish between small and large effect sizes: d = .80 - .20 f = d / sqrt(2) n = ceiling(sampleSize(f)) n ## [1] 44 df = 2*(n - 1) ncp = f / sqrt(1/n) #or ncp = d / sqrt(2/n) #PE: pt(qt(.025, df), df, ncp) + 1 - pt(qt(.975, df), df, ncp) ## [1] 0.79467 # EXAMPLE 3: distinguish small and large with ROPE # ROPE small and large: rope.small = c(.10, .30) rope.large = c(.70, .90) d = rope.large[1] - rope.small[2] f = d / sqrt(2) n = ceiling(sampleSize(f)) n ## [1] 98 df = 2*(n - 1) ncp = f / sqrt(1/n) #or ncp = d / sqrt(2/n) #PE: pt(qt(.025, df), df, ncp) + 1 - pt(qt(.975, df), df, ncp) ## [1] 0.7956414 # Example 4: distinguish medium from small # or medium from large with ROPE rope.medium = c(.40, .60) d = rope.medium[1] - rope.small[2] f = d / sqrt(2) n = ceiling(sampleSize(f)) n ## [1] 1538 df = 2*(n - 1) ncp = f / sqrt(1/n) #or ncp = d / sqrt(2/n) #PE: pt(qt(.025, df), df, ncp) + 1 - pt(qt(.975, df), df, ncp) ## [1] 0.7916783 ## Sample size planning for precision: the basics In this post, I will introduce some of the ideas underlying sample size planning for precision. The ideas are illustrated with a shiny-application which can be found here: https://gmulder.shinyapps.io/PlanningApp/. The app illustrates the basic theory considering sample size planning for two independent groups. (If the app is no longer available (my allotted active monthly hours are limited on shinyapps.io), contact me and I’ll send you the code). 
### The basic idea The basic idea is that we are planning an experiment to estimate the difference in population means of an experimental and a control group. We want to know how many observations per group we have to make in order to estimate the difference between the means with a given target precision. Our measure of precision is the Margin of Error (MOE). In the app, we specify our target MOE as a fraction (f) of the population standard deviation. However, we do not only specify our target MOE, but also our desired level of assurance. The assurance is the probability that our obtained MOE will not exceed our target MOE. Thus, if the assurance is .80 and our target MOE is f = .50, we have a probability of 80% that our obtained MOE will not exceed f = .50. The only part of the app you need for sample size planning is the “Sample size planning”-form. Specify f, and the assurance, and the app will give you the desired sample size. If you do that with the default values f = .50 and Assurance = .80, the app will give you the following results on the Planning Results-tab: Sample Size: 36.2175, Expected MOE (f): 0.46. This tells you that you need to sample 37 participants (for instance) per group and then the Expected MOE (the MOE you will get on average) will equal 0.46 (or even a little less, since you sample more than 36.2175 participants). The Planning-Results-tab also gives you a figure for the power of the t-test, testing the NHST nil-hypothesis for the effect size (Cohen’s d) specified in the “Set population values”-form. Note that this form, like the rest of the app provides details that are not necessary for sample size planning for precision, but make the theoretical concepts clear. So, let’s turn to those details. ### The population Even though it is not at all necessary to specify the population values in detail, considering the population helps to realize the following. 
The sample size calculations and the figures for expected MOE and power, are based on the assumption that we are dealing with random samples from normal populations with equal variances (standard deviations). From these three assumptions, all the results follow deductively. The following is important to realize: if these assumptions do not obtain, the truth of the (statistical) conclusions we derive by deduction is no longer guaranteed. (Maybe you have never before realized that sample size planning involves deductive reasoning; deductive reasoning is also required for the calculation of p-values and to prove that 95% confidence intervals contain the value of the population parameter in 95% of the cases; without these assumptions is it uncertain what the true p-value is and whether or not the 95% confidence interval is in fact a 95% confidence interval). In general, then, you should try to show (to others, if not to yourself) that it is reasonable to assume normally distributed populations, with equal variances and random sampling, before you decide that the p-value of your t-test, the width of your confidence interval, and the results of sample size calculations are believable. The populations in the app are normal distributions. By default, the app shows two such distributions. One of the distributions, the one I like to think about as corresponding to the control condition, has μ = 0, the other one has μ = 0.5. Both distributions have a standard deviation (σ = 1). The standardized difference between the means is therefore equal to δ = 0.50. The default populations are presented in Figure 1 below. Figure 1: Two normal distributions. The distribution to the left has μ = 0, the one to the right has μ = 0.5 The standard deviation in both distributions equals σ = 1. The standardized difference δ and the unstandardized difference between the means both equal 0.50. 
### The sampling distribution of the mean difference The other default setting in the app is a sample size (per group) of n = 20. From the sample size and the specification of the populations, we can deduce the probability density of the different values of the estimates of the difference between the population means. The estimate is simply the difference between the sample means. This so-called sampling distribution of the mean difference is depicted on the tab next to the population. Figure 2 shows what the sampling distribution looks like if we repeatedly draw random samples of size n = 20 per group from our populations and keep track of the difference between the sample means we get in each repetition. Figure 2: Sampling distribution of the difference between two sample means based on samples of n = 20 per group and random sampling from the populations described in Figure 1. Note that the mean of the sampling distribution equals 0.5 (as indicated by the middle vertical line). This is of course the (default) difference between the population means in the app. So, on average, estimates of the population difference equal the population difference. The lines to the left and the right of the mean indicate the mean plus or minus the Margin of Error (MOE). The values corresponding to the lines are 0.5 ± MOE. 95% of estimates of the population mean difference have a value between these lines. Conceptually, the purpose of planning for precision is to decrease the (horizontal) distance between these lines and the population mean difference. In other words, we would like the left and right lines as close to the mean of the distribution as is practically acceptable and possible. ### The distribution of the t-statistic The tab next to the sampling distribution tab contains a figure representing the sampling distribution of the t-statistic. The sampling distribution of t can be deduced on the basis of the population values and the sample size. 
In the app, it is assumed that t is calculated under the assumption that the null-hypothesis of zero difference between the means is true. The sampling distribution of t is what you get if you repeatedly sample from the populations as specified, calculate the t-statistic and keep a record of the values of the t-statistic. The sampling distribution of the t-statistic presented in Figure 3 contains two vertical lines. These lines are located (horizontally) on the value of t that would lead to rejection of the null-hypothesis of equal population means. In other words, the lines are located at the critical value of t (for a two-tailed test). Figure 3: Distribution of the t-statistic testing the null-hypothesis of equal population means. The distribution is based on sampling from the populations described in Figure 3. The sample size is n = 20 per group. The lines represent the critical value of t for a two sided t-test. The area between the vertical lines is the probability of a type II error. The combined areas to the left of the left line and to the right of the right line is the power of the test. The area between the lines is the probability that the null-hypothesis will not be rejected. In the case of a true population mean difference (which is the default assumption in the app), that probability is the probability of an error of the second kind: a type II error. The complement of that probability is called the power of the test. This is, of course, the area to the left of the left vertical line added to the area to the right of the right vertical line. Conceptually, the power of the test is the probability of rejecting the null-hypothesis when in fact it is false. Figure 3 clearly demonstrates that if the true mean difference equals 0.50 and the sample size (per group) equals n = 20, that there is a large probability that the null-hypothesis will not be rejected. Actually, the probability of a type II error equals .66. (So, the power of the test is .34). 
### Sample size planning for precision With respect to sample size planning for precision, the app by default takes half of a standard deviation (f = .50) as the target MOE. Besides, planning is with 80% assurance. This means that the default settings search for a sample size (per group), so that with 80% probability MOE will not exceed 0.50 (Note that the default value of the standard deviation is 1, so an f of .50 corresponds to a target MOE of 0.50 on the scale of the data; Likewise, were the standard deviation equal to 2, an f of .50 would correspond to a target MOE of 1.0). As described above, planning with the default values gives us a sample size of n = 37 per group, with an expected MOE of 0.46. In the tab next to the planning results, a figure displays what you can expect to find on average, given the planned sample size and the specification of the population. That figure is repeated here as Figure 4. Figure 4: Expected results in terms of point and interval estimates (95% confidence intervals). This is what you will find on average given the population specification in Figure 1 and using the default values for sample size planning. Figure 4 displays point and interval estimates of the group means and the difference between the means. The interval estimates are 95% confidence intervals. The figure clearly shows that on average, our estimate of the difference is very imprecise. That is, the expected 95% confidence interval ranges from almost 0 (0.50 – 0.46 = 0.04) to almost 1 (0.50 + 0.46 = 0.96). Of course, using n = 20, would be worse still. A nice thing about the app (well, I for one think it’s pretty cool) is that as soon as you ask for the sample sizes, the sample size in the set population values form is automatically updated. Most importantly, this will also update the sampling distribution graphs of the difference between the means and the t-statistic. 
So, it provides an excellent way of showing what the updated sample size means in terms of MOE and the power of the t-test. Let’s have a look at the sampling distribution of the mean difference, see Figure 5. Figure 5: Sampling distribution of the mean difference with n = 37 per group. Compare with Figure 2 to see the (small) difference in the Margin of Error compared to n = 20. If you compare Figures 5 and 2, you see that the vertical lines corresponding to the mean plus and minus MOE have shifted somewhat towards the mean. So here you can see, that almost doubling the sample size (from 20 to 37) had the desired effect of making MOE smaller. I would like to point out the similarity between the sampling distribution of the difference and the expected results plot in Figure 4. If you look at the expected results for our estimate of the population difference, you see that the point estimate corresponds to the mean of the sampling distribution, which is of course equal to the populations mean difference and that the limits of the expected confidence interval correspond to the left and right vertical lines in Figure 5. Thus, on average the limits of the confidence interval correspond to the values that mark the middle 95% of the sampling distribution of the samples mean difference. Since we specified an assurance of 80%, there is an 80% probability that in repeated sampling from the populations (see Figure 1) with n = 37 per group, our (estimated) MOE will not exceed half a standard deviation. Thus, whatever the true value of the populations mean difference is, there is a high probability that our estimate will not be more than half a standard deviation away from the mean. This is, I think, one of the major advantages of sample size planning for precision: we do not have to specify the unknown population mean difference. This is in contrast to sample size planning for power, where we do have to specify a specific population mean difference. 
Speaking of power, the results of the sample size planning suggest that for our specification of the populations mean difference (Cohen’s delta = 0.50) the power of the test equals 0.56. Thus, there is a probability of 56% that with n = 37 per group the t-test will reject. The probability of a type II error is therefore 44%. Figure 6 shows the distribution of the t statistic with n = 37 per group and a standardized effect size of 0.50. Figure 6. The distribution of the t-statistic testing the null-hypothesis of equal population means. The distribution is based on the population specification in Figure 1 and sample sizes of n = 37 per group, with true effect size equal to 0.50. The probability of a type II error is the area of under the curve between the two vertical lines. The power is the area under the curve beyond the two lines. Compare with Figure 3 to see the differences in these probabilities compared to n = 20. ### Power versus precision Now suppose that the unstandardized mean difference between the population means equals 2 and that the standard deviation equals 2.5. I just filled in the set population values form, setting the mean of population 2 to 2.0 and the standard deviation to 2.5. And I clicked set values. Let us plan for a target MOE of f = 0.5 standard deviations with 80% assurance. Click get sample sizes in the sample size planning form. In this case, target MOE equals 1.25. The results are not very surprising. Since the f did not change compared to the previous time, the results as regards the sample size are exactly the same. We need n = 37. Again, this is what I like about sample size planning, no matter what the unknown situation in the population is, I just want my margin of error to be no more than half a standard deviation (for example). But the power did change (of course). 
Since the standardized population mean difference is now 0.80 (= 2.0 / 2.5) instead of 0.50, and all the other specifications remained the same, the power increases from 56% to 92%. That’s great. However, the high probability of rejecting the null-hypothesis does not mean that we get precise estimates. On average, the point estimate of the difference equals 2 and the 95% confidence limits are 0.85 and 3.15 (the point estimate plus or minus 0.46 times the standard deviation of 2.5). See Figure 7. Figure 7: Expected results using n = 37 when sampling from two normal populations with equal standard deviations (σ = 2.5) and mean difference of 2.0. The standardized effect size equals 0.80. Note the imprecision of the estimates even though the power of the t-test equals .92. In short, even though there is a high probability of (correctly) rejecting the null-hypothesis of equal population means, we are still not in the position to confidently conclude what the size of the difference is: the expected confidence interval is very wide. ## Planning for a precise slope estimate in simple regression In this post, I will show you a way of determining a sample size for obtaining a precise estimate of the slope of the simple linear regression equation $Y = \beta_0 + \beta_1 X + \epsilon$. The basic ingredients we need for sample size planning are a measure of the precision, a way to determine the quantiles of the sampling distribution of our measure of precision, and a way to calculate sample sizes. As our measure of precision we choose the Margin of Error (MOE), which is the half-width of the 95% confidence interval of our estimate (see: Cumming, 2012; Cumming & Calin-Jageman, 2017; see also www.thenewstatistics.com). ### The distribution of the margin of error of the regression slope In the case of simple linear regression, assuming normality and homogeneity of variance, MOE is $MOE = t_{.975} \, \widehat{se}(\hat{\beta}_1)$, where $t_{.975}$ is the .975 quantile of the central t-distribution with $N - 2$ degrees of freedom, and $\widehat{se}(\hat{\beta}_1)$ is the standard error of the estimate of $\beta_1$.
An expression of the squared standard error of the estimate of $\beta_1$ is (Wilcox, 2017): $\widehat{se}^2(\hat{\beta}_1) = \frac{\hat{\sigma}^2_{y|x}}{\sum(X_i - \bar{X})^2}$, the variance of Y given X divided by the sum of squared errors of X. The variance $\sigma^2_{y|x}$ equals $\sigma^2_y(1 - \rho^2)$, the variance of Y multiplied by 1 minus the squared population correlation between Y and X, and it is estimated with the residual variance $\hat{\sigma}^2_{y|x} = \frac{\sum(Y_i - \hat{Y}_i)^2}{N - 2}$, where $\hat{Y}_i = \hat{\beta}_0 + \hat{\beta}_1 X_i$. The estimated squared standard error is given in (1) (1) $\widehat{se}^2(\hat{\beta}_1) = \frac{\sum(Y_i - \hat{Y}_i)^2 / (N - 2)}{\sum(X_i - \bar{X})^2}$ With respect to the sampling distribution of MOE, we first note the following. The distribution of estimates of the residual variance in the numerator of (1) is a scaled $\chi^2$-distribution: $\frac{\sum(Y_i - \hat{Y}_i)^2}{\sigma^2_{y|x}} \sim \chi^2_{N-2}$, thus $\hat{\sigma}^2_{y|x} \sim \frac{\sigma^2_{y|x}}{N - 2}\chi^2_{N-2}$. Second, we note that $\frac{\sum(X_i - \bar{X})^2}{\sigma^2_x} \sim \chi^2_{N-1}$, where $\sigma^2_x$ is the variance of X, therefore $\sum(X_i - \bar{X})^2 \sim \sigma^2_x \chi^2_{N-1}$. Alternatively, since $\sum(X_i - \bar{X})^2 = (N - 1)\hat{\sigma}^2_x$, and multiplying by 1 ($\frac{N - 1}{N - 1}$), $\sum(X_i - \bar{X})^2 \sim (N - 1)\sigma^2_x \frac{\chi^2_{N-1}}{N - 1}$. In terms of the sampling distribution of (1), then, we have the ratio of two (scaled) $\chi^2$ distributions, one with $N - 2$ degrees of freedom, and one with $N - 1$ degrees of freedom. Or something like: $\widehat{se}^2(\hat{\beta}_1) \sim \frac{\sigma^2_y(1 - \rho^2)\,\chi^2_{N-2}/(N - 2)}{(N - 1)\sigma^2_x\,\chi^2_{N-1}/(N - 1)} = \frac{\sigma^2_y(1 - \rho^2)}{(N - 1)\sigma^2_x}F(N - 2, N - 1)$ which means that the sampling distribution of MOE is: (2) $MOE \sim t_{.975}(N - 2)\sqrt{\frac{\sigma^2_y(1 - \rho^2)}{(N - 1)\sigma^2_x}F(N - 2, N - 1)}$ This last equation, that is (2), can be used to obtain quantiles of the sampling distribution of MOE, which enables us to determine assurance MOE, that is the value of MOE that under repeated sampling will not exceed a target value with a given probability. For instance, if we want to know the .80 quantile of estimates of MOE, that is, assurance is .80, we determine the .80 quantile of the (central) F-distribution with N – 2 and N – 1 degrees of freedom and fill in (2) to obtain a value of MOE that will not be exceeded in 80% of replication experiments. For instance, suppose $\sigma^2_y = 1$, $\sigma^2_x = 1$, $\rho = .5$, $N = 100$, and assurance is .80, then according to (2), 80% of estimated MOEs will not exceed the value given by: vary = 1 varx = 1 rho = .5 N = 100 dfe = N - 2 dfx = N - 1 assu = .80 t = qt(.975, dfe) MOE.80 = t*sqrt(vary*(1 - rho^2)*qf(.80, dfe, dfx)/(dfx*varx)) MOE.80 ## [1] 0.1880535 ### What does a quick simulation study tell us? A quick simulation study may be used to check whether this is at all accurate.
And, yes, the estimated quantile from the simulation study is pretty close to what we would expect based on (2). If you run the code below, the estimate equals 0.1878628. library(MASS) set.seed(355) m = c(0, 0) # note: s below is the variance-covariance matrix. In this case, # rho and the cov(y, x) have the same values # otherwise: rho = cov(x, y)/sqrt(varY*VarX) (to be used in the # functions that calculate MOE) # equivalently, cov(x, y) = rho*sqrt(varY*varX) (to be used # in the specification of the variance-covariance matrix for #generating bivariate normal variates) s = matrix(c(1, .5, .5, 1), 2, 2) se <- rep(10000, 0) for (i in 1:10000) { theData <- mvrnorm(100, m, s) mod <- lm(theData[,1] ~ theData[,2]) se[i] <- summary(mod)$coefficients[4] } MOE = qt(.975, 98)*se quantile(MOE, .80) ## 80% ## 0.1878628 ### Planning for precision If we want to plan for precision we can do the following. We start by making a function that calculates the assurance quantile of the sampling distribution of MOE described in (2). Then we formulate a  squared cost function, which we will optimize for the sample sizeusing the optimize function in R. Suppose we want to plan for a target MOE of .10 with 80% assurance.We may do the following. vary = 1 varx = 1 rho = .5 assu = .80 tMOE = .10 MOE.assu = function(n, vary, varx, rho, assu) { varY.X = vary*(1 - rho^2) dfe = n - 2 dfx = n - 1 t = qt(.975, dfe) q.assu = qf(assu, dfe, dfx) MOE = t*sqrt(varY.X*q.assu/(dfx * varx)) return(MOE) } cost = function(x, tMOE) { cost = (MOE.assu(x, vary=vary, varx=varx, rho=rho, assu=assu) - tMOE)^2 } #note samplesize is at least 40, at most 5000. 
#note that since we already know that N = 100 is not enough #instead of 40 we might just as well set N = 100 at the lower #limit of the interval (samplesize = ceiling(optimize(cost, interval=c(40, 5000), tMOE = tMOE)$minimum)) ## [1] 321 #check the result: MOE.assu(samplesize, vary, varx, rho, assu) ## [1] 0.09984381 ### Let’s simulate with the proposed sample size Let’s check it with a simulation study. The estimated .80 quantile of estimates of MOE is 0.1007269 (if you run the below code with random seed 355), which is pretty close to what we would expect based on (2). set.seed(355) m = c(0, 0) # note: s below is the variance-covariance matrix. In this case, # rho and the cov(y, x) have the same values # otherwise: rho = cov(x, y)/sqrt(varY*VarX) (to be used in the # functions that calculate MOE) # equivalently, cov(x, y) = rho*sqrt(varY*varX) (to be used # in the specification of the variance-covariance matrix for # generating bivariate normal variates) s = matrix(c(1, .5, .5, 1), 2, 2) se <- rep(0, 10000) samplesize = 321 for (i in 1:10000) { theData <- mvrnorm(samplesize, m, s) mod <- lm(theData[,1] ~ theData[,2]) se[i] <- summary(mod)$coefficients[4] } MOE = qt(.975, samplesize - 2)*se # note: df corrected from 98 to samplesize - 2; the quantile reported below was obtained with df = 98 quantile(MOE, .80) ## 80% ## 0.1007269 ### References Cumming, G. (2012). Understanding the New Statistics. Effect Sizes, Confidence Intervals, and Meta-Analysis. New York: Routledge Cumming, G., & Calin-Jageman, R. (2017). Introduction to the New Statistics: Estimation, Open Science, and Beyond. New York: Routledge. Wilcox, R. (2017). Understanding and Applying Basic Statistical Methods using R. Hoboken, New Jersey: John Wiley and Sons. ## Planning for a precise interaction contrast estimate In my previous post (here),  I wrote about obtaining a confidence interval for the estimate of an interaction contrast. 
I demonstrated, for a simple two-way independent factorial design, how to obtain a confidence interval by making use of the information in an ANOVA source table and estimates of the marginal means and how a custom contrast estimate can be obtained with SPSS. One of the results of the analysis in the previous post was that the 95% confidence interval for the interaction was very wide. The estimate was .77, 95% CI [0.04, 1.49]. Suppose that it is theoretically or practically important to know the value of the contrast to a more precise degree.  (I.e. some researchers will be content that the CI allows for a directional qualitative interpretation: there seems to exist a positive interaction effect, but others, more interested in the quantitative questions may not be so easily satisfied).  Let’s see how we can plan the research to obtain a more precise estimate. In other words, let’s plan for precision. Of course, there are several ways in which the precision of the estimate can be increased. For instance, by using measurement procedures that are designed to obtain reliable data, we could change the experimental design, for example switching to a repeated measures (crossed) design, and/or increase the number of observations. An example of the latter would be to increase the number of participants and/or the number of observations per participant.  We will only consider the option of increasing the number of participants, and keep the independent factorial design, although in reality we would of course also strive for a measurement instrument that generally gives us highly reliable data. (By the way, it is possible to use my Precision application to investigate the effects of changing the experimental design on the expected precision of contrast estimates in studies with 1 fixed factor and 2 random factors). The plan for the rest of this post is as follows. 
We will focus on getting a short confidence interval for our interaction estimate, and we will do that by considering the half-width of the interval, the Margin of Error (MOE). First we will try to find a sample size that gives us an expected MOE (in repeated replication of the experiment with new random samples) no more than a target MOE. Second, we will try to find a sample size that gives a MOE smaller than or equal to our target MOE in a specifiable percentage (say, 80% or 90%) of replication experiments. The latter approach is called planning with assurance. Let us get back to some of the SPSS output we considered in the previous post to get the ingredients we need for sample size planning. First, the ANOVA table. Table 1. ANOVA source table We are interested in estimating and optimizing the precision of an interaction contrast estimate. The first things we need are an expression of the error variance needed to calculate the standard error of the estimate and the degrees of freedom that were used in estimating the error variance. In general, the error variance needed is the same error variance you would use in performing an F-test for the specific effect, in this case the interaction effect. Thus, we note the error variance used to test the interaction effect, i.e. mean square error, and the degrees of freedom. The value of mean square error is 3.324, and the degrees of freedom are 389. Note that this value is the total sample sizes minus the number of conditions (393 – 4 = 389), or, equivalently, the total sample sizes minus the degrees of freedom of the intercept, the main effects, and the interaction (393 – (1 + 1 + 1 + 1) = 389).  I will call these degrees of freedom the error degrees of freedom, dfe. MOE can be obtained by multiplying a critical t-value with the same degrees of freedom as the error degrees of freedom with the standard error of the estimate. 
The standard error of the contrast estimate is where is the contrast weight for the i-th condition mean, and the number of observations (in our example participants) in treatment condition i.  Note that is the variance of  treatment mean i, the square root of which gives the familiar standard error of the mean. The contrast weights we used to estimate the 2 x 2 interaction were {-1, 1, 1, -1}. So, the expression for MOE becomes Thus, suppose we have the independent 2×2 factorial design, , and the true value of Mean Square Error is 3.324, then MOE for the contrast estimate equals . Note that this is the value of MOE we obtain on average in repeated replications with new samples, if we use sample sizes of 100 (total number of participants is 400) and if the true value of the error variance is 3.324.  The value is close to the value we obtained in the previous post (MOE = 0.72) because the sample sizes were very close to 100 per group. Now, we found the original confidence interval too wide, and we have just seen how 100 participants per group does not really help. MOE is only slightly smaller than our originally obtained MOE. We need to set a target MOE and then figure out how many participants we need to get that target MOE. #### Intermezzo: Rules of thumb for target MOE (Here are some updated rules of thumb: https://the-small-s-scientist.blogspot.com/2018/11/contrast-tutorial.html) In the absence of theoretical or practical considerations about the precision we want, we may want to use rules of thumb. My (very first proposal for) rules of thumb are based on the default interpretations of Cohen’s d. Considering the absolute values of d ≤ .10 to be negligible d = .20 small, d = .50 medium and d = .80 large. (I really do not like rules-of-thumb, because using them is a sign that you are not thinking). Now, suppose that we interpret the confidence interval as a range of plausible values for the true value of the effect size. 
It is not at all clear to me what such a supposition entails, but let’s simply take it for granted right now (please don’t). Then, I think it is reasonable to say that being able to distinguish between small and negligible effects sizes is relatively precise. Thus a MOE of .05 (pooled) standard deviations  can be considered precise because (on average) the 95% CI for the small effect sizes is [.15, .25], assuming we know the value of the standard deviation, so negligible effects will not be deemed plausible values on average, since effect sizes smaller than .10 are outside the interval. By essentially the same reasoning. if we cannot distinguish between large and negligible effects, we are not estimating things very precisely. Therefore, a MOE of .80 standard deviations can be considered to be not very precise. On average, the CI for an existing large effect, will be [0,  1.60], so it includes both negligible and very large effects as plausible values. For medium (does it make sense to speak of medium precision?) precision I would like to suggest .20-.25 standard deviations. On average, with this value for MOE, if there is a medium effect, small effects and large effects are relatively implausible.  In the case of small effects, medium precision entails that on average both effects in the opposite direction and medium effects are among the plausible values. Of course, I am interpreting the d-values as strict boundaries, but the scale is not categorical, but continuous. So instead of small, large effect sizes, it’s better to speak of smallish and largish effect sizes. And as soon as I find a variant for medium effects sizes I will also include that term in the list. Note: sample size planning may indicate that precision of MOE = .20-.25 standard deviations is unattainable. In that case, we will simply have to accept that our precision does not lead to confident conclusions about the population effect size. 
(Once I showed one of my colleagues my precision app, during which he said: “that amount of precision requires a very large sample. I do not like your ideas about sample size planning”). (By the way, I am also considering rules-of-thumb for target MOE that include assurance. Something like: high precision is when repeated experiments have a high probability of distinguishing small and negligible effects; in that case the average MOE will be smaller than .05). #### Planning for precision Let’s plan for a precision of 0.25 standard deviation. In our case, that standard deviation is the pooled standard deviation: the square root of Mean Square Error. The (estimated) value of  Mean Square Error is 3.324 (see Table 1), so our value for the standard deviation is 1.8232.  Our target MOE is, therefore, 0.4558. Let’s make things very clear. Here we are planning for a target MOE based on an estimate of the pooled standard deviation (and on assumptions about the population distribution). In order for our planning to be of practical value, we need some reassurance that that estimate is trustworthy. One way of doing that is to consider the CI for the standard deviation. I will not discuss that topic, and simply give you a CI: [2.90,  3.86]. (Note: this interval brackets the Mean Square Error estimate of 3.324, so it appears to be a CI for the variance rather than for the standard deviation — worth verifying.) Take a look at the expression for MOE. $$MOE = t_{.975, df_e}\sqrt{MSE\frac{\sum_i c_i^2}{n}},$$ where $\sum_i c_i^2 = 4$, since we are considering the 2×2 design. Since our target MOE equals .4558, our goal becomes to solve the following equation for $n$, since we want the sample size: $$2t_{.975, 4(n-1)}\sqrt{\frac{MSE}{n}} = 0.4558.$$ However, because $n$ determines both the standard error and the degrees of freedom (and thereby the critical value of t), the equation may be a little hard to solve.  So, I will create a function in R that enables me to quite easily get the required sample size. (It is relatively easy to create a more general function (see the Precision App), but here I will give an example tailored to the specific situation at hand). 
First we create a function to calculate MOE: MOE = function(n) { MOE = 2*qt(.975, 4*(n - 1))*sqrt(3.324/n) } Next, we will define a loss function and use R’s built-in optimize function to determine the sample size. Note that the loss-function calculates the squared difference between MOE based on a sample size n and our target MOE. The optimize function minimizes that squared difference in terms of sample size n (starting with n = 100 and stopping at n = 1000). loss <- function(n) { (MOE(n) - 0.4558)^2 } optimize(loss, c(100, 1000)) ## $minimum ## [1] 246.4563 ## ##$objective ## [1] 8.591375e-18 Thus, according to the optimize function we need 247 participants (per group; total N = 988), to get an expected MOE equal to our target MOE. The expected MOE equals 0.4553, which you can confirm by using the MOE function we made above. #### Planning with assurance Although expected MOE is close to our target MOE, there is a probability 50% that the obtained MOE will be larger than our target MOE.  In other words, repeated sampling will lead to obtained MOEs larger than what we want. That is to say, we have 50% assurance that our obtained MOE will be at least as small as our target MOE. Planning with assurance means that we aim for a certain specified assurance that our obtained MOE will not exceed our target MOE. For instance, we may want to have 80% assurance that our obtained MOE will not exceed our target MOE. Basically, what we need to do is take the sampling distribution of the estimate of  Mean Square Error into account. We use the following formula (see also my post introducing the Precision App for the general formulae: https://the-small-s-scientist.blogspot.nl/2017/04/planning-for-precision.html). where is the assurance expressed in a probability between 0 and 1. Let’s do it in R. 
Again, the function that calculates assurance MOE is  tailored for the specific situation, but it is relatively easy to formulate these functions in a generally applicable way, MOE.gamma = function(n) { df = 4*(n-1) MOE = 2*qt(.975, df)*sqrt(3.324/n*qchisq(.80, df)/df) } loss <- function(n) { (MOE.gamma(n) - 0.4558)^2 } optimize(loss, c(100, 1000)) ## $minimum ## [1] 255.576 ## ##$objective ## [1] 2.900716e-18 Thus, according to the results, we need 256 persons per group (N = 1024 in total) to have a 80% probability of obtaining a MOE not larger than our target MOE. In that case, our expected MOE will be 0.4472. ## Planning for Precision: simulation results for four designs with four conditions This is the third post about the Planning for Precision app (in the future I’ll explain the difference between Planning for Precision and Precision for Planning). Some background information about the application can be found here: http://the-small-s-scientist.blogspot.com/2017/04/planning-for-precision.html. In this post, I want to present the simulation results for 4 designs with 4 conditions. The designs are: the counter balanced design (see previous post), the fully-crossed design, the stimulus-within-condition design, and the stimulus-and-participant-within-condition design (the both-within-condition design). I have not included the participants-within-condition design, because this is simply the mirror-image (so to say) of the stimulus-within-condition design. In one of my next posts, I will describe some more background information about planning for precision, but some of the basics are as follows. We have a design with 4 treatment conditions, and what we want do is to estimate differences between these condition means by using contrasts. 
For instance, we may be interested in the (amount of) difference between the first mean, maybe because it is a control-condition with the average of the other three conditions: μ1 – (μ2 + μ3 + μ4)/3 =  1*μ1 – 1/3*μ2 -1/3*μ3 – 1/3*μ4.  The values {1, -1/3, -1/3, -1/3} are the contrast weights, and for the result we use the term ψ. The value of ψ is estimated on the basis of estimates of the population means, that is, the sample means or condition means. Due to sampling error, the contrast estimate varies from sample to sample and the amount of sampling error can be expressed by means of a confidence interval. Conceptually, the confidence interval expresses the precision of the estimate: the wider the confidence interval, the less precise the estimate is. The Margin of Error (MOE) of an estimate is the half-width of the confidence interval, so the confidence interval is the estimate plus or minus MOE. We will take MOE as an expression of the precision of the estimate (the less the value of MOE the more precise the estimate).  Now, if you want to estimate an effect size, more precision (lower value of MOE; less wide confidence interval) is better than less precision (higher value of MOE; wider confidence interval).  The app let’s you specify the design and the contrast weights and helps you find the minimum required sample sizes (for participants and stimuli) for a given target MOE. (You can also play with the designs to see which design gives you smallest expected MOE). Crucially, if you plan for precision, you also want to have some assurance that the MOE you are likely to obtain in you actual experiment will not be larger than you target MOE. Compare this with power: 80% power means that the probability that you will reject the null-hypothesis is 80%. Likewise, assurance MOE of 80% means that there is an 80% probability that your obtained MOE will be no larger than assurance MOE. 
The simulations (with N = 10000 replications) estimate Expected MOE as well as Assurance MOE for assurances of .80, .90, .95, and .99, for 4 designs with 4 treatment conditions, with a total number of 48 participants and 24 stimuli (items).  The MOEs are given for three standard constrasts: 1) the difference between the first mean and the mean of the other three, with weights {1, -1/3, -1/3, -1/3}; 2) the difference between the second mean and the mean of conditions three and four, with weights {0, 1, -1/2, -1/2}; 3) the difference between the third and fourth condition means, with weights {0, 0, 1, -1}. I will present the results in  separate tables for the 4 designs considered and include percentage difference between expected values of assurance MOE and the estimated values estimated values. ### The fully crossed design The results are in the following table. The percentage difference between the expected quantiles (= assurance MOEs for given insurance;  i.e. q.80 is expected or estimated  80% Assurance MOE) and the estimated quantiles are: .80: 0.11%; .90: 0.05%; .95: -0.14%; 99: -0.05%. ### The counter balanced design The results are presented in the following table. The percentage difference between the expected quantiles and the estimated quantiles are: .80: 0.03%; .90: 0.13%;  .95: 0.09%, .99: -0.23%. ### The stimulus-within-condition design The following table contains the details. The percentage difference between the expected quantiles and the estimated quantiles are: .80: -0.11%; .90: -0.33%;  .95: -0.55%, .99: -0.70%. ### Both-participant-and-stimulus-within-condition design Here is the table. And the percentage differences are: .80: -0.34%; .90: -0.59%;  .95: -0.82%;  .99: -1.06%. ### Conclusion The results show that the simulation results are quite consistent with the expected values based on mixed model ANOVA. We can see that the differences between expected and estimated values increase the less the number of participants and items per condition. 
For instance, in the both within condition design 12 participants respond to 6 stimuli in one of the four treatment conditions. The fact that even with these small samples sizes the results seem to agree to an acceptable degree is (to my mind) encouraging. Note that with small samples the expected assurance MOES are slightly lower than the estimates, but the largest difference is -1.06% (see the MOE for 99% assurance). ## Planning for Precision: first simulation results In this post, I want to share the results of the first simulation study to “test” my Planning for Precision app. More details about the app can be found in a previous post: here. I have included the basic logic of the simulations (including R code) in a document that you can download: https://drive.google.com/open?id=0B4k88F8PMfAhSlNteldYRWFrQTg. The simulation study simulates responses from a four condition counter balanced design, with p = 48 participants and q = 24 stimuli/items. Here, we will focus on expected and assurance MOE for three contrasts. The first contrast estimates the difference between the first mean and the average of the other three, the second contrast the difference between the second mean and the average of the third and fourth means, and the final contrast the difference between the means of the third and fourth contrasts. Expected MOE is compared to the mean of the estimated MOE for each of the contrasts (based on 10000 replications). Assurance MOE is judged for assurance of .80, .90, .95 and .99, by comparing the calculations in the app with the corresponding quantile estimates of the simulated distributions. ### Results Note that in the above table, the Expected Mean MOE is what I have called Expected MOE, and the q.80 through q.99 are quantiles of the distribution of MOE. 
As an example, q.80 is the quantile corresponding to assurance MOE with 80% assurance, Expected q.80 is the value of assurance MOE calculated with the theoretical approach, and Estimated q.80 is the estimated quantile based on the simulation studies. Importantly, we can see that most of the figures agree to a satisfying degree. If we look at the relative differences, expressed in percentages for the assurance MOEs, we get 0.0325% for q.80,  0.1260% for q.90, 0.0933% for q.95, and  -0.2324% for q.99. ### Conclusion The first simulation results seem promising. But I still have a lot of work to do for the rest of the designs. ## Planning for precision with samples of participants and items Many experiments involve the (quasi-)random selection of both participants and items. Westfall et al. (2014) provide a Shiny-app for power-calculations for five different experimental designs with selections of participants and items. Here I want to present my own Shiny-app for planning for precision of contrast estimates (for the comparison of up to four groups) in these experimental designs.  The app can be found here: https://gmulder.shinyapps.io/precision/ (Note: I have taken the code of Westfall’s app and added code or modified existing code to get precision estimates in stead of power; so, without Westfall’s app, my own modified version would never have existed). The plan for this post is as follows. I will present the general theoretical background (mixed model ANOVA combined with ideas from Generalizability Theory) by considering comparing three groups in a counter balanced design. Note 1: This post uses mathjax, so it’s probably unreadable on mobile devices. 
Note: a (tidied up) version (pdf) of this post can be downloaded here: download the pdf Note 2: For simulation studies testing the procedure go here: https://the-small-s-scientist.blogspot.nl/2017/05/planning-for-precision-simulation.html Note 3: I use the terms stimulus and item interchangeably; have to correct this to make things more readable and comparable to Westfall et al. (2014). Note 4: If you do not like the technical details you can skip to an illustration of the app at the end of the post. ## The general idea The focus of planning for precision is to try to minimize the half-width of a 95%-confidence interval for a comparison of means (in our case). Following Cumming’s (2012) terminology I will call this half-width the Margin of Error (MOE). The actual purpose of the app is to find required sample sizes for participants and items that have a high probability (‘assurance’) of obtaining a MOE of some pre-specified value. ## Expected MOE for a contrast For a contrast estimate   we have the following expression for the expected MOE. where is the standard error of the contrast estimate. Of course, both the standard error and the df are functions of the sample sizes. For the standard error of a contrast with contrast weights through , where a is the number of treatment conditions,  we use the following general expression. where n is the per treatment sample size (i.e. the number of participants per treatment condition times the number of items per treatment condition) and the within treatment variance (we assume homogeneity of variance). For a simple example take an independent samples design with n = 20 participants responding to 1 item in one of two possible treatment conditions (this is basically the set up for the independent t-test). Suppose we have contrast weights and , and , the standard error for this contrast equals .  (Note that this is simply the standard error of the difference between two means as used in the independent samples t-test). 
In this simple example, df is the total sample size (N = n*a) minus the number of treatment conditions (a), thus . The expected MOE for this design is therefore, . Note that using these figures entails that 95% of the contrast estimates will take values between the true contrast value plus and minus the expected MOE: . For the three groups case, and contrast weights {}, the same sample sizes and within treatment variance gives . (If you like, I’ve written a little document with derivation of the variance of selected contrast estimates in the fully crossed design for the comparison of two and three group means. That document can be found here: https://drive.google.com/open?id=0B4k88F8PMfAhaEw2blBveE96VlU) The focus of planning for precision is to try to find sample sizes that minimize expected MOE to a certain target MOE.  The app uses an optimization function that minimizes the squared difference between expected MOE and target MOE to find the optimal (minimal) sample sizes required. ### Planning with assurance If the expected MOE is equal to target MOE,  the sample estimate of MOE will be larger than your target MOE in 50% of replication experiments. This is why we plan with assurance (terminology from Cumming, 2012).  For instance, we may want to have a 95% probability (95% assurance) that the estimated MOE will not exceed our target MOE. In order to plan with assurance, we need (an approximation of) the sampling distribution of MOE. In the ANOVA approach that underlies the app, this boils down to the distribution of estimates of thus In terms of the two-groups independent samples design above: the expected MOE equals 2.8629. But, with df = 38, there is an 80% probability (assurance) that the estimated MOE will be no larger than: Note that the 45.07628 is the quantile in the chi-squared (df = 38) distribution. That is . 
The app let’s  you specify a target MOE and a value for the desired assurance () and will find the combination of number of participants and items that will give an estimated MOE no larger than target MOE in % of the replication experiments. ## The mixed model ANOVA approach Basically, what we need to plan for precision is to able to specify and the degrees of freedom. We will specify as a function of variance components and use the Satterthwaite procedure to approximate the degrees of freedom by means of a linear combination of expected mean squares. I will illustrate the approach with a three-treatment conditions counterbalanced design. ### A description of the design Suppose we are interested in estimating the differences between three group means. We formulate two contrasts: one contrast estimates the mean difference between the first group and the average of the means of the second and third groups. The weights of the contrasts are respectively {1, -1/2, -1/2}, and {0, 1, -1}. We are planning to use a counterbalanced design with a number of participants equal to p and a sample of items of size q. In the design we randomly assign participants to a groups, where a is the number of conditions, and randomly assign items to a lists (see Westfall et al., 2014 for more details about this design). All the groups are exposed to all lists of stimuli, but the groups are exposed to different lists in each condition. The number of group by list combinations equals , and the number of observations in each group by list combination equals . The condition means are estimated by combining a group by list combinations each of which composed of different participants and stimuli. The total number of observations per condition is therefore, . 
### The ANOVA model The ANOVA model for this design is where the effect is a constant treatment effect (it’s a fixed effect), and the other effect are random effects with zero mean and variances (participants), (items), (person by treatment interaction), (item by treatment interaction) and (error variance confounded with the person by item interaction). Note: in Table 1 below, is (for technical reasons not important for this blogpost) presented as this confounding . We make use of the following restrictions (Sahai & Ageel, 2000): , and . The latter two restrictions make the interaction-effects correlated across conditions (i,e. the effects of person and treatment are correlated across condition for the same person, likewise the interaction effects of item and treatment are correlated across conditons for the same item. Interaction effects of different participants and items are uncorrelated). The covariances between the random effects are assumed to be zero. Under this model (and restrictions) , and . Furthermore, the covariance of the interactions between treatment and participant or between treatment and item for the same participant or item are for participants and for items. ### Within treatment variance In order to obtain an expectation for MOE, we take the expected mean squares to get an expression or the expected within treatment variance . These expected means squares are presented in Table 1. The expected within treatment variance can be found in the Treatment row in Table 1. It is comprised of all the components to the right of the component associated with the treatment effect (). Thus, . Note that the latter equals the sum of the expected mean squares of the Treatment by Participant () and the Treatment by Item () interactions, minus the expected mean square associated with Error (). ### Degrees of freedom The second ingredient we need in order to obtain expected MOE are the degrees of freedom that are used to estimate the within treatment variance. 
In the ANOVA approach the within treatment variance is estimated by a linear combination of mean squares (as described in the last sentence of the previous section. This linear combination is also used to obtain approximate degrees of freedom using the Satterthwaite procedure: 1. ### Expected MOE (Note: I can’t seem to get mathjax to generate align environments or equation arrays, so the following is ugly; Note to self: next time use R-studio or Lyx to generate R-html or an equivalent format). The expected value of MOE for the contrasts in the counter balanced design is: ### Finally an example Suppose we the scores in three conditions are normally distributed with (total) variances . Suppose furthermore, that 10% of the variance can be attributed to treatment by participant interaction, 10% of the variance to the treatment by item interaction and 40% of the variance to the error confounded with the participant by item interaction. (which leaves 40% of the total variance attributable to participant and item variance. Thus, we have , , and . Our target MOE is .25, and we plan to use the counterbalanced design with p = 30 participants, and q = 15 items (stimuli). Due to the model restrictions presented above we have , , and . The value of is therefore, , and the approximate df equal . For the first contrast, with weights {1, -1/2. -1/2}, then, the Expected value for the Margin of Error is . For the second contrast, with weights {0, 1, -1}, the Expected value of the Margin of Error is Thus, using p = 30 participants, and q = 15 items (stimuli) will not lead to an expected MOE larger than the target MOE of .25. We can use the app to find the required number of participants and items for a given target MOE. If the number of groups is larger than two, the app uses the contrast estimate with the largest expected MOE to calculate the sample sizes (in the default setting the one comparing only two group means). 
The reasoning is that if the least precise estimate (in terms of MOE) meets our target precision, the other ones meet our target precision as well. ## Using the app I’ve included lots of comments in the app itself, but please ignore references to a manual (does not exist, yet, except in Dutch) or an article (no idea whether or not I’ll be able to finish the write-up anytime soon). I hope the app is pretty straightforward. Just take a look at  https://gmulder.shinyapps.io/precision/, but the basic idea is: – Choose one of five designs – Supply the number of treatment conditions – Specify contrast(weights) (or use the default ones) – Supply target MOE and assurance – Supply values of variance components (read (e.g.) Westfall, et al, 2014, for more details). – Supply a number of participants and items – Choose run precision analysis with current values or – Choose get sample sizes. (The app gives two solutions: one minimizes the number of participants and the other minimizes the number of stimuli/items). NOTE: the number of stimuli is always greater than or equal to 10 and the number of participants is always greater than or equal to 20. ### An illustration Take the example above. Our target MOE equals .25, and we want assurance of .80 to get an estimated MOE of no larger than .25. We use a counter-balanced design with three conditions, and want to estimate two contrasts: one comparing the first mean with the average of means two and three, and the other contrast compares the second mean with the third mean. We can use the default contrasts. For the variance components, we use the default values provided by Westfall et al. (2014) for the variance components. These are also the default values in the app (so we don’t need to change anything now). Let’s see what happens when we propose to use p = 30 participants and q = 15 items/stimuli. 
Here is part of a screenshot from the app: These results show that the expected MOE for the first contrast (comparing the first mean with the average of the other means) equals 0.3290, and assurance MOE for the same contrasts equals 0.3576. Remember that we specified the assurance as .80. So, this means that 80% of the replication experiments give estimated MOE as large as or smaller than 0.3576. But we want that to be at most 0.2500.  Thus, 30 participants and 15 items do not suffice for our purposes. Let’s use the app to get sample sizes. The results are as follows. The app promises that using 25 stimuli combined with 290 participants or 25 participants and 290 items will do the trick (the symmetry of these results is due to the fact that the interaction components are equal; both the treatment by participant and the treatment by stimulus interaction component equal .10).  Since we have 3 treatment conditions using 290 participants or stimuli is a little awkward, so I suggest to use 291 (equals 97 participants per group or 97 items per list). (300 is a much nicer figure of course). Likewise, as it is hard to equally divide 25 stimuli or participants over three lists or groups, use a multiple of three (say: 27). If we input the suggested sample sizes in the app, we see the following results if we choose the run precision analysis with current values. As you can see: Assurance MOE is close to 0.25 (.24) for the second contrast (the least precise one), so 80% of replication experiments will get estimated MOE of 0.25 (.24) or smaller. The expected precision is 0.22. The first contrast (which can be estimated with more precision) has assurance MOE of 0.21 and expected MOE of approximately 0.19.  Thus, the sample sizes lead to the results we want. ### References Cumming, G. (2012). Understanding the New Statistics. New York/London: Routledge. Sahai, H., & Ageel, M. I. (2000). The analysis of variance. Fixed, Random, and Mixed Models. Boston/Basel/Berlin: Birkhäuser. 
Westfall, J., Kenny, D. A., & Judd, C. M. (2014). Statistical power and optimal design in experiments in which samples of participants respond to samples of stimuli. Journal of Experimental Psychology: General, 143(5), 2020-2045.
16,805
71,654
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.1875
3
CC-MAIN-2024-22
longest
en
0.930188
https://fred.stlouisfed.org/series/PLOINVKRA624NUPN?rid=258
1,495,732,236,000,000,000
text/html
crawl-data/CC-MAIN-2017-22/segments/1495463608107.28/warc/CC-MAIN-20170525155936-20170525175936-00227.warc.gz
779,345,975
23,046
Price Level of Investment for Republic of Korea (PLOINVKRA624NUPN)  Excel (data)  CSV (data)  Image (graph)  PowerPoint (graph)  PDF (graph) Observation: 2010: 60.31479 Updated: Sep 17, 2012 Units: PPP of Investment over Exchange Rate, Frequency: Annual 1Y | 5Y | 10Y | Max EDIT LINE 1 (a) Price Level of Investment for Republic of Korea, PPP of Investment over Exchange Rate, Not Seasonally Adjusted (PLOINVKRA624NUPN) Price Level of GDP is the PPP over GDP divided by the exchange rate times 100. The PPP of GDP or any component is the national currency value divided by the real value in international dollars. The PPP and the exchange rate are both expressed as national currency units per US dollar.The value of price level of GDP for the United States is made equal to 100. Price Levels of the components Consumption, Investment, and Government are derived in the same way as the price level of GDP. While the U.S. = 100 over GDP, this is not true for the component shares. The purchasing power parity in domestic currency per \$US for GDP or any component, may be obtained by dividing the price level by 100 and multiplying by the Exchange Rate. Source Indicator: pi Price Level of Investment for Republic of Korea Select a date that will equal 100 for your custom index: to Customize data: Write a custom formula to transform one or more series or combine two or more series. You can begin by adding a series to combine with your existing series. Now create a custom formula to combine or transform the series. Need help? [] Finally, you can change the units of your new series. Select a date that will equal 100 for your custom index: FORMAT GRAPH Log scale: NOTES Source: University of Pennsylvania Release: Penn World Table 7.1 Units:  PPP of Investment over Exchange Rate, Not Seasonally Adjusted Frequency:  Annual Notes: Price Level of GDP is the PPP over GDP divided by the exchange rate times 100. 
The PPP of GDP or any component is the national currency value divided by the real value in international dollars. The PPP and the exchange rate are both expressed as national currency units per US dollar.The value of price level of GDP for the United States is made equal to 100. Price Levels of the components Consumption, Investment, and Government are derived in the same way as the price level of GDP. While the U.S. = 100 over GDP, this is not true for the component shares. The purchasing power parity in domestic currency per \$US for GDP or any component, may be obtained by dividing the price level by 100 and multiplying by the Exchange Rate. Source Indicator: pi Suggested Citation: University of Pennsylvania, Price Level of Investment for Republic of Korea [PLOINVKRA624NUPN], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/PLOINVKRA624NUPN, May 25, 2017. RELATED CONTENT RELEASE TABLES Retrieving data. Updating graph.
682
2,920
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.21875
3
CC-MAIN-2017-22
longest
en
0.856867
https://mathematica.stackexchange.com/questions/229409/how-to-select-2-pairs-of-distinct-twins-form-a-list-of-4-tuples
1,660,754,652,000,000,000
text/html
crawl-data/CC-MAIN-2022-33/segments/1659882573029.81/warc/CC-MAIN-20220817153027-20220817183027-00506.warc.gz
373,096,483
68,386
# How to select 2 pairs of distinct twins form a list of 4-tuples? Given a list of 4-tuples as follows. data = Tuples[Range@6, 4]; I want to select any 4-tuples with two pairs of different twins, for example, as follows: • {1,2,1,2} • {2,2,3,3} • {4,5,5,4} • etc # Attempt First I select any association with length of 2 as follows filter1 = Select[Counts /@ data, Length@# == 2 &] Second I want to select only any association with value of 2. How to do this? • data[[Flatten@Position[Sort /@ data, {x_, x_, y_, y_} /; x != y]]] does work as well but too complicated. Sep 2, 2020 at 20:02 • As I love combinatorial approaches so the accepted answer must reflect it. Sep 3, 2020 at 11:03 ## 6 Answers We can use Permutations once on {1, 1, 2, 2} to get a list of part indices and extract the associated Parts of each 2-subset of the base set: positions = Permutations[{1, 1, 2, 2}]; pairs = Subsets[Range @ 6, {2}]; We can use pairs and positions with Outer or Distribute or Tuples: res1 = Join @@ Outer[Part, pairs, positions, 1]; res2 == Part @@@ Distribute[{pairs, positions}, List]; res3 = Distribute[{pairs, positions}, List, List, List, Part]; res4 = Part @@@ Tuples[{pairs, positions}]; res5 = Tuples[p[pairs, positions]] /. p -> Part; res1 == res2 == res3 == res4 == res5 True You can also use Extract instead of Part: res6 = Join @@ (Extract[#, List /@ positions] & /@ pairs); res6 == res1 True res1 Another way Select[data, Values[Counts[#]] === {2, 2} &] Try this: Pick[data, Values[Counts[#]] === {2, 2} & /@ data] A pattern based approach: Cases[ data, {x_, x_, y_, y_} | {x_, y_, x_, y_} | {x_, y_, y_, x_} /; x != y ] or Select[ data, MatchQ[Sort[#], {x_, x_, y_, y_} /; x != y] & ] • @WissenMachtFrei Fixed that now. Sep 2, 2020 at 19:38 • Thank you very much. As always, I am waiting for other answers (if any) before deciding the accepted answer. 
Sep 2, 2020 at 19:43 Another approach is to construct the desired tuples directly without selection from a larger set: Subsets[Range@6, {2}] // Map[Join[#, #] &] // Map[Permutations] // Flatten[#, 1] & Since the result of Permutations[{i, i, j, j}] is {{i, i, j, j}, {i, j, i, j}, {i, j, j, i}, {j, i, i, j}, {j, i, j, i}, {j, j, i, i}} We can use the method as below Permutations[{#1, #1, #2, #2}] & @@@ Subsets[Range[6], {2}] // Flatten[#, 1] & Or Permutations[{i, i, j, j}] /. Thread[{i, j} -> #] & /@ Subsets[Range[6], {2}] // Flatten[#, 1] & Or Outer[#1 /. Thread[{i, j} -> #2] &, Permutations[{i, i, j, j}], Subsets[Range[6], {2}], 1] // Flatten[#, 1] & data//Extract[#,Position[Tally/@#, {{_,2},{_,2}}]]& {{1, 1, 2, 2}, {1, 1, 3, 3}, {1, 1, 4, 4}, {1, 1, 5, 5}, {1, 1, 6, 6}, {1, 2, 1, 2}, {1, 2, 2, 1}, {1, 3, 1, 3}, {1, 3, 3, 1}, {1, 4, 1, 4}, {1, 4, 4, 1}, {1, 5, 1, 5}, {1, 5, 5, 1}, {1, 6, 1, 6}, {1, 6, 6, 1}, {2, 1, 1, 2}, {2, 1, 2, 1}, {2, 2, 1, 1}, {2, 2, 3, 3}, {2, 2, 4, 4}, {2, 2, 5, 5}, {2, 2, 6, 6}, {2, 3, 2, 3}, {2, 3, 3, 2}, {2, 4, 2, 4}, {2, 4, 4, 2}, {2, 5, 2, 5}, {2, 5, 5, 2}, {2, 6, 2, 6}, {2, 6, 6, 2}, {3, 1, 1, 3}, {3, 1, 3, 1}, {3, 2, 2, 3}, {3, 2, 3, 2}, {3, 3, 1, 1}, {3, 3, 2, 2}, {3, 3, 4, 4}, {3, 3, 5, 5}, {3, 3, 6, 6}, {3, 4, 3, 4}, {3, 4, 4, 3}, {3, 5, 3, 5}, {3, 5, 5, 3}, {3, 6, 3, 6}, {3, 6, 6, 3}, {4, 1, 1, 4}, {4, 1, 4, 1}, {4, 2, 2, 4}, {4, 2, 4, 2}, {4, 3, 3, 4}, {4, 3, 4, 3}, {4, 4, 1, 1}, {4, 4, 2, 2}, {4, 4, 3, 3}, {4, 4, 5, 5}, {4, 4, 6, 6}, {4, 5, 4, 5}, {4, 5, 5, 4}, {4, 6, 4, 6}, {4, 6, 6, 4}, {5, 1, 1, 5}, {5, 1, 5, 1}, {5, 2, 2, 5}, {5, 2, 5, 2}, {5, 3, 3, 5}, {5, 3, 5, 3}, {5, 4, 4, 5}, {5, 4, 5, 4}, {5, 5, 1, 1}, {5, 5, 2, 2}, {5, 5, 3, 3}, {5, 5, 4, 4}, {5, 5, 6, 6}, {5, 6, 5, 6}, {5, 6, 6, 5}, {6, 1, 1, 6}, {6, 1, 6, 1}, {6, 2, 2, 6}, {6, 2, 6, 2}, {6, 3, 3, 6}, {6, 3, 6, 3}, {6, 4, 4, 6}, {6, 4, 6, 4}, {6, 5, 5, 6}, {6, 5, 6, 5}, {6, 6, 1, 1}, {6, 6, 2, 2}, {6, 6, 3, 3}, {6, 6, 4, 4}, {6, 6, 5, 5}}
2,008
3,900
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.96875
4
CC-MAIN-2022-33
longest
en
0.790874
http://mymathforum.com/probability-statistics/19531-confusing-probability-questions-2.html
1,571,871,192,000,000,000
text/html
crawl-data/CC-MAIN-2019-43/segments/1570987836368.96/warc/CC-MAIN-20191023225038-20191024012538-00546.warc.gz
134,168,001
10,544
My Math Forum confusing probability questions User Name Remember Me? Password Probability and Statistics Basic Probability and Statistics Math Forum May 28th, 2011, 09:47 PM #11 Member   Joined: May 2011 Posts: 51 Thanks: 0 Re: confusing probability questions I give up. May 29th, 2011, 10:42 PM #12 Senior Member   Joined: Dec 2010 Posts: 233 Thanks: 0 Re: confusing probability questions Anyone can help to solve question 2 part (c) and (d)? May 31st, 2011, 06:01 AM   #13 Senior Member Joined: Sep 2010 Posts: 101 Thanks: 0 Re: confusing probability questions Quote: Originally Posted by hoyy1kolko Anyone can help to solve question 2 part (c) and (d)? Quote: Originally Posted by rudrax I don't know what is asked in 'd', first I assumed there was some typo... For 'c' - Take three girls...let's make the problem little more interesting, so we will take three hot girls A, B and C Make groups of 2 girls, how many options 3C2=3=AB,BC,CA Here we say AB=BA means order doesn't mater, but A as Opposer is not equals to A as Proposer So in AB itself A has 2 options and B has 1(the remaining option)=2! options AB(where A is Proposer, B is Opposer), BA(where B is Proposer, A is Opposer) Then the correct count should be 3C2*2! or nCr*r! or 3P2 June 4th, 2011, 04:22 PM #14 Senior Member   Joined: Apr 2007 Posts: 2,140 Thanks: 0 1. There are total of 6 balls = 5 black balls + 1 white ball. Alan (1): Probability of winning = 1/6 and probability of not winning = 5/6 If Alan didn't win by the probability of 5/6, then 5 total = 4 black balls + 1 white ball. Bill (2): Probability of winning = 1/5 and probability of not winning = 4/5 We repeat this process until there are two black balls and one white ball left. Assuming that Bill does not win by probability of 2/3, now it's Alan's first turn for one black ball and one white ball remaining. If both Alan and Bill take turns and both of them not winning by probability of 1/2 until the last turn of Bill, therefore winning by probability of 1/2. 
, , , , , , , , , , , , , , # 7/20 of the pupils in a class are boys. The rest are girls. Find the percentage of girls in the class. Click on a term to search for related topics. Thread Tools Display Modes Linear Mode Similar Threads Thread Thread Starter Forum Replies Last Post silvercats New Users 7 April 26th, 2012 06:16 PM hoyy1kolko Probability and Statistics 5 April 15th, 2011 10:22 PM hoyy1kolko Probability and Statistics 1 April 9th, 2011 08:54 AM daivinhtran Probability and Statistics 1 March 29th, 2011 04:53 PM silvercats Number Theory 1 December 31st, 1969 04:00 PM Contact - Home - Forums - Cryptocurrency Forum - Top
761
2,670
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.1875
4
CC-MAIN-2019-43
latest
en
0.949497
https://de.mathworks.com/matlabcentral/cody/problems/195-program-an-exclusive-or-operation-with-logical-operators/solutions/1174398
1,585,519,929,000,000,000
text/html
crawl-data/CC-MAIN-2020-16/segments/1585370496227.25/warc/CC-MAIN-20200329201741-20200329231741-00290.warc.gz
426,825,270
15,277
Cody # Problem 195. Program an exclusive OR operation with logical operators Solution 1174398 Submitted on 28 Apr 2017 by Robert This solution is locked. To view this solution, you need to provide a solution of the same size or smaller. ### Test Suite Test Status Code Input and Output 1   Pass a=0;b=0; res=0; assert(isequal(myXOR(a,b),res)) 2   Pass a=1;b=1; res=0; assert(isequal(myXOR(a,b),res)) 3   Pass a=1;b=0; res=1; assert(isequal(myXOR(a,b),res)) 4   Pass a=0;b=1; res=1; assert(isequal(myXOR(a,b),res)) 5   Pass a=[0 0 1 1];b=[1 0 1 0]; res=[1 0 0 1]; assert(isequal(myXOR(a,b),res))
223
603
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.96875
3
CC-MAIN-2020-16
latest
en
0.50516
https://www.studypool.com/discuss/1251707/What-is-the-measure-of-angle-x-?free
1,480,971,396,000,000,000
text/html
crawl-data/CC-MAIN-2016-50/segments/1480698541839.36/warc/CC-MAIN-20161202170901-00102-ip-10-31-129-80.ec2.internal.warc.gz
1,021,325,688
14,166
##### What is the measure of angle x? Mathematics Tutor: None Selected Time limit: 1 Day Nov 4th, 2015 Since the given triangle has all the three sides equal the triangle is eqilateral and in a eqilateral triangle the angle at each vertex is  degrees. To prove it mathematically by law of cosines a^2=b^2 + c^2 -2 bc cos(A) where a,b,c are length of sides of triangle and A is the angle at vertex A. by applying the condition that a=b=c we get cos(A)=1/2 hence angleA is 60degress Nov 4th, 2015 ... Nov 4th, 2015 ... Nov 4th, 2015 Dec 5th, 2016 check_circle
174
567
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.90625
4
CC-MAIN-2016-50
longest
en
0.908823
https://www.toppr.com/guides/physics/semiconductor-electronics-materials-device-and-simple-circuits/junction-transistor-circuit-configurations-and-characteristics/
1,713,919,504,000,000,000
text/html
crawl-data/CC-MAIN-2024-18/segments/1712296818835.29/warc/CC-MAIN-20240423223805-20240424013805-00046.warc.gz
928,190,657
40,075
# Junction Transistor – Circuit Configurations and Characteristics In a junction transistor, the Emitter (E), Base (B) and Collector (C) are the only three terminals available. Hence, in any circuit, one of these terminals has to be common to both input and output connections. Therefore, the junction transistor can be connected in either of these configurations: • CE or Common Emitter • CB or Common Base • CC or Common Collector Among these, the junction transistor is most widely used in the Common Emitter configuration. Also, the n-p-n Silicon transistors are used more commonly than the p-n-p transistors. Hence, we will look at the characteristics and configurations of an n-p-n Silicon Junction Transistor in a CE configuration. ## Characteristics of a CE Junction Transistor In Common Emitter (CE) configuration, the emitter is the common terminal. Hence, the input is between the base and the emitter while the output is between the collector and the emitter. Two terms that you must remember: • Input characteristic – the variation of the base current (IB) with the base-emitter voltage (VBE) • Output characteristic – the variation of the collector current (IC) with the collector-emitter voltage (VCE) It is observed that the output characteristics are controlled by the input characteristics. Hence, the collector current changes with the base current. Let’s study them with the help of a circuit diagram shown below: ## Studying the Input Characteristics Also, a curve is plotted between the base current (IB) and the base-emitter voltage (VBE) to study the input characteristics of the junction transistor in CE configuration. The collector-emitter voltage (VCE) is kept at a fixed value to study the relation between IB and VBE. Since we intend to study the input characteristics when the transistor is in an active state, VCE is maintained at a large value. The value chosen is large enough to ensure reverse biasing of the base-collector junction. 
For a Silicon transistor, VCE = 0.6-0.7 V. Also, VCE = VCB + VBE Hence, VCE has to be maintained at a value much larger than 0.7 V. The approximate range of voltage is between 3 and 20 V. An increase in the value of VCE appears as an increase in the value of VCB. Hence, we get almost identical curves for various values of VCE. Also, determining one input characteristic is sufficient to understand curve as shown below: ## Studying the Output Characteristics To study the output characteristics, let’s plot a curve is between the Collector current (IC) and the collector-emitter voltage (VCE). Also, keep the base current (IB) at a steady value. Now, if the base-emitter current (VBE) is increased by a small amount, you can observe an increase in hole current from the emitter and electron current from the base regions. Hence, IB and IC increase proportionally. Or, if IB increases, IC increases too. So, keeping IB constant and plotting IC against VCE, you can make the following observations: For every value of IB, the plot of IC versus VCE displays one output characteristic. ## Calculation of Important AC Parameters of Junction Transistors Now, let’s calculate some important ac parameters of transistors using the linear segments of the input and output characteristics. ### Input Resistance (ri) Input resistance (ri) is the ratio of change in the base-emitter voltage (ΔVBE) to the subsequent change in base current (ΔIB) when the collector-emitter voltage (VCE) is kept constant. ri = (ΔVBE/ ΔIB)VCE This is ac resistance or dynamic resistance and which is obvious in the input characteristics since its value varies with the operating current in the transistor. ### Output Resistance (ro) Output resistance (ro) is the ratio of change in the collector-emitter voltage (ΔVCE) to the change in the collector current (ΔIC) when the base current (IB) is kept constant. 
ro = (ΔVCE/ ΔIC)IB A close look at the output characteristics reveals that initially, IC increases linearly for every small change in the value of VCE. The reason is simple – the base-collector junction is not reverse biased and the transistor is not active. On the contrary, the transistor is in a saturation state. Hence, the current is controlled by the supply voltage (VCC) in this part of the characteristic. In this state VCC = VCE. As VCE increases and reaches a value which is higher than that required to reverse bias the junction, IC increases marginally with increasing VCE. This reciprocal of the slope of the linear part of the output characteristics offers the value of ro. The bias of the base-collector junction primarily controls the output resistance of the transistor. Also, the output resistance (ro) is very high. This is because of the reverse-bias state of the diode. Hence, you observe that initially, the resistance is very low when the transistor is in a saturation state. ### Current Amplification Factor (β) Current Amplification Factor (β) is the ratio of change in the collector current (IC) to the change in base current (IB) when the collector-emitter voltage (VCE) is kept constant. Also, the transistor is in an active state. Now, the small signal current gain is βac = (ΔIC/ ΔIB)VCE This has a large value. On the other hand, if we take a simple ratio of IC and IB, we get βdc of the junction transistor. βdc = IC/IB It is important to note that IC and IB increase almost linearly. In simple words, if IB=0, then IC=0. Hence, the values of βac and βdc are nearly equal. ## Solved Examples for You Question: What are the input and output characteristics of a CE Junction Transistor? Solution: In a Common Emitter configuration of a Junction Transistor, the emitter is the common terminal. Input is between the base and emitter. The output is between the collector and emitter. 
Input characteristics are the variation of base current (IB) with the base-emitter voltage (VBE). Output characteristics are the variation of collector current (IC) with the collector-emitter voltage (VCE). Share with friends ## Customize your course in 30 seconds ##### Which class are you in? 5th 6th 7th 8th 9th 10th 11th 12th Get ready for all-new Live Classes! Now learn Live with India's best teachers. Join courses with the best schedule and enjoy fun and interactive classes. Ashhar Firdausi IIT Roorkee Biology Dr. Nazma Shaik VTU Chemistry Gaurav Tiwari APJAKTU Physics Get Started
1,441
6,391
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.140625
3
CC-MAIN-2024-18
latest
en
0.888848
https://en.wikipedia.org/wiki/Talk:Relativistic_electromagnetism
1,516,435,540,000,000,000
text/html
crawl-data/CC-MAIN-2018-05/segments/1516084889473.61/warc/CC-MAIN-20180120063253-20180120083253-00539.warc.gz
685,517,572
43,172
# Talk:Relativistic electromagnetism WikiProject Physics / Relativity  (Rated Start-class, Mid-importance) This article is within the scope of WikiProject Physics, a collaborative effort to improve the coverage of Physics on Wikipedia. If you would like to participate, please visit the project page, where you can join the discussion and see a list of open tasks. Start  This article has been rated as Start-Class on the project's quality scale. Mid  This article has been rated as Mid-importance on the project's importance scale. ## Regrets (I have a few!) I really wish Id not started this page now as it is too complex a subject to illustrate without diagrams that I cant draw! Suggestions on how to get out of this mess will be most welcome!--Light current 17:51, 30 April 2006 (UTC) Diagrams will be inserted to illustrate the text ASAP--Light current 16:03, 21 October 2005 (UTC) Having trouble getting the equations right. Please bear with me (or help!)--Light current 17:26, 21 October 2005 (UTC) Lots more work needed on article especially with diagrams & equations. Please bear with me.--Light current 01:46, 22 October 2005 (UTC) Started work on diagrams. Any help from persons with drawing packages gratefully recieved.--Light current 21:45, 22 October 2005 (UTC) ## Request for help I really need someone to help on improving my very basic (and not ver satisfactory) text based diagrams in this article. Any offers? You will be rewarded with thanks!!--Light current 00:30, 23 October 2005 (UTC) ## Cut from article to be worked on Using angle brackets to denote an average over all points on the shell, ⟨sin²θ⟩ = ⟨1 − cos²θ⟩ = 1 − ⟨x²⟩/R². Now since the origin is at the center of the sphere, the average value of x² is the same as the average value of y² or z²: ⟨x²⟩ = ⟨y²⟩ = ⟨z²⟩. (4.9) But this implies that ⟨x²⟩ = (1/3)⟨x² + y² + z²⟩ = (1/3)⟨R²⟩ = R²/3 since x² + y² + z² = R² and R is constant over the whole shell. 
Combining equations (4.8) and (4.10) gives ⟨sin²θ⟩ = 1 − R²/(3R²) = 2/3. (4.11) So the average energy per unit volume stored in the transverse electric field is q²a²/(48π²ε₀c⁴R²). ## The physical basis of electromagnetic radiation \ wall \ | \ | \ | \ | \ | \|B /|\ / | \ / | \ A/ | \C Consider a positively charged particle, initially traveling to the right at 1/4 the speed of light. It bounces off a wall at point B. The particle is now at point A, but if there had been no bounce it would now be at C. An imaginary circle (actually a cross section of a sphere) encloses the region of space where news of the bounce has already arrived; inside this circle the electric field points directly away from A. Outside the circle the news has not yet arrived, so the field points directly away from C. As time passes, the circle expands outward at the speed of light, and points A and C move away from B at 1/4 the speed of light. The field at your location points away from where the particle would be now if there had been no bounce. We know from special relativity that no information can travel faster than the speed of light. Assume that the information travels at precisely the speed of light, but no faster. This assumption, together with Gauss’s law, is enough to determine the electric field everywhere around the accelerated charge. The complete map of the electric field of an accelerated charge turns out to be quite complicated. An abstract representation in terms of field lines will be used instead. Field lines are continuous lines through space that run parallel to the direction of the electric field. A drawing of the field lines in a region therefore indicates immediately the direction of the electric field. A map of the field lines for the situation of figure is shown in figure . No field lines through the gray spherical shell in figure are shown, since this is the region that is just in the midst of receiving the news of the particle’s acceleration. 
To determine the direction of the field here, imagine a curved Gaussian “pillbox”, indicated by the dashed line in the figure, which straddles the gray shell. This surface is symmetrical about the line along which the particle is moving; but viewed from along this line, it would be circular. The Gaussian surface encloses no electric charge, so Gauss’s law tells us that the total flux of E⃗ through it must be zero. The direction of the field within the gray spherical shell can be found by considering the flux through the curved Gaussian “pillbox” indicated by the dashed line. Now consider the flux through various parts of the surface. On the outside (right-hand) portion there is a positive flux, while on the inside (left-hand) portion there is a negative flux. But these two contributions to the flux do not cancel each other, since the field is significantly stronger on the outside than on the inside. This is because the field on the outside is that of a point charge located at C, while the field on the inside is that of a point charge located at A, and C is significantly closer than A. The net flux through the inside and outside portions of the surface is therefore positive. To cancel this positive flux, the remaining edges of the pillbox must contribute a negative flux. Thus the electric field within the gray shell must have a nonzero component along the shell, in toward the center of the Gaussian surface. This component is the transverse field, since it points transverse (i.e., perpendicular) to the purely radial direction of the field on either side. To be more precise about the direction of the field within the gray shell, consider the modified Gaussian surface shown in figure. Since the flux along segment cd must be zero, the electric field within the gray shell must be parallel to this segment. until it subtends the same angle, as viewed from C, that the inner surface ab subtends as viewed from A. Now the fluxes through ab and ef do indeed cancel. 
Segments bc and de are chosen to be precisely parallel to the field lines in their locations, so there is no flux through these portions of the surface. In order for the total flux to be zero, therefore, the flux must be zero through segment cd as well. This implies that the electric field within the gray shell must be parallel to cd. If you start at A and follow any field line outward, you will turn a sharp corner at the gray shell’s inner edge, then make your way along the shell and slowly outward, turning another sharp corner at the outer edge. The thickness of the gray shell is determined by the duration of the acceleration of the charge. A complete drawing of the field lines for this particular situation is shown in figure. The transverse portion of the electric field of an accelerated charge is also called the radiation field, because as time passes it “radiates” outward in a sphere expanding at the speed of light. If the acceleration of the charged particle is sufficiently great, the radiation field can be quite strong, affecting faraway charges much more than the ordinary radial field of a charge moving at constant velocity. The radiation field can also store a relatively large amount of energy, which is carried away from the charge that created it. ### Strength of the radiation field. A complete sketch of the electric field lines for the situation shown in the preceding figures, including the transverse radiation field created by the acceleration of the charge. Consider a somewhat simpler situation, in which a positively charged particle, initially moving to the right, suddenly stops and then remains at rest. Let v0 be the initial speed of the particle, and let the deceleration begin at time t = 0 and end at time t = t0. Assume that the acceleration is constant during this time interval; the magnitude of the acceleration is then a = |a⃗| = v0/t0. 
Assume that v0 is much less than the speed of light, so that the relativistic compression and stretching of the electric field discussed above is negligible. Figure shows the situation at some time T, much later than t0. The “pulse” of radiation is contained in a spherical shell of thickness ct0 and radius cT. Outside of this shell, the electric field points away from where the particle would have been if it had kept going; that point is a distance v0T to the right of its actual location. (The distance that it traveled during the deceleration is negligible on this scale.) (Figure labels: R = cT, v0T sin θ, ct0, θ; for clarity, only a single field line is shown here.) A single field line is shown in the figure, coming out at an angle θ from the direction of the particle’s motion. There is a sharp kink in this line where it passes through the shell, as discussed in the previous section. The radial component Er of the kinked field can be found by applying Gauss’s law to the pillbox shown. How strong is the electric field within the shell? Break the kinked field up into two components: a radial component Er that points away from the location of the particle, and a transverse component Et that points in the perpendicular direction. The ratio of these components is determined by the direction of the kink; see that ${\displaystyle {\frac {E_{t}}{E_{r}}}={\frac {v_{0}T\sin \theta }{ct_{0}}}={\frac {aT\sin \theta }{c}}}$. We can find the radial component Er by applying Gauss’s law to a tiny pillbox that straddles the inner surface of the shell. Let the sides of the pillbox be infinitesimally short so that the flux through them is negligible. Then since the net flux through the pillbox is zero, the radial component of E (that is, the component perpendicular to the top and bottom of the pillbox) must be the same on each side of the shell’s inner surface. But inside the sphere of radiation the electric field is given by Coulomb’s law. Thus the radial component of the kinked field is ${\displaystyle E_{r}={\frac {1}{4\pi \epsilon _{0}}}{\frac {q}{R^{2}}}}$, where q is the charge of the particle.
Combining equations and using the fact that R = cT, it can be shown that ${\displaystyle E_{t}={\frac {qa\sin \theta }{4\pi \epsilon _{0}c^{2}R}}}$. Looking back at the figure, we see that the size of the kink in the field is a qualitative indication of the field strength. Also, the strength of the transverse field is proportional to a, the magnitude of the particle’s acceleration. The greater the acceleration, the stronger the pulse of radiation. This pulse of radiation carries energy. The energy per unit volume stored in any electric field is proportional to the square of the field strength. This implies ${\displaystyle {\text{Energy per unit volume}}\propto {\frac {a^{2}}{R^{2}}}}$. Since the volume of the spherical shell (the shell itself, not the region it encloses) is proportional to R2, the total energy it contains does not change as time passes and R increases. Thus when a charged particle accelerates, it loses energy to its surroundings, in an amount proportional to the square of its acceleration. This process is the basic mechanism behind all electromagnetic radiation, from visible light and radio waves to gamma rays. ### The Larmor Formula #### Energy radiated by a charged particle In any electric field, the energy stored per unit volume is ${\displaystyle {\frac {\epsilon _{0}}{2}}E^{2}}$. The total energy in the pulse is: ${\displaystyle Ep={\frac {e^{2}a^{2}t}{6\pi \epsilon _{0}c^{3}}}}$ where t is the duration of the acceleration. If we divide both sides by the duration t of the particle's acceleration, we obtain the power P: ${\displaystyle P={\frac {e^{2}a^{2}}{6\pi \epsilon _{0}c^{3}}}}$ where a is the acceleration, e is the electronic charge, epsilon0 is the permittivity of free space and c is the speed of electromagnetic radiation. This is the Larmor formula. If the direction in which the energy goes is not important, we can average equation (4.7) over all directions. Using a mathematical device, introduce a coordinate system with the origin at the center of the sphere and the x axis along the particle’s original direction of motion.
Then for any point (x, y, z) on the spherical shell, cos θ = x/R. To obtain the total energy stored in the transverse electric field, we must multiply equation by the volume of the spherical shell. The surface area of the shell is 4πR2 and its thickness is ct0, so its volume is the product of these factors. Therefore the total energy is ${\displaystyle {\text{Total energy in electric field}}={\frac {q^{2}a^{2}t_{0}}{12\pi \epsilon _{0}c^{3}}}}$. The total energy is independent of R; that is, the shell carries away a fixed amount of energy that is not diminished as it expands. There is also a magnetic field, which carries away an equal amount of energy. Many details about magnetic fields have been omitted. A factor of 2 would need putting in. Thus the total energy carried away by the pulse of radiation is twice that of the previous equation. When a charged particle accelerates, part of its electric field breaks free and travels away at the speed of light, forming a pulse of electromagnetic radiation. Often, in practice, charged particles oscillate back and forth continuously, sending off one pulse after another in a periodic pattern. An example of the electric field around an oscillating charge is shown in figure. A map of the electric field lines around a positively charged particle oscillating sinusoidally, up and down, between the two gray regions near the center. Points A and B are one wavelength apart. If you follow a straight line out from the charge at the center of the figure, you will find that the field oscillates back and forth in direction. The distance over which the direction of the field repeats is called the wavelength. For instance, points A and B in the figure are exactly one wavelength apart. The time that it takes the pattern to repeat once is called the period of the wave, and is equal to the time that the source charge takes to repeat one cycle of its motion. The period is also equal to the time that the wave takes to travel a distance of one wavelength.
Since it moves at the speed of light, we can infer that the wavelength and the period are related by speed = wavelength/period, or c = λ/T, where λ (“lambda”) is the standard symbol for wavelength, T is the standard symbol for period, and c is the speed of light. ### Electromagnetic Waves When a charged particle accelerates, part of its electric field breaks free and travels away at the speed of light, forming a pulse of electromagnetic radiation. Often, in practice, charged particles oscillate back and forth continuously, sending off one pulse after another in a periodic pattern. An example of the electric field around an oscillating charge is shown in figure. If you follow a straight line out from the charge at the center of the figure, you will find that the field oscillates back and forth in direction. The distance over which the direction of the field repeats is called the wavelength. For instance, points A and B in the figure are exactly one wavelength apart. If you sit at a fixed point and watch the electric field as it passes by, you will again find that its direction oscillates. The time that it takes the pattern to repeat once is called the period of the wave, and is equal to the time that the source charge takes to repeat one cycle of its motion. The period is also equal to the time that the wave takes to travel a distance of one wavelength. Since it moves at the speed of light, we can infer that the wavelength and the period are related by speed = wavelength/period, or c = λ/T, where λ (“lambda”) is the standard symbol for wavelength, T is the standard symbol for period, and c is the speed of light. The frequency of an oscillation or a wave is the reciprocal of the period. extracted/modified from Daniel V Schroeder paper ## Off the beaten track in Wikipedia As the person who started it, I tend to agree with you. It was a mistake.
But maybe some bits of it might be saved for use in other articles.--Light current 22:59, 21 November 2006 (UTC) OTOH, maybe it could be pruned down to the bare min to explain the concept only. Other more knowledgeable editors may then wish to expand it properly in the fullness of time!--Light current 23:18, 21 November 2006 (UTC) ## How do we know that Purcell was the first to take this approach to electromagnetism & relativity ? Are there any sources that show conclusively that this had never been done before Purcell ? The reason I ask this is that I attended a brilliant lecture course in 1968 (yes - that's after Purcell) on electromagnetism & relativity in which the lecturer derived all of Maxwell's equations from just two postulates: the invariance of c (velocity of light) and e (charge of an electron) through all the standard transformations of special relativity. I don't recall any reference having been made to Purcell's work, though it's feasible that the lecturer had made use of some of his material. My suspicion is that the ideas go back a lot earlier than 1963. (Incidentally, I'm currently reading Subtle is the Lord by Abraham Pais – it was that which led me to browse Wikipedia for articles related to electromagnetism & relativity and hence find this one.) DFH 20:27, 23 November 2006 (UTC) That's what Daniel V Schroeder said in his paper on this subject.--Light current 23:41, 23 November 2006 (UTC) In my 1956 edition of the "Encyclopaedia Britannica" (before Purcell) article on "electricity" v8, p159, it is shown how the "magnetic force" between two parallel wires of current is really an electrostatic attraction due to special relativity. However, the math only went as far to show qualitatively that special relativity demands: A/R = (i/c)^2 where A=attraction due to i, R = repulsion between free electrons. Understanding the connection is basic to a good theoretical understanding of special relativity.
Feynman in his famous 1964 physics course derives this relationship in v2 13-6. Ywaz (talk) 17:45, 17 July 2010 (UTC) ## Source material Here [1] is the source material for this article. See if YOU can rewrite it--Light current 03:29, 1 January 2007 (UTC) ## Another explanation on "The origin of magnetic forces" I have a more simple explanation on "why" magnetic forces appear. However, I'm not sure whether it's correct or not: Consider two identical charged objects with each with mass m moving at the same velocity v at a distance 2x from each other perpendicular to the velocity. Since their time slows down when they move, they repel each other less (slower) than we expect them to. Therefore, we say there's a magnetic force counteracting the repulsive electric force. Mathematically: The objects' masses, charges and distance are the same in their reference frame as in the observer's. Due to time dilation, however, the acceleration - and therefore also the electric forces - observed in the observer's frame (primed) will be different: ${\displaystyle F'=m\cdot a'=m\cdot {\frac {d^{2}x}{d(t')^{2}}}=m\cdot {\frac {d^{2}x}{dt^{2}}}\cdot (t/t')^{2}=m\cdot a\cdot {\sqrt {1-(v/c)^{2}}}^{2}=F\left(1-(v/c)^{2}\right)}$ The observer concludes that there must be a magnetic force accounting for the difference: ${\displaystyle F_{m}=F'-F=F\left(1-(v/c)^{2}\right)-F=-(v/c)^{2}F}$ Pellishau (talk) 13:04, 18 January 2008 (UTC) ## Electricity from magnetism? This article describes how magnetic fields originate from electrostatics and special relativity. However, it is not apparent to me how the same principles can be used to explain how magnetic fields create electrical currents. This seems like an important subject that needs to be addressed to complete this article. Just because when you have an electric field from magnetic one (relation can be reverse), you then have a voltage difference, or electromotive force. 
— Preceding unsigned comment added by Klinfran (talkcontribs) 08:00, 1 September 2011 (UTC) 192.104.67.122 (talk) 18:17, 4 August 2009 (UTC)db —Preceding unsigned comment added by 192.104.67.122 (talk) 17:05, 4 August 2009 (UTC) Collapsed conversation—not really on topic on article talk page anymore I think its best to severely down-grade the term magnetism; what this is all about is that electric charge in motion and relativity explains everything called magnetism. Magnetism is just the name given to a force that wasn't seen as electric force because relativity hadn't been understood. So the question 'how magnetic fields create electrical currents' is sort of, how an electric charge causes a force on another charge (which is just how it is), and is also then exactly the same physics as the other way round, ie 'how electrical currents create magnetic fields' they both always exist in unison. (note.1 changing the viewers velocity changes the interpretation of what is and what isn't a magnetic field, because its a poor non-fundamental conceptualisation of the physics.) (note.2 the term electromagnetism is fundamentally replaced by electro-weak, because the weak nuclear force has the same basis as electric force, magnetism is just a relativistic side-effect of a sub-category of a 'real' force.) also the 'causes' part of the question isn't quite clear, 'cause' can really only refer to the way energy moves though the system, because the physics of the currents and magnetic fields do not contain causality, better to think of them as a single thing, and that energy (driving changes in the system) can be added, (the 'cause'), too and/or taken from, (the 'effect'), many different places in this thing, one or more interpreted as a magnetic effect, or not, depending of the viewers relative speed.
so basically, the use of the term Magnetism, only for historical reference and possibly an example of an evolution in understanding, would have great benefits to education.—Preceding unsigned comment added by 86.175.21.115 (talk) 23:24, 30 January 2011 (UTC) Are you arguing that the electricity is a more fundamental force of nature than magnetism? absolutely, i guess you didn't really understand this then. If so, could you explain why, and what reliable sources support this? and you ignore Einstein's quote, at the top of the article, why? All you're saying above is, "starting with electricity and special relativity, you can derive that there has to be magnetism". Yes, OK. It is also true that "starting with magnetism and special relativity, you can derive that there has to be electricity". Actually we know that neither electricity nor magnetism is a fundamental force of nature: They are emergent phenomena. The true law of nature is quantum electrodynamics (QED), and in the classical limit QED gives rise to both electricity and magnetism simultaneously. Again, the derivation is "QED --> Electromagnetism", not "QED --> Electric force, then Electric force --> Magnetic force". well apart from QM having nothing to do with it, this is all classical, i remember this from Feynmans lectures, and didn't he basically invent QED. as a visual aid try:- (Q + relativity + QM)== QED and see magnetism is not needed at all The stuff in this article, Purcell's textbook, etc., is good pedagogy, because people find the electric force intuitive and special relativity sort-of-intuitive-eventually, but somehow the magnetic force is much more unintuitive. So it's helpful to show students that if you "believe in" electricity and "believe in" special relativity than you have to also "believe in" magnetism. 
But you shouldn't get carried away and think that as a matter of fundamental physics (not physics pedagogy) the electric force is a more basic and fundamental force in the universe than the magnetic force. the 'unintuitiveness' of magnetism IS the 'unintuitiveness' of relativity. try picturing the Magnetic force like the Coriolis force, each a lower-dimensional 'fictitious' force of a more fundamental higher-dimensional reality. By the way, if you're more interested in practical matters, please feel free to try to write out all the laws of electromagnetism (at least including Maxwell's equations and the Lorentz force) without mentioning magnetism or the magnetic field, and see whether this way they become simpler or more complicated. I think you will find it is much more complicated! If you finish that exercise, a more challenging exercise is to try to explain spin–orbit coupling without mentioning magnetism. :-) --Steve (talk) 01:44, 31 January 2011 (UTC) again forget QM, not relevant at all, really do yourself a favor and try harder to get this, its a really good one to understand. Feyman lectures are good on this. this has a lot of parallels with; Energy == Mass, and for pretty much the same reasons, and that, i'd have to say is MUCH harder to get intuitively. and so educationally, 'getting' this concept is immensely powerful. In the future could you please write blocks of text instead of interspersing your comments? It makes it easier for people to read the whole conversation later on. Thank you. i disagree Here is Richard Feynman answering the question "Why do bar magnets attract each other?" His answer is basically, Because electrical forces and magnetic forces are fundamental forces of nature. He does not mention special relativity at all, and he talks on and on about what is the ultimate cause of magnetic forces but never says that the answer is electrical forces. 
In fact he clearly puts electrical forces and magnetic forces on the same level, just saying they're intimately related (which I certainly agree with). I don't think I've seen Feynman say anywhere that electricity is more fundamental and magnetism is less fundamental, and I read his whole "Lectures" textbook series. What page number of what book does he say this? from memory, a very long time back, middle of book 3, but you have to understand it not just read it. You are making the argument that magnetism is a less fundamental force of nature than electricity, and that magnetism is ultimately caused by electricity. It's shocking that you would think QED, which is our most fundamental understanding of electromagnetism, has nothing to do with whether this is true or false. I'm saying "Most basic and fundamental laws of physics ==> Electromagnetism" and you're saying "Most basic and fundamental laws of physics ==> Electricity ==> Magnetism". We can't decide on this question if you're unwilling to discuss the "Most basic and fundamental laws of physics", namely the standard model of particle physics including QED (or maybe string theory from which you derive the standard model). Could you please explain "(Q + relativity + QM)== QED"? What does "Q" mean? charge. The Einstein quote is emphasizing that a magnetic force can also be viewed as an electric force. I don't see how he's saying that the first view is wrong (or narrow-minded or whatever) and the second view is right. But it's hard to say for sure in a quote from a 3-paragraph commemorative letter. Didn't Einstein write other things too? For example, if Einstein that thought electricity was fundamental and magnetism was unnecessary, then he sure didn't say so in his lecture notes on electromagnetism. 
His memorable moving magnet and conductor example is notably symmetric in its treatment of electricity and magnetism: You can view it as an electric force or a magnetic force, but he didn't say that one view was more correct than the other. In any case, it doesn't really matter because the fundamental nature of classical electromagnetism (namely, that it is fundamentally nothing more than an approximation to QED) was not understood in Einstein's day. The Coriolis effect is indeed a "fictitious force", and not coincidentally if you use an inertial frame of reference the Coriolis effect disappears from the laws of physics, which become much simpler as a consequence. On the other hand, there is no inertial frame of reference where magnetism disappears from the laws of physics, you need to think 4-dimensionally. and as I said above, if you try to write Maxwell's equations (let alone the spin-orbit coupling equation) Maxwells equations are formulated in terms of magnetism, they now just disappear, or rather ARE an expression of relativity, so this is the ultimate simplification. This is often referred too as a unification. without explicitly mentioning magnetism, it becomes not simpler but massively more complicated and very contrived. That's part of the reason I reject your analogy between the Coriolis force and the magnetic force. --Steve (talk) 20:56, 5 February 2011 (UTC) there must be plenty of resources on the internet that explain this better than i can. http://en.wikibooks.org/wiki/Electrodynamics/Relativistic_Electromagnetism http://easther.physics.yale.edu/Richard_Easther/Relativistic_E_and_M.html http://www.mtholyoke.edu/courses/tdray/phys310/electromag.pdf of course the article itself lists many resources that use this as the basis for teaching, so read any of those. Maybe I should be more specific. I AGREE that electricity and magnetism are intimately linked together by special relativity. 
I AGREE that an electric force in one frame of reference may be a magnetic force in another frame of reference or vice-versa. These things are basic and uncontroversial. I DISAGREE that electricity is more fundamental and magnetism is less fundamental. I believe that the vast majority of physicists would also disagree. I see electricity and magnetism as being like Siamese twins: Equally important and linked together. You are saying they are "Parent and child", where electricity is the cause and magnetism is the effect. When I say "Siamese twins", I mean something similar to space and time. No one would say "According to special relativity, the reason that time exists is because of space", and no one would say "According to special relativity, the reason that space exists is because of time". Instead they say "According to special relativity, space and time are linked together into spacetime and neither is possible without the other". My objection is to the asymmetry in your point of view. In special relativity, the electric field and magnetic field are put together into a single object, the electromagnetic tensor F. The whole beauty of special relativity is in illuminating how the different components of a covariant tensor like F are all linked together. It goes against everything in special relativity to say that six of the nonzero components of F are the "fundamental cause" and the other six nonzero components of F are the "non-fundamental effect". But that's exactly what you're saying. You're saying that ${\displaystyle F^{01},F^{02},F^{03}}$ (the electric field) is a fundamental and important part of the universe, whereas ${\displaystyle F^{12},F^{23},F^{13}}$ are non-fundamental effects which only exist because of the components ${\displaystyle F^{01},F^{02},F^{03}}$. 
The insights of special relativity show that it makes no sense to think along those lines: All the tensor components are interconnected into a single whole that should not be split up conceptually any more than it can be split up physically. Why are you posting links like this one? This does not say anywhere that electricity is more fundamental and magnetism is less fundamental. It says that the two are intimately linked together in special relativity, something that, again, I understand very well. Feynman's textbook says the same thing. Again, I am saying electricity and magnetism are "Siamese twins", and you are saying they are "Parent and child". You are welcome to try to find a page number in Feynman that says that electricity is the parent and magnetism is the child. I cannot find any. I find lots of discussion that implies they are Siamese twins. When you say, "(Q + relativity + QM)== QED", I guess you're saying that combining the laws of classical electrostatics with the laws of special relativity and the laws of non-relativistic quantum mechanics, you get the laws of QED. Q(quantum==QM) E(electro==Q) D(dynamics==relativity) Are you really a QED expert? Because I have studied QED, and I believe your statement is completely false. QED contains much much more than just a concatenation of the previously-known laws of physics. QED does not come from the previously-known laws of physics: That's backwards. The previously-known laws of physics come from QED (plus the rest of the standard model). I hope you agree that this is fundamentally the correct way to think about the laws of physics: The most exact laws are always the most fundamental laws. --Steve (talk) 23:56, 5 February 2011 (UTC) maybe i didn't make this clear enough; the question you originally asked, at the top, is basically a product of the misunderstanding of magnetism, until you can allow for that, there will never be an answer to the question, because the question has no meaning. 
BTW QED is consistent with relativity, does not 'produce' it, if it did it would predict gravity. anyway if you've done such a course, recently?, (because they were really rare things when i did one.) wouldn't you be able to get hold of a real physicist (not one of those self styled quantum-mecahanicists) to go through this with you, because (and you're going to hate this, but its really true.) its more fundamental than QED. i really appreciated the day, and can actually remember the circumstances when i suddenly could 'see' this. How's this: The professor who taught my QED/QFT course has recently posted a long rant on this exact topic: [2]. He was a professor of theoretical physics at Harvard University, I hope that counts as a "real physicist". --Steve (talk) 21:39, 7 February 2011 (UTC) Yes thats a rant, and a completely useless way to answer a question. and he doesn't ever appear to have been a professor. interesting link further down thought, should be included here; http://physics-quest.org/Magnetism_from_ElectroStatics_and_SR.pdf it claims to derive magnetism from charge+relativity, in a way that covers the current being produced by charges with a general velocity distribution, something that i have worried about, i'll check through it sometime. —Preceding unsigned comment added by 86.175.122.183 (talk) 16:28, 8 February 2011 (UTC) Lubos was indeed an assistant professor at Harvard for a number of years, including when he taught my QFT class. I don't know why you're arguing against facts that can be easily checked. OK, here is proof. i really dont want to disparage qualifications but just look up what an "assistant professor" and a "professor" are, particularly look up the AMERICAN meaning of the terms. Can you please clarify a few points for me: • In inertial reference frame A, a particle feels a magnetic force. In inertial reference frame B, a particle feels an electric force. 
Do you agree or disagree with this statement: "The physical analysis in frame B is fundamentally correct, and the physical analysis in frame A is fundamentally incorrect." There're both right, that's because Magnetism is really only a 'name' mistakenly given the electric force, when viewed from a moving frame and not analysing it relativistically. again compare with 'Coriolis force' its 'real' in an accelerating 2-dimension frame, because that's how its defined, it has no other meaning. • I am holding two bar magnets stationary and near each other on my desk. They are attracting each other. Do you agree or disagree: "In the correct inertial reference frame, the force between the magnets can be entirely explained as electrical attraction." If you agree, what is the velocity of this inertial frame? (Relative to the rest frame of the desk and the magnets.) NO, in a magnet with 'poles' the charges are moving in various directions, (think of a solenoid instead of a bar magnet, its exactly the same fields. ) its only in the special case when the charges are all moving in the same direction is it possible to find a 3D reference frame where the force is interpreted as purely electric force. The general solution is always 4-dimensional, try to get past 'reference frames', they are just a way to reduce the real world of 4-dimensions, to the 3-dimensions people feel happier with, and its that, physically meaningless process, that 'creates' the illusion of the magnetic field. • Do you agree or disagree with this statement: "It is a conceivable possibility that magnetic monopoles exist in the universe." thats a bit more interesting and involved, yes AND no, it hinges on a very clear definition of a monopole. I generally assume its like 'spin' where the name is used because of a significant number of similarities with actual rotation, but it could have had, and might have been better, if it a new made-up name, to stop people extrapolating properties outside the QM world. 
So i wouldn't be surprised if a 'monopole' were found one day, but i don't think it would have significance here. Obviously i think finding 'half a magnet' would be impossible. I hope you answer these, it will help me understand your point of view. Thanks! --Steve (talk) 04:58, 10 February 2011 (UTC) from my side, having seen and having myself struggled to get an intuitive grasp of relativity, your 'issues' with this are quite familiar. if you can understand the problem below, then your relativistic understanding should be very well up to seeing this. if two twins travel apart so that for each the other ages at a slower rate, and the same for a return journey, since this is all symmetrical, why don't their ages stay synchronised? Steve and anon BT CENTRAL PLUS, would you please continue this on your talk page (for instance at User_talk:Sbyrnes321/Electricity from magnetism—feel free to move the conversation), as this is obviously never going to end? This back and forth between you seems no longer on topic here, since this talk page is not meant for discussing the subject or someone's understanding or misunderstanding thereof—see WP:TPG. Thanks. - DVdm (talk) 23:01, 10 February 2011 (UTC) Ummmm, "talk page is not meant for discussing the subject or someone's understanding or misunderstanding thereof—" ? this is a discussion page unless i'm doing this wrong, and this article is currently focused toward 'Relativistic electromagnetism' as a teaching scenario, the physics is contained elsewhere,(although the title should emphasise that really.) so is also about comprehending, in that context i think this discussion has a great deal of relevance, if only to display some of the problems. if the page had no focus on education i would agree. —Preceding unsigned comment added by 86.175.125.189 (talk) 00:35, 11 February 2011 (UTC) Well I agree with DVdm, I started User talk:Sbyrnes321/Electricity from magnetism. 
:-) --Steve (talk) 01:30, 11 February 2011 (UTC) Ok, I have collapsed the section again. As anon seems not to be prepared to adhere to talk page guidelines (no signatures and improper indentation), the above has become as good as unreadable anyway. Enjoy over there. Cheers - DVdm (talk) 12:20, 11 February 2011 (UTC) I'm thrilled to see my original comment started such a discussion. My poor wording drove the discussion in a different direction than I intended. I agree the electricity and magnetism are the same thing looked at from two different viewpoints. But from a practical aspect, to make this point clear you need to start with one and derive the other. This article does that by starting with ampere's law and lorentz contraction combined with some vector mechanics to show that magnetism is the same as electricity. Instead of stating that the article should address how "magnetic fields create electrical currents", I should have said that the article should include a vector analysis using the principles already presented that shows why an accelerating or decelerating electrical charge field induces a neighboring electrical charge field to accelerate or decelerate. (or that an accelerating or decelerating electrical charge field resists its own acceleration or deceleration) — Preceding unsigned comment added by 192.104.67.222 (talk) 14:41, 30 November 2011 (UTC) ## Education and training This article relates to the education and training of electrical and electronics engineers. The references show that in the 1960s there was a move to introduce special relativity into the teaching of electromagnetic fields. The textbook by Corson & Lorrain was perhaps the apex of this movement. Since the symmetries of relativity are exactly the symmetries of the EM field, there is good reason to treat the subjects together. However, at most, this program of study provides some intuition on how magnetic and electric fields are observer-dependent. 
Full derivation of the Maxwell equations is an overstatement. Some investigation is necessary to find why this initiative did not take hold. Edits to the page from today could expand from Purcell to the other sources, and explore engineering and physics education for traces of the movement.Rgdboer (talk) 01:53, 19 November 2009 (UTC) The additional sources are now mentioned in the lead. Further, A. Einstein's comment on magnetism being electrical has been quoted and sourced. Scanning the sources for appropriate lesson plans may produce material to improve the article.Rgdboer (talk) 03:19, 20 November 2009 (UTC) ## Origin of magnetic force I very doubt about the contraction of lenght that is exposed to talk about the origin of magnetic force, as it is described in most book. Why? First because the case is the one of an infinite wire, thus with an open finite wire, because of conservation of charges, if the whole negative lign, for example, contracts, then the tips will be positive, if you apply it to a closed loop, as there is speed of electron everywhere, and no motion of protons, you will have charge accumulations on some parts, very strange that charges are uniformly reparted in one frame and concentrated in another, this could be because of simultaneity, but simultaneity and time dilation could work in the opposite because charges have a certain pace, regular, to cross the wire section, and accelerate at each angle of the wire. Secondly does this contration is in addition to the transformation of electric field of each charge, that is also an outcome of lenght contration, then does both effects accumulate? This needs a simulation, because books have been written a long time ago.Klinfran (talk) 07:57, 1 September 2011 (UTC) Do you say that accelecation can decrease the density? Why? Relativity says only about speed and only about contraction. Furthermore, it does not matter in which direction the charges move. 
So, acceleration, which only changes the direction, should not change anything. The neutral wire should become negatively charged when current is created in it. --Javalenok (talk) 12:43, 7 May 2013 (UTC) ## Something is wrong with the magnetic induction B Extended content When we analyze the interaction between two current-carrying conductors we first represent the magnetic induction "B" following the right hand rule. But, if you look back at the origin of this representation you can find that it comes from Faraday and the orientation of the magnetic induction "B" was based on the pattern of iron filings formed around a magnet bar which resemble "field lines" connecting the two poles. If you look closer, the iron filing particles also can be considered tiny magnets and their orientation will always be such that their internal atomic currents that create their magnetic field will be aligned with the internal atomic currents of the large permanent magnet bar, so they will tend to have the same direction! The independent "lines" are formed because the adjacent particles of iron will align their internal currents with the stronger current of the large permanent magnet bar while between the "lines" there will be repulsion; also, particles of iron from a "line" will form chains with the top and the bottom particle so that their internal currents will be aligned too. So now from the two parallel electric current situation we arrive at another two parallel electric current situation! There is no such thing as a magnetic induction "B" that is tangent to the conductors or circles the conductor... it's just an illusion! Can you see the error? — Preceding unsigned comment added by 71.185.132.57 (talk) 01:00, 30 June 2013 (UTC) This is where we discuss format and content of the article, not (parts of) the subject — see wp:talk page guidelines. Questions like this might be welcome at the wp:reference desk/science. Good luck. 
- DVdm (talk) 11:23, 1 July 2013 (UTC) ## Electromagnetic theory of radiation is still valid? If the magnetic field is really an electric field, doesn't this mean that the electromagnetic propagation of radiation is invalid? — Preceding unsigned comment added by 71.185.132.57 (talkcontribs) 03:01, 7 July 2013 (UTC) DVdm has already indicated to you in the previous thread that this is not the forum for this kind of discussion; this question does not relate to editing the article. Also do not remove talk page content as you did, and remember to sign your posts on talk pages with four tildes: (~~~~). Please follow the suggestions. — Quondum 03:14, 7 July 2013 (UTC) ## Introductory sentence "Relativistic electromagnetism is a modern teaching strategy for developing electromagnetic field theory from Coulomb’s law and Lorentz transformations." This statement would be more rigorously complete if it included conservation (invariance) of charge as a premise. Should it be changed? — Quondum 20:15, 29 July 2013 (UTC) Yes, it is a good idea to get away from Coulomb’s law when doing relativity, even if the references use the electrostatic principle as a backdrop. Suggestion to focus on charge conservation is worthy. However, the topic is fraught with difficulties though well motivated. Caution should be taken regarding electro-mechanics as a deductive system.Rgdboer (talk) 22:36, 29 July 2013 (UTC) I'm not sure we understand each other. I was not trying to change the concept, only saying that in the deductive process, there is an unstated assumption of charge conservation. Charge conservation + Coulomb's law + Lorentz transformations => Relativistic electromagnetism. If there is no charge conservation, as in relativistic mass + Newton's law of gravitation + Lorentz transformations, we get a different set of laws (being something like gravitoelectromagnetism). 
I was simply suggesting something like "Relativistic electromagnetism is a modern teaching strategy based on developing electromagnetic field theory from charge conservation, Coulomb’s law and Lorentz transformations." — Quondum 22:56, 29 July 2013 (UTC) Conservation of charge is not so much the point as relative charge density. On the topic of relativistic electromagnetism, I have suggested that the eddy current brake is instructive. On March 5, someone logged in just to delete that suggestion, making no other contribution. At its best, this article may help students begin to appreciate EM field theory.Rgdboer (talk) 22:33, 30 July 2013 (UTC) ## Opposite charges with opposite motions and matching length contraction In a frame of reference where length contractions of positive and negative charges in a neutral wire are non-zero and identical, it would seem that any charge whose frame deviates from this frame would receive a magnetic force. If the positive charges of the metal ions in a typical conductor are stationary in the lab frame (or "lab-stationary"), this would mean that in the lab frame the magnetic field "loops" propagate (or "drift") at half the drift velocity as the electron current (or approximately half as per the relativistic velocity addition formula). Therefore, even for a current flowing at a constant rate in a lab-stationary wire, the magnetic field produced is non-lab-stationary! If, in the lab frame, you apply magnetic Lorentz force q(v x B) on a lab-stationary charge q via a magnetic field B traveling at this "midway" velocity v between that of the metal ions and that of the electron current, what you will find is that the otherwise neutral wire in the lab frame will have electric fields lines pointing to it (due to the negativity of conducting charge) which are proportional to (v x B) which, ceteris paribus, is proportional to v^2, or alternatively B^2. 
Therefore, the distance between electrons in a wire does not have to decrease due to their relative motion with respect to the observer in order to explain this electric field. The increase of the electric field can be explained just as well by saying that the magnetic field of the wire is stationary only in the frame of reference between that of the metal ions and that of the electrons, and therefore an electric field is produced from an otherwise neutral wire via the magnetic Lorentz force on lab-stationary charges. In the case of completely negative electron beam, the magnetic field of the beam travels at the velocity of the beam, and any lab-stationary charge would be subject to a magnetic Lorentz force based on an electric field that points toward the beam. The induced electric field complements the electric field of the charged particles of the beam, which in this special case are electrons.siNkarma86—Expert Sectioneer of Wikipedia 86 = 19+9+14 + karma = 19+9+14 + talk 08:55, 15 March 2014 (UTC) ## Old content After almost 5 years with no support with references, the following sections have been removed today: ### Uniform electric field — simple analysis Figure 1: Two oppositely charged plates produce uniform electric field even when moving. The electric field is shown as 'flowing' from top to bottom plate. The Gaussian pill box (at rest) can be used to find the strength of the field. Consider the very simple situation of a charged parallel-plate capacitor, whose electric field (in its rest frame) is uniform (neglecting edge effects) between the plates and zero outside. To calculate the electric field of this charge distribution in a reference frame where it is in motion, suppose that the motion is in a direction parallel to the plates as shown in figure 1. The plates will then be shorter by a factor of: ${\displaystyle {\sqrt {1-v^{2}/c^{2}}}}$ than they are in their rest frame, but the distance between them will be the same. 
Since charge is independent of the frame in which it is measured, the total charge on each plate is also the same. So the charge per unit area on the plates is therefore larger than in the rest frame by a factor of: ${\displaystyle 1 \over {\sqrt {1-v^{2}/c^{2}}}}$ The field between the plates is therefore stronger by this factor. ### More rigorous analysis Figure 2a: The electric field lines are shown flowing outward from the positive plate Figure 2b: The electric field lines flow inward toward the negative plate Consider the electric field of a single, infinite plate of positive charge, moving parallel to itself. The field must be uniform both above and below the plate, since it is uniform in its rest frame. We also assume that knowing the field in one frame is sufficient for calculating it in the other frame. The plate however could have a non zero component of electric field in the direction of motion as in Fig 2a. Even in this case, the field of the infinite plane of negative charge must be equal and opposite to that of the positive plate (as in Fig 2b), since the combination of plates is neutral and cannot therefore produce any net fields. When the plates are separated, the horizontal components still cancel, and the resultant is a uniform vertical field as shown in Fig 1. If Gauss's law is applied to pillbox as shown in Fig 1, it can be shown that the magnitude of the electric field between the plates is given by: ${\displaystyle |E'|={\sigma ' \over \epsilon _{0}}\ }$ where the prime (') indicates the value measured in the frame in which the plates are moving. ${\displaystyle \sigma }$ represents the surface charge density of the positive plate. 
Since the plates are contracted in length by the factor ${\displaystyle {\sqrt {1-v^{2}/c^{2}}}}$ then the surface charge density in the primed frame is related to the value in the rest frame of the plates by: ${\displaystyle \sigma '\ ={\sigma \over {\sqrt {1-v^{2}/c^{2}}}}}$ But the electric field in the rest frame has value σ / ε0 and the field points in the same direction on both of the frames, so ${\displaystyle E'={E \over {\sqrt {1-v^{2}/c^{2}}}}\ }$ The E field in the primed frame is therefore stronger than in the unprimed frame. If the direction of motion is perpendicular to the plates, length contraction of the plates does not occur, but the distance between them is reduced. This closer spacing however does not affect the strength of the electric field. So for motion parallel to the electric field E, ${\displaystyle E'=E\ }$ In the general case where motion is in a diagonal direction relative to the field the field is merely a superposition of the perpendicular and parallel fields., each generated by a set of plates at right angles to each other as shown in Fig 3. Since both sets of plates are length contracted, the two components of the E field are ${\displaystyle E'_{y}={E_{y} \over {\sqrt {1-v^{2}/c^{2}}}}}$ and ${\displaystyle E'_{x}=E_{x}\ }$ where the y subscript denotes perpendicular, and the x subscript, parallel. These transformation equations only apply if the source of the field is at rest in the unprimed frame. ### The field of a moving point charge Figure 3: A point charge at rest, surrounded by an imaginary sphere. Figure 4: A view of the electric field of a point charge moving at constant velocity. A very important application of the electric field transformation equations is to the field of a single point charge moving with constant velocity. In its rest frame, the electric field of a positive point charge has the same strength in all directions and points directly away from the charge. 
In some other reference frame the field will appear differently. In applying the transformation equations to a nonuniform electric field, it is important to record not only the value of the field, but also at what point in space it has this value. In the rest frame of the particle, the point charge can be imagined to be surrounded by a spherical shell which is also at rest. In our reference frame, however, both the particle and its sphere are moving. Length contraction therefore states that the sphere is deformed into an oblate spheroid, as shown in cross section in Fig 4. Consider the value of the electric field at any point on the surface of the sphere. Let x and y be the components of the displacement (in the rest frame of the charge), from the charge to a point on the sphere, measured parallel and perpendicular to the direction of motion as shown in the figure. Because the field in the rest frame of the charge points directly away from the charge, its components are in the same ratio as the components of the displacement: ${\displaystyle {E_{y} \over E_{x}}={y \over x}}$ In our reference frame, where the charge is moving, the displacement x' in the direction of motion is length-contracted: ${\displaystyle x'=x{\sqrt {1-v^{2}/c^{2}}}}$ The electric field at any point on the sphere points directly away from the charge. (b) In a reference frame where the charge and the sphere are moving to the right, the sphere is length-contracted but the vertical component of the field is stronger. These two effects combine to make the field again point directly away from the current location of the charge. (While the y component of the displacement is the same in both frames). However, according to the above results, the y component of the field is enhanced by a similar factor: ${\displaystyle E'_{y}={E_{y} \over {\sqrt {1-v^{2}/c^{2}}}}}$ whilst the x component of the field is the same in both frames. 
The ratio of the field components is therefore ${\displaystyle {E'_{y} \over E'_{x}}={E_{y} \over E_{x}{\sqrt {1-v^{2}/c^{2}}}}={y' \over x'}}$ So, the field in the primed frame points directly away from the charge, just as in the unprimed frame. A view of the electric field of a point charge moving at constant velocity is shown in figure 4. The faster the charge is moving, the more noticeable the enhancement of the perpendicular component of the field becomes. If the speed of the charge is much less than the speed of light, this enhancement is often negligible. But under certain circumstances, it is crucially important even at low velocities. ### The origin of magnetic forces Figure 5, lab frame: A horizontal wire carrying a current, represented by evenly spaced positive charges moving to the right whilst an equal number of negative charges remain at rest, with a positively charged particle outside the wire and traveling in a direction parallel to the current. In the simple model of events in a wire stretched out horizontally, a current can be represented by the evenly spaced positive charges, moving to the right, whilst an equal number of negative charges remain at rest. If the wire is electrostatically neutral, the distance between adjacent positive charges must be the same as the distance between adjacent negative charges. Assume that in our 'lab frame' (Figure 5), we have a positive test charge, Q, outside the wire, traveling parallel to the current, at the speed, v, which is equal to the speed of the moving charges in the wire. It should experience a magnetic force, which can be easily confirmed by experiment. Figure 6, test charge frame: The same situation as in fig. 5, but viewed from the reference frame in which positive charges are at rest. The negative charges flow to the left. 
The distance between the negative charges is length-contracted relative to the lab frame, while the distance between the positive charges is expanded, so the wire carries a net negative charge. Inside 'test charge frame'(Fig. 6), the only possible force is the electrostatic force Fe = Q · E because, although the magnetic field is the same, the test charge is at rest and, therefore, cannot feel it. In this frame, the negative charge density has Lorentz-contracted with respect to what we had in lab frame because of the increased speed. This means that spacing between charges has reduced by the Lorentz factor with respect to the lab frame spacing, l: ${\displaystyle l_{-}={l{\sqrt {1-v^{2}/c^{2}}}}}$ Thus, positive charges have Lorentz-expanded (because their speed has dropped): ${\displaystyle l_{+}=l/{\sqrt {1-v^{2}/c^{2}}}}$ Both of these effects combine to give the wire a net negative charge in the test charge frame. Since the negatively charged wire exerts an attractive force on a positively charged particle, the test charge will therefore be attracted and will move toward the wire. 
For ${\displaystyle v\ll c}$, we can concretely compute both, the magnetic force sensed in the lab frame ${\displaystyle F_{m}={QvI \over 2\pi \epsilon _{0}c^{2}R}}$ and electrostatic force, sensed in the test charge frame, where we first compute the charge density with respect to the lab frame length, l: ${\displaystyle \lambda ={q \over l}_{+}-{q \over l}_{-}={q \over l}{\bigl (}{\sqrt {1-v^{2}/c^{2}}}-1/{\sqrt {1-v^{2}/c^{2}}}{\bigr )}\approx {q \over l}{\Bigl (}1-{\frac {1}{2}}{v^{2} \over c^{2}}\,-\,1-{\frac {1}{2}}{v^{2} \over c^{2}}{\Bigr )}=-{q \over l}{v^{2} \over c^{2}}}$ and, keeping in mind that current ${\displaystyle I={q \over t}=q{v \over l}}$, resulting electrostatic force ${\displaystyle F_{e}=QE=Q{\lambda \over 2\pi \epsilon _{0}R}={Qqv^{2} \over 2\pi \epsilon _{0}c^{2}Rl}={QvI \over 2\pi \epsilon _{0}c^{2}R}}$ which comes out exactly equal to the magnetic force sensed in the lab frame, ${\displaystyle F_{e}=F_{m}}$. The lesson is that observers in different frames of reference see the same phenomena but disagree on their reasons. If the currents are in opposite directions, consider the charge moving to the left. No charges are now at rest in the reference frame of the test charge. The negative charges are moving with speed v in the test charge frame so their spacing is again: ${\displaystyle l_{(-)}={l{\sqrt {1-v^{2}/c^{2}}}}}$ The distance between positive charges is more difficult to calculate. The relative velocity should be less than 2v due to special relativity. For simplicity, assume it is 2v. The positive charge spacing contraction is then: ${\displaystyle {\sqrt {1-(2v/c)^{2}}}}$ relative to its value in their rest frame. 
Now its value in their rest frame was found to be ${\displaystyle l_{(+)}={l \over {\sqrt {1-v^{2}/c^{2}}}}}$ So the final spacing of positive charges is: ${\displaystyle l_{(+)}={l \over {\sqrt {1-v^{2}/c^{2}}}}{\sqrt {1-(2v/c)^{2}}}}$ To determine whether l(+) or l(-) is larger we assume that v << c and use the binomial approximation that ${\displaystyle (1+x)^{p}\approxeq 1+px\;{\text{ when }}\;x\ll 1}$ After some algebraic calculation it is found that l(+) < l(-), and so the wire is positively charged in the frame of the test charge. citation: A. French (1968), no page given One may think that the picture, presented here, is artificial because electrons, which accelerated in fact, must condense in the lab frame, making the wire charged. Naturally, however, all electrons feel the same accelerating force and, therefore, identically to the Bell's spaceships, the distance between them does not change in the lab frame (i.e. expands in their proper moving frame). Rigid bodies, like trains, don't expand however in their proper frame, and, therefore, really contract, when observed from the stationary frame. ### Calculating the magnetic field #### The Lorentz force law A moving test charge near a wire carrying current will experience a magnetic force dependent on the velocity of the moving charges in the wire. If the current is flowing to the right, and a positive test charge is moving below the wire, then there is a force in a direction 90° counterclockwise from the direction of motion. #### The magnetic field of a wire Calculation of the magnitude of the force exerted by a current-carrying wire on a moving charge is equivalent to calculating the magnetic field produced by the wire. Consider again the situation shown in figures. The latter figure, showing the situation in the reference frame of the test charge, is reproduced in the figure. 
The positive charges in the wire, each with charge q, are at rest in this frame, while the negative charges, each with charge −q, are moving to the left with speed v. The average distance between the negative charges in this frame is length-contracted to: ${\displaystyle l{\sqrt {1-v^{2}/c^{2}}}}$ where ${\displaystyle l}$ is the distance between them in the lab frame. Similarly, the distance between the positive charges is length-expanded to: ${\displaystyle l/{\sqrt {1-v^{2}/c^{2}}}}$ Both of these effects give the wire a net negative charge in the test charge frame, so that it exerts an attractive force on the test charge. Electromechanics and relativistic electromagnetism has several sources now listed. — Rgdboer (talk) 01:42, 28 November 2015 (UTC) Well, as I recall, the Feynman lectures in physics has a marvelous derivation of the magnetic field as the Lorentz-contracted electric field. However, it is far more subtle and nuanced than the above deleted section (e.g. you need to work out the cross-products, to get B pointing in the correct direction). Thus, yes, deleting the above seems appropriate: it's somehow oversimplifying some very important details. 67.198.37.16 (talk) 19:20, 21 February 2016 (UTC)
14,392
63,333
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 36, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.5625
4
CC-MAIN-2018-05
latest
en
0.93899
https://www.physicsforums.com/threads/issue-with-the-definition-of-a-lie-derivative-and-its-components-carrolls-gr.976344/
1,726,261,777,000,000,000
text/html
crawl-data/CC-MAIN-2024-38/segments/1725700651540.48/warc/CC-MAIN-20240913201909-20240913231909-00138.warc.gz
891,833,290
26,908
# Issue with the definition of a Lie derivative and its components (Carroll's GR) • A • haushofer In summary, the conversation discusses the notion of Lie derivatives in the context of Carroll's notes on general relativity. The Lie derivative of a tensor T with respect to a vector field V is defined using a diffeomorphism. The confusion arises in the definition of the term \phi_{* t}\Bigl[ T(\phi_t (p))\Bigr] where the tensor is first evaluated at a shifted point and then pulled back to the original point. The confusion is further compounded when evaluating the tensor components in a particular coordinate system, leading to the introduction of Lie brackets. The conversation concludes with a discussion on the evaluation of tensor components at different points and the use of partial derivatives in the haushofer Dear all, I'm having a small issue with the notion of Lie-derivatives after rereading Carroll's notes https://arxiv.org/abs/gr-qc/9712019 page 135 onward. The Lie derivative of a tensor T w.r.t. a vector field V is defined in eqn.(5.18) via a diffeomorphism ##\phi##. In this definition, both terms are "tensors at the point p", as he remarks after eqn.(5.17). My issue is with the term $$\phi_{* t}\Bigl[ T(\phi_t (p))\Bigr]$$ in eqn.(5.17). As I read this, you first evaluate the tensor T at a shifted point ##\phi_t (p)##, and after that you pull this back via ##\phi_{* t}## at the point p. After eqn.(5.21) however, I get confused. In this part, Carroll tries to show that in a particular coordinate system the Lie derivative becomes an ordinary partial derivative, so he can introduce Lie brackets. He takes as a vector field ##V=\partial_1##, and states: "The magic of this coordinate system is that a diffeomorphism by t amounts to a coordinate transformation from ##x^{\mu} = (x^1, x^2, . . . , x^n)## to ##y^{\mu} = (x^1 + t, x^2, . . . , x^n)##." All right, you go along the flow in the ##x^1##-direction. 
My confusion is with eqn.(5.23): he evaluates the tensor components in ##y^{\mu} = (x^1 + t, x^2, . . . , x^n)##. But why? Aren't we supposed to evaluate the terms in the original point p, that is, with coordinates ##x^{\mu} = (x^1, x^2, . . . , x^n)##? The confusion also arises, because I thought the point of Lie derivaties was that you compare tensor components in the very same point (and hence, evaluated at the same coordinate values!). But in eqn.(5.24), the Lie derivative becomes an ordinary partial derivative because in Carroll's magic coordinate system it falls down to $$\lim_{t \rightarrow 0 }\frac{T^{\mu \ldots}_{\nu \ldots}(x^1 + t, x^2, . . . , x^n) - T^{\mu \ldots}_{\nu \ldots}(x^1, x^2, . . . , x^n)}{t} = \partial_{x^1} T^{\mu \ldots}_{\nu \ldots}(x^1, x^2, . . . , x^n)$$ But here we are comparing the same tensor components in two different points! I understand that this is the whole idea of a partial derivative, but I'm confused in the context of Lie derivatives and Carroll's remark after eqn.(5.17). So I guess my question really is: if a diffeomorphism brings us from a point with coordinates ##x^{\mu}## to a point with coordinates ##y^{\mu}##, how do the components of $$\phi_{* t}\Bigl[ T(\phi_t (p))\Bigr]$$ look like? I thought it would be ##T^{'\mu \ldots}_{'\nu \ldots}(x)##, but because of Carroll's discussion above I'm confused. Don't we simply have ##\phi_t(p)=\phi_t(x_1,x_2,\ldots,x_n)=(x_1+t,x_2,\ldots,x_n)=:(y_1,\ldots,y_n)\stackrel{(t \to 0)}{\longrightarrow}(5.24)## in this case? Then we apply ##T## as we formally went from ##\phi\, : \,M\longrightarrow N##, leading us to the tangent space ##T_{\phi(p)}N## which we pull back to ##T_pM##. I guess I don't understand your concerns. A bit confusing is that we have only one potato here ##N=M##. I am not sure what exactly you worry about but I'll try to answer. Yes, you have to evaluate both terms in the definition of the Lie derivative at the same point. 
If $$P$$ is that point and $$\Phi_t$$ is the flow of the vector field $$V=V^\mu \partial_\mu$$, let the point Q be the point lying on the integral line of $$V$$ i.e. $$Q = \Phi_t(P)$$. At this point, field $$V$$ has coordinate expression $$V(y)=V^\mu(y) \frac{\partial}{ \partial y^\mu}$$ where $$y^\mu = x^\mu +t V^\mu$$ are the coordinates of $$Q$$ up to the first order in $$t$$. Mapping $$x\mapsto y$$ is the coordinate expression for the flow and order to calculate the pull back you need to transform $$V(y)$$ to coordinates $$x$$ as usual coordinate transformation for vector fields by Jacobi matrix. You also need to expand $$V^\mu(y)$$ in $$t$$ to express y as function of x. Ok, I agree, so how do you explain that eqn.(5.23)? Why evaluate it in the point with coordinates y and not x? Sorry, I wasn't very focused when I wrote the first post. So let's go from the beginning. We agree that the Lie derivative of tensor field ##T## in the direction of vector field V at point P is $$L_V T = \lim_{t\to 0} \frac{1}{t}( \Phi_t^* T(\Phi_t(P)) - T(P)),$$ right? ##\Phi_t## is the flow of ##V## and ##\Phi_t^*## is the pull-back against the flow. So, tensor T must be evaluated at, in coordinates, y where $$y^\mu = x^\mu + t\,V^\mu(x) + O(t^2)$$ Now, if the coordinates ##x^\mu## are adapted to field V, we have $$V^\mu = \delta_1^\mu$$ and $$y^\mu = x^\mu + t\,\delta^\mu_1 + O(t^2)$$ The tensor field evaluated at y is $$T^\mu(y) = T^\mu(x^1 + t, x^2, \dots x^n)$$ For simplicity, suppose that T is a vector field ##T = T^\mu \partial_\mu##. Then $$\Phi_t^* T(y)|_{x} = T^\mu(y) \frac{\partial x^\nu}{\partial y^\mu}\,\frac{\partial}{\partial x^\nu}= T^\mu(x^1 + t, \dots x^n) \partial_\mu|_{x}$$ where $$\partial x^\mu / \partial y^\nu = \delta ^\mu_\nu$$ Then the abstract definition of the Lie derivative immediately gives you simply the partial derivative. If you employ general coordinates, derivatives of the field V would appear both in the Jacobi matrix and the expansion of V(y). 
vanhees71, kent davidge and Orodruin Thanks for thinking with me everyone, much appreciated as always. Martin Scholtz said: Sorry, I wasn't very focused when I wrote the first post. So let's go from the beginning. We agree that the Lie derivative of tensor field ##T## in the direction of vector field V at point P is $$L_V T = \lim_{t\to 0} \frac{1}{t}( \Phi_t^* T(\Phi_t(P)) - T(P)),$$ right? ##\Phi_t## is the flow of ##V## and ##\Phi_t^*## is the pull-back against the flow. So, tensor T must be evaluated at, in coordinates, y where $$y^\mu = x^\mu + t\,V^\mu(x) + O(t^2)$$ Yes, I agree with the definition of the Lie derivative and follow your derivation afterwards, but my issue is with your conclusion in the quote abovem "So, tensor T must be evaluated at the point with coordinates y". So, in the first term of the Lie derivative ##\Phi_t^* T(\Phi_t(P))##, if I first evaluate the tensor at the shifted point with coordinates y, ##T(\Phi_t(P))## and then pull it back to the point with coordinates x, ##\Phi_t^* \Bigl(T(\Phi_t(P))\Bigr)## I get a tensor "at P" (as the remark after eqn.(5.17) states), even though the argument involves the (point ##\Phi_t(P)## with) coordinate y. So my confusion arises because of the "evaluate the tensor first in the shifted point, and then pull it back to the original point, so we get a tensor at that original point". I have to think this through more carefully, but let me ask a follow-up question which should clarify. Imagine, as in Carroll's text, we're using a diffeomorphism ##\Phi_t## to shift the point P with coordinates ##x^{\mu}## to another point ##\Phi_t(P)## with coordinates ##y^{\mu}##. I want to understand how the terms in the Lie derivative of our tensor ##T## translate to "tensor component transformation notation". So, e.g. 
##T(P) : T^{\mu\ldots}_{\nu\ldots} (x)## ##T(\Phi_t(P)) : T^{\mu\ldots}_{\nu\ldots} (y)## ##\Phi_t^* \Bigr(T(P)\Bigr) : \frac{\partial x^{\mu}}{\partial y^{\rho}} \ldots \frac{\partial y^{\lambda} }{\partial x^{\nu} } T^{\rho\ldots}_{\lambda\ldots} (y)## (the standard "tensor transformation law") And then, last but not least, of course: ##\Phi_t^* \Bigl(T(\Phi_t(P))\Bigr)## What will this term become in component notation, according to you? Would this be ##\Phi_t^* \Bigl(T(\Phi_t(P))\Bigr) : \frac{\partial x^{\mu}}{\partial y^{\rho}} \ldots \frac{\partial y^{\lambda} }{\partial x^{\nu} } T^{\rho\ldots}_{\lambda\ldots} (x)## ? And how would you translate then the Lie derivative in this component notation? (removed a few confusing lines) Last edited: You could formally consider ##\phi\, : \,M \longrightarrow N## as a diffeomorphism between two different manifolds. Now we use the tensor field on ##N## to make a statement about ##M##. ... and we can ask how fast a tensor changes as we travel down the integral curves ... We compare ##T(p)## with its variation along the flow ##\phi##. That's what a differentiation does: it compares a tangent vector at a certain point with what happens nearby. In school nearby is a secant, here it is a flow. fresh_42 said: You could formally consider ##\phi\, : \,M \longrightarrow N## as a diffeomorphism between two different manifolds. Now we use the tensor field on ##N## to make a statement about ##M##. We compare ##T(p)## with its variation along the flow ##\phi##. That's what a differentiation does: it compares a tangent vector at a certain point with what happens nearby. In school nearby is a secant, here it is a flow. Yes, but that's not the issue. 
What my issue is, is that I cannot reconcile this coordinate-free notation with the way I use to calculate Lie derivatives, as the difference (using components and suppressing indices) ##T'(x)-T(x)## (where the prime indicates the transformed components under infinitesimal transformations) and expanding. Both terms are evaluated at the same point x, while in the example of Carroll you evaluate one term in the shifted point with coordinates y (here that would be x'). I think Stephani provides a more intuitive explanation of the Lie derivative. Given is a congruence of world lines, defining a vector field ##a^{\mu}(x)## through tangent vectors along these world lines. Now consider an observer, who moves along one of the lines by an infinitesimal step, i.e., in coordinates from $$x^{\mu} \rightarrow \bar{x}^{\mu}=x^{\mu}+\delta t a^{\mu}(x).$$ Now suppose that observer uses his coordinate system at the original point ##P## at the point ##\bar{P}##. This implies a coordinate transformation $$x^{\prime \mu}=x^{\mu}-\delta t a^{\mu}(x)$$ and a transformation matrix $${A^{\mu}}_{\nu}=\delta^{\mu}_{\nu} - \delta t \partial_{\nu} a^{\mu}(x).$$ Now take an arbitrary vector field ##T^{\mu}(x)##. The observer will associate the components of this field at point ##\bar{P}## to $$T^{\prime \mu}(\bar{P})={A^{\mu}}_{\nu} T^{\nu}(x+\delta t a(x))=T^{\mu}(P) + \delta t (a^{\nu}(x) \partial_{\nu} T^{\mu}-T^{\nu} \partial_{\nu} a^\mu) +\mathcal{O}(\delta t^2).$$ Then the Lie derivative of ##T## at point ##P## is defined as $$\mathcal{L}_{a} T^{\mu}=\lim_{\delta t \rightarrow 0} \frac{1}{\delta t} [T^{\prime \mu}(\bar{P})-T^{\mu}(P)]=a^{\nu} \partial_{\nu} T^{\mu} - T^{\nu} \partial_{\nu} a^{\mu}.$$ That's (slightly adapted to a more careful notation concerning the indices) from H. Stephani, Relativity, An introduction to Special and General Relativity, 3rd Ed., Cambridge University Press (2004). 
It's of course straight forward to define the Lie derivative for any tensor field of higher rank and also for covariant components. It's also easy to show that instead of the partial derivatives ##\partial_{\nu}## you can write the covariant derivatives ##\nabla_{\nu}## everywhere since the terms involving Christoffel symbols all finally cancel. vanhees71 said: I think Stephani provides a more intuitive explanation of the Lie derivative. Given is a congruence of world lines, defining a vector field ##a^{\mu}(x)## through tangent vectors along these world lines. Now consider an observer, who moves along one of the lines by an infinitesimal step, i.e., in coordinates from $$x^{\mu} \rightarrow \bar{x}^{\mu}=x^{\mu}+\delta t a^{\mu}(x).$$ Now suppose that observer uses his coordinate system at the original point ##P## at the point ##\bar{P}##. This implies a coordinate transformation $$x^{\prime \mu}=x^{\mu}-\delta t a^{\mu}(x)$$ and a transformation matrix $${A^{\mu}}_{\nu}=\delta_{\mu}^{\nu} - \delta t \partial_{\nu} a^{\mu}(x).$$ Now take an arbitrary vector field ##T^{\mu}(x)##. The observer will associate the components of this field at point ##\bar{P}## to $$T^{\prime \mu}(\bar{P})={A^{\mu}}_{\nu} T^{\nu}(x+\delta t a(x))=T^{\mu}(P) + \delta t (a^{\nu}(x) \partial_{\nu} T^{\mu}-T^{\nu} \partial_{\nu} a^\mu) +\mathcal{O}(\delta t^2).$$ Then the Lie derivative of ##T## at point ##P## is defined as $$\mathcal{L}_{a} T^{\mu}=\lim_{\delta t \rightarrow 0} \frac{1}{\delta t} [T^{\prime \mu}(\bar{P})-T^{\mu}(P)]=a^{\nu} \partial_{\nu} T^{\mu} - T^{\nu} \partial_{\nu} a^{\mu}.$$ That's (slightly adapted to a more careful notation concerning the indices) from H. Stephani, Relativity, An introduction to Special and General Relativity, 3rd Ed., Cambridge University Press (2004). It's of course straight forward to define the Lie derivative for any tensor field of higher rank and also for covariant components. 
It's also easy to show that instead of the partial derivatives ##\partial_{\nu}## you can write the covariant derivatives ##\nabla_{\nu}## everywhere since the terms involving Christoffel symbols all finally cancel. Thanks. My issue is funnily enough with how to derive the Lie derivative algebraically; I can dream these derivations. My issue is that I can't reconcile different texts which use (apparently) slightly different notions and definitions (and then I'm not even beginning about the passive v.s. active point of view, which some texts seem to combine in defining Lie derivatives.) But I think I do understand the answer to my question i posed to @Martin Scholtz, so thanks Martin. $$\mathcal{L}_{a} T^{\mu}=\lim_{\delta t \rightarrow 0} \frac{1}{\delta t} [T^{\prime \mu}(\bar{P})-T^{\mu}(P)] .$$ which I would define as $$\mathcal{L}_{a} T^{\mu}=\lim_{\delta t \rightarrow 0} \frac{1}{\delta t} [T^{\prime \mu}(\bar{P})-T^{\mu}(\bar{P})]$$ This is equivalent to how e.g. Zee or Inverno define the Lie derivatives. It makes sense, because you compare tensors at two equal points (here ##\bar{P}##), which is the only sensible thing to do without a connection. What I don't get specifically in your post, is your quote "The observer will associate the components of this field at point ##\bar{P}## to" $$T^{\prime \mu}(\bar{P})={A^{\mu}}_{\nu} T^{\nu}(x+\delta t a(x))$$ I'd say that one gets instead $$T^{\prime \mu}(\bar{P})={A^{\mu}}_{\nu} T^{\nu}(x)$$ i.e. the tensor transformation law, $$T^{\prime \mu}(x')= \frac{\partial x^{'\mu}}{\partial x^{\nu}} T^{\nu}(x)$$ One then first evaluates the tensor in the shifted point ##\bar{P}##, and compares this value with the 'dragged along value of the tensor' at that very same point. Note that this definition differs from the earlier mentioned definition by Martin and me, but it gives the same notion of the derivative. Maybe I should just stick to the definitions and conventions I do understand. 
;) Anyway, I'm starting with a new job tomorrow, so I'll probably won't be able to give more elaborate reactions for the coming days, but if anyone has more comments, be assured they will be read. Many thanks for all of those who helped me with this issue :) The latter is the transformation of the vector components at the same space-time point ##P##. In the Lie derivative you carry the vector components in the above specified way to the "infinitesimally close" point ##\bar{P}## along the world-line with tangent ##a^{\mu}##. In the postings above this has been formalized in the coordinate-free way using the diffeomorphism ##\phi_t## parametrizing (locally) the curve along which you want to evaluate the Lie derivative. The pullback is formally describing what Stephani calls "the observer takes his coordinate bases with him". vanhees71 said: The pullback is formally describing what Stephani calls "the observer takes his coordinate bases with him". I find these kind of statements highly confusing. The only thing I can make of these wordings, is that you first shift the points actively, and then perform a passive transformation successively such that the old point in the old coordinate system has the same numerical value as the shifted point in the new (passively obtained) coordinate system. But as I said, this combines active and passive notions of coordinate transformations ("diffeomorphisms on the manifold ##M## and diffeomorphisms in ##R^n##"), and personally that confuses things a whole lot more than just sticking to the active picture. I'll compare your given definition more carefully (=algebraically and conceptually) with my own understanding. (edit:) I suspect Stephani means something subtly different with ##T^{\prime \mu}(\bar{P})## in his Lie derivative than what I'm used to ;) I'm usually puzzled more about the coordinate-free statements, but that's because I'm not familiar enough with it :-((. 
Just a sideremark: as a teacher, I know how important it is to be aware of pre-existing concepts in students heads when they learning something new. I guess somewhere in the process years ago of learning this stuff I implanted myself wrong/confusing concepts which makes me feel uncomfortable about it everytime I compare different textbooks. So in that sense I did learn a valuable lesson :P vanhees71 vanhees71 said: I'm usually puzzled more about the coordinate-free statements, but that's because I'm not familiar enough with it :-((. Well, maybe that's the other lesson to learn today: stay away from it as much as you can if you understand your own definitions! :P ## 1. What is the Lie derivative and why is it important in Carroll's GR? The Lie derivative is a mathematical tool used in differential geometry to measure how a vector field changes along the flow of another vector field. In Carroll's GR, it is important because it is used to define the change in a tensor field along the flow of a diffeomorphism, which is a key concept in general relativity. ## 2. What are the components of the Lie derivative and how are they related? The components of the Lie derivative are the transformation law, the commutator, and the infinitesimal generator. The transformation law describes how a tensor field changes under a diffeomorphism, the commutator measures the failure of tensors to commute under the transformation, and the infinitesimal generator is the vector field that generates the diffeomorphism. ## 3. What is the issue with the definition of the Lie derivative in Carroll's GR? The issue with the definition of the Lie derivative in Carroll's GR is that it depends on the choice of coordinates, which can lead to inconsistencies and difficulties in calculations. This is because the Lie derivative is defined using the commutator, which is not coordinate-independent. ## 4. How can the issue with the definition of the Lie derivative be resolved? 
One way to resolve the issue is to use a coordinate-independent definition of the Lie derivative, such as the Cartan derivative. This involves using differential forms instead of tensor components, which eliminates the dependence on coordinates and leads to more consistent and simpler calculations. ## 5. What are some applications of the Lie derivative in Carroll's GR? The Lie derivative is used in various applications in Carroll's GR, such as in the study of black holes, gravitational waves, and cosmology. It is also used in the formulation of the Einstein field equations and in the analysis of the symmetries and conservation laws of the theory. Replies 4 Views 807 Replies 1 Views 783 Replies 2 Views 1K Replies 2 Views 1K Replies 11 Views 2K Replies 13 Views 1K Replies 1 Views 928 Replies 9 Views 5K Replies 3 Views 2K Replies 73 Views 9K
5,557
20,043
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 2, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.90625
3
CC-MAIN-2024-38
latest
en
0.893186
https://www.kingexcel.info/2023/09/budget-vs-actual-variance-reports-with.html
1,716,153,417,000,000,000
text/html
crawl-data/CC-MAIN-2024-22/segments/1715971057922.86/warc/CC-MAIN-20240519193629-20240519223629-00214.warc.gz
756,680,061
73,384
Budget vs Actual Variance Reports with “In the Cell Charts” in Excel - KING OF EXCEL ## Thursday, September 7, 2023 In life we all want to be in control. And usually we measure or judge whether things are under control by comparing what’s happening with what should be happening. In other words, we tend to compare the actual events/results with planned events/results. Though more interestingly, most of us don’t really know if the “plan” is really under control or not. Anyway, we will discuss the philosophy of perception vs reality some other time. So back to our topic: comparisons and making budget vs actual reports. Excel provides immense flexibility to make variance analysis super easy, either in numerical form using conditional formatting or custom formats, or in the form of variance charts. Today we are looking at yet another technique of reporting variances which is more of an in-the-cell charting. We are not going to make actual charts, but it will be visually as helpful as normal charts. The final result of today’s technique is the following: It’s really easy to do if you know how to use the REPT() function. An overlooked function, and to many it doesn’t have any use. But it has quite a big role today. So let’s learn it. ### In-cell Variance Charts – Step by Step Step 1: In a separate column make a heading of Variance. In our case it will go in cell F5. And in cell F6 put this formula and double-click the fill handle to populate the formula down the whole range: =E6-D6 Step 2: Now that we have calculated the variances, for the sake of better understanding let’s separate the negative and positive values into two columns. These are not needed if one has a good grip on IF() functions, but for simplicity I will be employing helper columns. Write “Negative” in cell G5 and “Positive” in cell H5. 
In cell G6 put this formula and double-click the fill handle after pressing the Enter key: `=IF(F6<0,F6,"")` In cell H6 put this formula and drag the fill handle down to fill the range: `=IF(F6>0,F6,"")` Step 3: Go to cell C18 and insert a specific symbol which you can find in almost every font once you select “Block Elements”. Once inserted, simply close the dialogue box. Also put 300 in cell C19. Step 4: Select cells J5 and K5 and merge them together. Write “Variance Chart” in the merged cells. Step 5: Put this formula in cell J6: `=IF(G6="","",G6&" "&REPT($C$18,ABS(G6)/$C$19))` Align the column to the right. And in cell K6 put the following formula and drag the fill handle down to fill the formula in the appropriate range: `=IF(H6="","",REPT($C$18,H6/$C$19)&" "&H6)` Once you are done with the above steps, your chart will look like this: Step 6: Now the coloring part is manual, as we have to separate favourable and unfavourable variances from each other. A negative figure is not always a bad thing, and a positive figure is not always a good thing. So for items like Revenue, Operating profit etc., a positive variance is favourable, so I colored them Blue, whereas for items like expenses, a positive variance indicates an increase in cost and is therefore an unfavourable variance, so I colored them Red. With a touch of borders to fine-tune the look, this is what I have in the end! So I hope you have enjoyed learning a new technique as much as I enjoyed writing about it. Check out more Excel tutorials by clicking here
777
3,331
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.453125
3
CC-MAIN-2024-22
latest
en
0.88473
http://oeis.org/A024508
1,571,221,069,000,000,000
text/html
crawl-data/CC-MAIN-2019-43/segments/1570986666959.47/warc/CC-MAIN-20191016090425-20191016113925-00484.warc.gz
145,754,590
4,351
This site is supported by donations to The OEIS Foundation. Hints (Greetings from The On-Line Encyclopedia of Integer Sequences!) A024508 Numbers that are a sum of 2 distinct nonzero squares in more than one way. 8 65, 85, 125, 130, 145, 170, 185, 205, 221, 250, 260, 265, 290, 305, 325, 340, 365, 370, 377, 410, 425, 442, 445, 481, 485, 493, 500, 505, 520, 530, 533, 545, 565, 580, 585, 610, 625, 629, 650, 680, 685, 689, 697, 725, 730, 740, 745, 754, 765, 785, 793, 820, 845, 850, 865, 884, 890, 901, 905, 925, 949, 962, 965, 970, 985, 986, 1000, 1010, 1025, 1037, 1040, 1060, 1066, 1073, 1090, 1105, 1125 (list; graph; refs; listen; history; text; internal format) OFFSET 1,1 COMMENTS Appears to be n such that sigma(n)==0 (mod 4) and n is expressible as a sum of 2 squares. - Benoit Cloitre, Apr 20 2003 The comment that is in above is true most of the time. However if number of odd divisors of n that is a term of this sequence is not divisible by 4, then sigma(n) cannot be divisible by 4. For example; 325, 425, 625, 650, ... See also A000443 for more related examples. - Altug Alkan, Jun 09 2016 If m is a term then (a^2 + b^2) * m is a term for a,b > 0. Hence this sequence is closed under multiplication. - David A. Corneth, Jun 10 2016 LINKS David A. Corneth, Table of n, a(n) for n = 1..10749 G. Xiao, Two squares MATHEMATICA lst={}; q=-1; k=1; Do[Do[x=a^2; Do[y=b^2; If[x+y==n, If[n==q&&k==1, AppendTo[lst, n]]; If[n!=q, q=n; k=1, k++ ]], {b, Floor[(n-x)^(1/2)], a+1, -1}], {a, Floor[n^(1/2)], 1, -1}], {n, 2*6!}]; lst (* Vladimir Joseph Stephan Orlovsky, Jan 22 2009 *) PROG (PARI) is(n) = {my(t=0, i); t=sum(i=1, sqrtint((n-1)\2), issquare(n-i^2)); t>1} \\ David A. Corneth, Jun 10 2016 (PARI) is(n)=if(n<9, return(0)); my(v=valuation(n, 2), f=factor(n>>v), t=1); for(i=1, #f[, 1], if(f[i, 1]%4==1, t*=f[i, 2]+1, if(f[i, 2]%2, return(0)))); if(t%2, t-(-1)^v, t)/2-issquare(n/2)>1 \\ Charles R Greathouse IV, Jun 10 2016 CROSSREFS Cf. 
A001481, A025303 (exactly 2 ways), A025304 (exactly 3 ways), A025305 (exactly 4 ways), A025306 (exactly 5 ways). Sequence in context: A056693 A164282 A025312 * A025303 A071011 A165158 Adjacent sequences:  A024505 A024506 A024507 * A024509 A024510 A024511 KEYWORD nonn AUTHOR STATUS approved Lookup | Welcome | Wiki | Register | Music | Plot 2 | Demos | Index | Browse | More | WebCam Contribute new seq. or comment | Format | Style Sheet | Transforms | Superseeker | Recent The OEIS Community | Maintained by The OEIS Foundation Inc. Last modified October 16 06:17 EDT 2019. Contains 328046 sequences. (Running on oeis4.)
1,013
2,578
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.5625
4
CC-MAIN-2019-43
latest
en
0.633689
https://stats.stackexchange.com/questions/365266/accuracy-metric-for-comparing-time-series-models
1,713,359,776,000,000,000
text/html
crawl-data/CC-MAIN-2024-18/segments/1712296817153.39/warc/CC-MAIN-20240417110701-20240417140701-00213.warc.gz
476,208,064
39,994
# Accuracy metric for comparing Time Series models? I'm writing a blog post on forecasting time series with autoregression. In it, I compare the performance of SLR, ARIMA, and SARIMAX on forecasting the number of Home Sales in Seattle (see below). All 3 have different numbers of "input parameters": SLR just uses time, ARIMA and SARIMAX both use time and 12 lagged $y$ values. *I say "input parameters" b/c I'm not sure how to consider $y$. I'm currently using RMSE to compare them. Is this an acceptable practice, or is there another measure I should use that takes model complexity into account (e.g. something akin to adjusted R^2)? I know that MAPE is a commonly used forecasting metric. But like RMSE, I'm not sure it's appropriate for comparing models with different numbers of input parameters. Just wondering if there's anything better out there. • Are you measuring RMSE in-sample or out-of-sample? Sep 3, 2018 at 21:21 • Out of sample. I'm interested in the RMSE of the forecast, plus the RMSE of the in-sample ARIMA and SARIMAX models are nearly identical (the differences b/t them only really show out-of-sample). Sep 4, 2018 at 4:27 • Rather than choosing a single measure, it is more common to include several relevant measures of accuracy for prediction. Sep 4, 2018 at 8:14 • These kinds of adjustments for model complexity are typical of in-sample measures, but not of out-of-sample measures, which tackle overfitting in an entirely different way. You can compare RMSE/MAPE/MASE/etc out-of-sample between these models, just make sure that your exogenous regressors aren't assumed known in the future if they aren't really. Sep 4, 2018 at 12:18 • @ChrisHaug: do you want to post your comment as an answer? In addition, the forecasting textbooks listed at this thread all give error metrics commonly used in forecast evaluation. OP may be interested in the shortcomings of the MAPE. And $R^2$ is not commonly used in forecast evaluation. Dec 8, 2023 at 23:23
481
1,979
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.734375
3
CC-MAIN-2024-18
latest
en
0.939355
https://mn.khanacademy.org/math/cc-fourth-grade-math/cc-4th-measurement-topic/conversion-word-problems/v/multi-step-unit-conversion-examples
1,632,085,860,000,000,000
text/html
crawl-data/CC-MAIN-2021-39/segments/1631780056900.32/warc/CC-MAIN-20210919190128-20210919220128-00327.warc.gz
459,059,247
29,991
If you're seeing this message, it means we're having trouble loading external resources on our website. Хэрэв та вэб шүүлтүүртэй газар байгаа бол домэйн нэрийг *.kastatic.org and *.kasandbox.org блоклосон эсэхийг нягтална уу. Үндсэн товъёог Multi-step unit conversion examples (U.S. Customary) Video transcript Brooklyn purchased three pounds of pasta and 12 ounces of cheese to make macaroni and cheese what is the total weight in ounces total weight in ounces of the pasta and cheese combined so they give us the weight of the pasta in pounds and they give us the weight of the cheese in ounces and we want to add the total weight but they have to all be in ounces so let's first let's first convert these three pounds of pasta into a certain amount of ounces amount of pasta so we already know that one pound is equal to 16 ounces they tell us that so what would three pounds be so if I add three pounds how many ounces would that be and encourage you to pause the video and think about it let's see if you're having three times the pounds you're also going to have three times the ounces so if one pound is 16 ounces if you have three times as many pounds you're gonna have three times as many ounces so 16 times 3 is 48 48 ounces so Brooklyn the 3 pounds of pasta that she purchased that can also be viewed as 48 ounces 48 ounces of pasta so she has 48 ounces of pasta and 12 ounces of cheese so what is the total weight in ounces of them combined well it's going to be 48 ounces 48 ounces plus the twelve ounces of cheese 48 ounces of pasta plus 12 ounces of cheese give us a total of 60 ounces of pasta and cheese combined let's do another example with other units so we are told David is making a wall out of bricks each brick is 2/3 of a foot long each brick is 2/3 of a foot long there are 20 bricks lined up end to end to make the wall what is the total length of the wall in inches we have to pay attention to the units so they give us the length of each brick in feet so we know that 
each brick here let me draw a brick the length here is 2/3 of a foot 2/3 of a foot so what would this length be in inches well once again we can look at the conversion between foot and inches if I go from one foot to 2/3 of a foot well I've multiplied by 2/3 so if one foot is 12 inches and if I want to know how many inches 2/3 inches or 2/3 of a foot is in inches well I would multiply by two-thirds again in either case you could view this as multiplying both sides of this conversion by 2/3 and so 2/3 of a foot is going to be 2/3 times 12 inches and so 12 times 2 over 3 that would be 24 over 3 so this is going to be equal to 24 over 3 inches I'll just write in4 short right over here 24 4 3 which is equal to 8 inches so each brick is either you can either view it as 2/3 of a foot long or you could view it as 8 inches 8 inches long and there are 20 bricks lined up end to end to make the wall so what is the total length of the wall in inches it's going to be 8 brick each brick is 8 inches times 20 times 20 and 8 times 2 is 16 so 8 times 20 is 160 inches let's do one more of these at this time let's deal with some volume so this is Blanca buys two gallons of green paint she uses five quarts to paint her front porch and 0.5 quarts to paint the front porch swing how many quarts of paint does Blanca have left so we're given the original Mountain gallons but we want to know how many quarts she has left let's convert the amount she originally has into quarts so we they tell us 1 gallon is equal to 4 quarts so two gallons two gallons is going to be equal to two gallons is going to be twice as many quarts is going to be eight quarts so that's what she starts with and one way to think about this I just took this conversion well if I'm gonna have twice as many gallons then I'm going to have twice as many quarts if I want to express that volume in quarts so one way of thinking about she starts with eight quarts she starts with eight quarts eight quarts she uses five quarts to 
plate to paint her front porch so minus five quarts on the front porch is what she used and to use 0.5 cores to paint the porch swing so minus 0.5 quarts that looks like a G zero point five quarts well what is that going to be well 8 minus 5 is 3 minus 0.5 is 2.5 so she is going to be left with two point five quarts and we have answered the question in terms of the units they care about in terms of quarts
1,117
4,406
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.09375
4
CC-MAIN-2021-39
latest
en
0.931614
https://www.mathlobby.com/post/set-language-and-notation
1,695,530,462,000,000,000
text/html
crawl-data/CC-MAIN-2023-40/segments/1695233506559.11/warc/CC-MAIN-20230924023050-20230924053050-00620.warc.gz
961,938,569
198,957
top of page Search # Set language and notation Updated: Jun 22, 2021 Dear Secondary Math students, we will be going through Set Language and Notations. This chapter consists of many special and unique symbols which you might not come across. So stay tuned and pay close attention to them! In this note, you will learn: 1. Use of set language and notations (Union, Intersection, etc.) ### Use of set language and notations (Union, Intersection, etc.) A “set” in mathematics context refers to the collection of object, things or symbols that are clearly defined. Each of the individual object in a set are called elements or members of the set. Elements can be presented in two ways: 1. By listing the elements 2. By description When we list elements of a set, we typical use curly brackets to represent a set. In a description, it is used to represent the phrase, “the set of” and we write the elements/members of a set within the curly brackets. For example: Listing Description {1, 3, 5, 7, 9} The set of odd numbers between 0 to 10 {5, 10, 15, 20, 25, 30} The set of multiples of 5 from 5 to 30 ### Union of two sets, “A ∪ B”: The union of two sets is denoted by the symbol, ‘’. Given that we have two sets, A and B. The union of A and B basically means that the new set must include all the elements that are unique to A, all the elements that are unique to B, and the elements that exist in both A and B. For example: Set A consist of the numbers from 1 to 10: A = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} and Set B consist of numbers from 5 to 15. B = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} Hence, if we identify the elements that are unique to both sets A and B individually, and the elements that exist in both sets: A = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} B = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} Therefore A ∪ B = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} *Note: For union, elements that exist in both sets do not have to be repeated. 
### Intersection of two sets, “A ∩ B”: The intersection of two sets is denoted by the symbol, ‘∩’. Given that we have two sets, A and B. The intersection of A and B basically means that the new set must include only the elements that exist in BOTH sets A and B. For example: Set A consists of the test results of 5 students from class 1: A = {58, 66, 72, 40, 87} Set B consists of the test results of 5 students from class 2: B = {58, 54, 62, 87, 72} Hence, if we identify the elements that exist in both sets A and B: A = {58, 66, 72, 40, 87} B = {58, 54, 62, 87, 72} Therefore, A ∩ B = {58, 72, 87} ### Complement of a set, A’: The complement of a set is denoted by the original set followed by an apostrophe, i.e. the complement of set A is A’. The complement of a set is basically a set that has all the elements in the universal set, except those that are in the original set A. For example: If Ƹ = {a, b, c, d, e, f} and A = {b, d, f}, Therefore, A’ = {a, c, e} ### “… is a (proper) subset of …”, or “A ⊂ B”: Given that we have two sets, A and B. When we say set A is a subset of set B, it is denoted by ‘⊆’, and this basically means that every element in set A also exists in set B. When we say set A is a proper subset of set B, this basically means that not only does every element in set A also exist in set B, but set B also has MORE elements than set A. E.g. Given that A = {vowels of the English Alphabet} and B = {letters of the English Alphabet} This means that set A is a proper subset of set B, or A ⊂ B, since set A contains only “a, e, i, o, u”, but set B contains all the 26 letters of the English alphabet, which consists of all the elements in set A and has more elements than set A as well. ### “… is not a (proper) subset of …”, or “A ⊄ B”: When a set A is not a subset of set B, it is denoted by ‘⊄’. This basically means that there is at least one element in set A that does not exist in set B. E.g. 
Given that A = {vowels of the English Alphabet, 1, 2, 3} and B = {letters of the English Alphabet} This means that set A is not a subset of set B, or A ⊄ B, because although set A contains “a,e,i,o,u”, which are all elements that exist in set B, which contains all the 26 letters of the English alphabet, set B does not contain the numbers “1, 2, 3”, unlike set A. ### *Some other important notations in set language that you must know: Ƹ – represents a universal set. A universal set is a set that contains all the objects or elements and of which all other sets are subsets. E.g. Given that Ƹ = {1, 2, 3… 998, 999, 1000}, then other sets must only contain elements that are within the universal set, i.e. from 1 to 1000 ∈ - represents ‘… is an element of …’ E.g. x ∈ A, which also means “x is an element of A” ∉ - represents ‘… is not an element of …’ E.g. y ∉ A, which also means “y is not an element of A” ∅ or { } - represents an empty set. An empty set is basically a set that does not contain any elements at all. E.g. A = {1, 3, 5, 7}, B = {2, 4, 6, 8} A ∩ B = { } = ∅ And that’s all for today, students! Math Lobby hopes that after this article, you have a clear understanding of set language and notations! If you have any pending questions, please do go on to our Facebook page, Instagram or contact us directly at Math Lobby! We have certified mathematics tutors to aid you in your journey to becoming a better student! As always: Work hard, stay motivated and we wish all students a successful and enjoyable journey with Math Lobby! If you want to receive more Secondary Math Tips from us, Visit and FOLLOW our Instagram page at https://www.instagram.com/mathlobbymotivation/
1,655
5,620
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.875
5
CC-MAIN-2023-40
longest
en
0.933242
http://bigsstroy.info/simplifying-fractions-worksheet-6th-grade/
1,527,356,750,000,000,000
text/html
crawl-data/CC-MAIN-2018-22/segments/1526794867841.63/warc/CC-MAIN-20180526170654-20180526190654-00436.warc.gz
36,934,414
5,419
Students will use integers to describe simplifying fractions worksheet 6th grade, so find the PPT that suits your class best. Students will read, students will apply the order of operations in order to solve problems. Students will identify outliers, estimate decimal sums and differences through the nearest hundred thousandth and add and subtract decimals through the hundred thousandths. Students will estimate products and quotients using a variety of estimating techniques including front; students will define and identify terminating and repeating decimals. Students will make comparisons with measurements in metric and customary units. Various combinations of penny, we offer PDF printables in the highest quality. And division of whole numbers in problem, students will find volume of prisms, students will compare and order fractions including using a number line. Students will use a compass and straightedge to construct equilateral, lists and tables. Students will identify and draw points, multiplication and division. Students will develop number sense, students will use numeric digits and words to read and write numbers including the trillions. Students will solve two, tools and formulas to solve problems involving length, students will identify function relationships and solve and interpret on graphs. They are marked online, systems of Equations and Quadratic Equations. Students will use a calculator to add — access the materials by looking at topics, and distributive properties. Print small coins cards for matching, a worksheet on expanding and factorising quadratics. Expanding brackets and factorising, students will compare and order whole numbers up to one trillion. New UK coin designs, aRE YOU READY FOR SOME MATHBALL? Students will define and identify attributes of three, this is a whole lesson on substitution. Students will identify and apply pattern rules using diagrams — 3 to find the area and perimeter of triangles. 
Students will apply strategies in order to solve real, students will calculate the probability of an event and its complement. Students will identify points, students will multiply fractions and mixed numbers. Organize and display data about local, arithmetic and geometric sequences. Students will analyze and solve problems using number, students will read and interpret stem and leaf plots.
421
2,361
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.96875
4
CC-MAIN-2018-22
latest
en
0.888299
https://discusstest.codechef.com/t/what-is-problem-in-my-code/21287
1,627,818,303,000,000,000
text/html
crawl-data/CC-MAIN-2021-31/segments/1627046154175.76/warc/CC-MAIN-20210801092716-20210801122716-00073.warc.gz
229,195,760
3,341
what is problem in my code? n=int(input()) p1=0 p2=0 l=0 for i in range(n): s1,s2=list(map(int,input().split())) c=abs(s1-s2) if(c>=l and s1>s2): l=c p1=1 p2=0 elif(c>=l and s2>s1): l=c p2=1 p1=0 if(p1==1): print(1,l,sep=" “) if(p2==1): print(2,l,sep=” ") //
127
260
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.53125
3
CC-MAIN-2021-31
latest
en
0.240122
https://www.wyzant.com/resources/answers/4383/x_6_x_3_x_6_x_5
1,435,718,387,000,000,000
text/html
crawl-data/CC-MAIN-2015-27/segments/1435375094634.87/warc/CC-MAIN-20150627031814-00206-ip-10-179-60-89.ec2.internal.warc.gz
987,395,141
11,631
Search 81,848 tutors 0 0 # (x-6)(x+3)+ (x-6)(x+5) factor out the greatest common factor simplify the factors, if possible (x - 6) is common to both terms. so if you pull it out, you get (x-6)[(x + 3) + (x + 5)] We can simplify the second terms to get (x - 6)[x + 3 + x + 5] = (x - 6)(2x + 8) Since both terms in the second factor are even, we can pull out a 2: (2)(x - 6)(x + 4)
150
383
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.90625
4
CC-MAIN-2015-27
longest
en
0.865446
https://www.studypool.com/discuss/1144851/one-gallon-of-paint-volume-3-78-10-3-m3-covers-an-area-of-12-0-m2-what-is?free
1,510,968,023,000,000,000
text/html
crawl-data/CC-MAIN-2017-47/segments/1510934804125.49/warc/CC-MAIN-20171118002717-20171118022717-00570.warc.gz
896,997,072
14,434
# One gallon of paint (volume = 3.78 10-3 m3) covers an area of 12.0 m2. What is label Physics account_circle Unassigned schedule 1 Day account_balance_wallet \$5 One gallon of paint (volume = 3.78  10-3 m3) covers an area of 12.0 m2. What is the thickness of the fresh paint on the wall? Sep 1st, 2015 To solve this, you can use the equation: Volume=area*thickness thickness=volume/area thickness=(3.78*10^-3)/(12.0) thickness=3.15*10^(-4) meters To help you better understand the issue, you can assume that thickness is in the unit of 'meters'.  You are given the volume in 'meters^3' and area in 'meters^2'.  To get an answer in 'meters', you can divide the volume by the area, and the 'meters^2' will cancel out, leaving you with 'meters'. Sep 1st, 2015 ... Sep 1st, 2015 ... Sep 1st, 2015 Nov 18th, 2017 check_circle
277
832
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.46875
3
CC-MAIN-2017-47
latest
en
0.823224
https://dsp.stackexchange.com/questions/63194/fourier-transform-of-sum-n-infty-infty-1n-deltat-nt-0
1,653,472,928,000,000,000
text/html
crawl-data/CC-MAIN-2022-21/segments/1652662584398.89/warc/CC-MAIN-20220525085552-20220525115552-00161.warc.gz
277,493,438
65,980
# Fourier transform of $\sum_{n=-\infty}^\infty(-1)^n\delta(t-nT_0)$ Given $$x(t)$$ and $$h(t)=\sum_{n=-\infty}^\infty(-1)^n\delta(t-nT_0)$$, I have to compute $$Y(f)$$, where $$y(t)=x(t)h(t)$$. I have thought about using that, in this case, $$Y(f)=X(f)*H(f)$$. I know that $$\mathscr{F}(\sum_{n=-\infty}^\infty\delta(t-nT_0))=T_0^{-1}\sum_{n=-\infty}^\infty\delta(t-nf_0)$$, but how can I deal with that $$(-1)^n?$$ Note that the given $$h(t)$$ can be written as $$h(t)=g(t)-g(t-T_0)\tag{1}$$ with some $$g(t)$$ the Fourier transform $$G(f)$$ of which you know. So from $$(1)$$ you then get $$H(f)=G(f)\left(1-e^{-j2\pi fT_0}\right)\tag{2}$$ • I think I have it: If $g(t)=\sum_{n=-\infty}^\infty\delta(t-2nT_0)$, then $g(t)-g(t-T_0)=\sum_{n=-\infty}^\infty\delta(t-2nT_0)-\sum_{n=-\infty}^\infty\delta(t-2(n+1)T_0)=h(t)$, and then we can calculate $H(f)$. So tricky! Thanks!! Jan 12, 2020 at 21:06 • @Gibbs: Almost there. There should be a minus sign on the RHS of your equation, and the argument of the delta impulse should be $(t-T_0-2nT_0)=(t-(2n+1)T_0)$. But you just need $g(t)$ and $G(f)$, and then you just use Eq. $(2)$. Jan 12, 2020 at 21:10
494
1,153
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 13, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.875
4
CC-MAIN-2022-21
longest
en
0.688367
https://nl.mathworks.com/matlabcentral/cody/problems/44401-vertical-matrix-sort/solutions/1360488
1,601,285,068,000,000,000
text/html
crawl-data/CC-MAIN-2020-40/segments/1600401598891.71/warc/CC-MAIN-20200928073028-20200928103028-00597.warc.gz
512,757,137
16,685
Cody # Problem 44401. Vertical matrix sort Solution 1360488 Submitted on 28 Nov 2017 by tzhai asor This solution is locked. To view this solution, you need to provide a solution of the same size or smaller. ### Test Suite Test Status Code Input and Output 1   Pass x = []; y_correct = []; assert(isequal(upAndDown(x),y_correct)) y = [] 2   Pass x = 0; y_correct = [0;0]; assert(isequal(upAndDown(x),y_correct)) y = 0 0 3   Pass x = [1 2 3 4]; y_correct = [1 2 3 4; 1 2 3 4]; assert(isequal(upAndDown(x),y_correct)) y = 1 2 3 4 1 2 3 4 4   Pass x = [8 9 3 9; 9 6 5 2; 2 1 9 9]; y_correct = [2 1 3 2; 8 6 5 9; 9 9 9 9; 9 9 9 9; 8 6 5 9; 2 1 3 2]; assert(isequal(upAndDown(x),y_correct)) y = 2 1 3 2 8 6 5 9 9 9 9 9 9 9 9 9 8 6 5 9 2 1 3 2
350
748
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.859375
3
CC-MAIN-2020-40
latest
en
0.540761
https://ch.mathworks.com/matlabcentral/cody/problems/1813-06-matrix-equations-1/solutions/1986108
1,582,907,349,000,000,000
text/html
crawl-data/CC-MAIN-2020-10/segments/1581875147234.52/warc/CC-MAIN-20200228135132-20200228165132-00112.warc.gz
303,401,870
15,904
Cody # Problem 1813. 06 - Matrix Equations 1 Solution 1986108 Submitted on 22 Oct 2019 by Adobe Fl This solution is locked. To view this solution, you need to provide a solution of the same size or smaller. ### Test Suite Test Status Code Input and Output 1   Pass aVec = [3.14 15 9 26]; bVec = [2.71;8;28;182]; aMat = ones(9,9)*2; ref = (aVec*bVec)*aMat^2; user = MyFunc(); assert(isequal(user,ref)) aVec = 3.1400 15.0000 9.0000 26.0000 bVec = 2.7100 8.0000 28.0000 182.0000 aMat = 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 xMat = 1.0e+05 * 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 2   Pass aRef = [3.14 15 9 26]; [xMat aVec] = MyFunc(); assert(isequal(aRef,aVec)) aVec = 3.1400 15.0000 9.0000 26.0000 bVec = 2.7100 8.0000 28.0000 182.0000 aMat = 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 xMat = 1.0e+05 * 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 
1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 3   Pass bRef = [2.71;8;28;182]; [xMat aVec bVec] = MyFunc(); assert(isequal(bRef,bVec)) aVec = 3.1400 15.0000 9.0000 26.0000 bVec = 2.7100 8.0000 28.0000 182.0000 aMat = 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 xMat = 1.0e+05 * 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 4   Pass aRef = ones(9,9)*2; [xMat aVec bVec aMat] = MyFunc(); assert(isequal(aRef,aMat)) aVec = 3.1400 15.0000 9.0000 26.0000 bVec = 2.7100 8.0000 28.0000 182.0000 aMat = 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 xMat = 1.0e+05 * 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405 1.8405
2,765
3,986
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.21875
3
CC-MAIN-2020-10
latest
en
0.381923
https://mathematica.stackexchange.com/questions/47341/what-is-the-correct-way-to-pattern-match-nested-lists
1,701,205,196,000,000,000
text/html
crawl-data/CC-MAIN-2023-50/segments/1700679099942.90/warc/CC-MAIN-20231128183116-20231128213116-00380.warc.gz
460,356,608
42,139
# What is the correct way to pattern match nested lists? I have the following function: f[lst_List /; MatchQ[lst, SOME PATTERN HERE]] := ({#[[1]] + #[[2]], #[[1]] - #[[2]]} &) /@ lst; I want to match lists that are ordered pairs of reals: {{0.1, 0.2}, {0.3, 0.4}}, and so on. I tried the following pattern: MatchQ[{{0.1, 0.2},{0.3, 0.4}}, _List[_List[_Real, _Real]]] but this returned False. I know that: MatchQ[{{0.1, 0.2},{0.3, 0.4}}, {__List}] returns True, but this pattern is not sufficiently specific, since it also matches {{}}, among others. What is the correct way to construct the pattern I want? • MatchQ[{{0.1, 0.2}, {0.3, 0.4}}, List[List[_?NumericQ, _?NumericQ] ..]] ? May 5, 2014 at 23:05 • MatchQ[{{0.1, 0.2}, {0.3, 0.4}}, {{Except[_Complex], Except[_Complex]} ..}] – ciao May 5, 2014 at 23:06 • Thank you very much. They work. May 5, 2014 at 23:17 There is no need to explicitly refer to MatchQ. Mathematica's pattern language is up to expressing what you want as an argument pattern. f[lst : {{Repeated[Except[_Complex, _?NumberQ], {2}]} ..}] := ({#[[1]] + #[[2]], #[[1]] - #[[2]]}&) /@ lst f @ {{0.1, 0.2}, {0.3, 0.4}, {1/2, 1}, {2, 2.}} {{0.3, -0.1}, {0.7, -0.1}, {3/2, -(1/2)}, {4., 0.}} The function does not evaluate for lists containing pairs having a complex component. f @ {{1, 1}, {1, I}} f[{{1, 1}, {1, I}}] It is not defined in the question what you wish to consider "real" for the purpose of this pattern. Is Pi real for your purposes for example? If you want to match only explicit decimal values you should use _Real, e.g. {{_Real, _Real} ..}. If you want something more general I propose: realQ = Re[#] == # &; Because Re is Listable this can be applied to arrays, allowing: f[lst : {{_, _} ..}?realQ] := (* body *) This is clear, concise, and has lower overhead than what m_goldberg proposed. • Thank you very much for this explanation. Jul 9, 2014 at 7:53
670
1,918
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.875
3
CC-MAIN-2023-50
longest
en
0.832781
https://justaaa.com/statistics-and-probability/336472-assume-that-an-economy-is-governed-by-the
1,695,419,749,000,000,000
text/html
crawl-data/CC-MAIN-2023-40/segments/1695233506423.70/warc/CC-MAIN-20230922202444-20230922232444-00340.warc.gz
374,746,586
10,637
Question # Assume that an economy is governed by the Phillips curve π = πe – 0.5(u –... Assume that an economy is governed by the Phillips curve π = πe – 0.5(u – 0.06), where π = (P – P–1)/P–1, πe = (Pe – P–1)/P–1, and 0.06 is the natural rate of unemployment. Further assume 9 πe = π–1. Suppose that, in period zero, π = 0.03 and πe = 0.03—that is, that the economy is experiencing steady inflation at a 3-percent rate. a. Now assume that the government decides to impose whatever demand is necessary to cut unemployment to 0.04. Suppose the government follows this policy for periods 1 through 5. Create a table of π and πe for these five periods. b. Assume that, for periods 6 through 10, the government decides to hold unemployment at 0.06. Create another table of π and πe for these five periods. Is there any reason to expect the inflation rate to go back to 0.03? c. If the government persisted in its behavior under part a, do you think the public would continue for long forming expectations according to πe = π–1? Why? #### Homework Answers Answer #1 Answer: NOTE:: I HOPE YOUR HAPPY WITH MY ANSWER....***PLEASE SUPPORT ME WITH YOUR RATING... ***PLEASE GIVE ME "LIKE"...ITS VERY IMPORTANT FOR ME NOW....PLEASE SUPPORT ME ....THANK YOU Know the answer? Your Answer: #### Post as a guest Your Name: What's your source? #### Earn Coins Coins can be redeemed for fabulous gifts. ##### Not the answer you're looking for? Ask your own homework help question
386
1,478
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.046875
3
CC-MAIN-2023-40
latest
en
0.908851
https://appdividend.com/2020/01/25/golang-operator-example-operators-in-go-tutorial/
1,582,677,809,000,000,000
text/html
crawl-data/CC-MAIN-2020-10/segments/1581875146176.73/warc/CC-MAIN-20200225233214-20200226023214-00316.warc.gz
268,255,749
36,537
AppDividend Latest Code Tutorials # Golang Operator Example | Operators In Go Tutorial Golang operator is a symbol that tells the compiler to perform specific mathematical or logical manipulations. Go language has rich inbuilt operators and provides the following types of operators. Golang operators are the foundation of any programming language. The functionality of the Go language is incomplete without the use of operators. Operators allow us to perform the various kinds of operations on operands. In Go language, operators can be classified based upon their different functionality. ## Golang Operator Operators are tokens that tell the compiler to perform particular operations that may be mathematical, relational, or logical. The Golang language has inbuilt operators that can be grouped into the following categories. 1. Arithmetic Operators 2. Relational Operators 3. Logical Operators 4. Bitwise Operators 5. Misc Operators ## Arithmetic Operators These are used to perform Arithmetic operators on operands in Golang. Operator Description Example + Adds two operands X + Y gives 21 Subtracts second operand from the first X – Y gives -11 * Multiplies both operands X * Y gives 100 / Divides the numerator by the denominator. Y / X gives 29 % Modulus operator gives a remainder after the integer division. Y % X gives 0 ++ Increment operator. It increases the integer value by one. X++ gives 11 Decrement operator. It decreases the integer value by one. X– gives 90 The -, +, !, &, *, <-, and ^ are also known as unary operators, and the precedence of unary operators is higher. ++ and — operators are from statements they are not expressions, so they are out from the operator hierarchy. See the following code example. ```// hello.go package main import "fmt" func main() { var p int = 23 var q int = 60 fmt.Println(p + q) } ``` #### Output ```go run hello.go 40``` ## Relational or Comparison Operators The following table lists all the comparison operators supported by the Golang. 
Assume variable X holds 11, and variable Y holds 21. Operator Description Example == It checks if the values of two operands are equal or not; if yes, then the condition becomes true. (X == Y) is not true. != It checks if the values of two operands are equal or not; if not equal, then the condition becomes true. (X != Y) is true. > It checks if the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true. (X > Y) is not true. < It checks if the value of the left operand is less than the value of the right operand; if yes, the condition becomes true. (X < Y) is true. >= It checks if the value of the left operand is greater than or equal to the value of the right operand; if yes, the condition becomes true. (X >= Y) is not true. <= It checks if the value of the left operand is less than or equal to the value of the right operand; if yes, the condition becomes true. (X <= Y) is true. See the following code example of Relational operators. ```// hello.go package main import "fmt" func main() { var k int = 21 var b int = 19 if k != b && k <= b { fmt.Println("1st True") } if k != b || k <= b { fmt.Println("2nd True") } if !(k == b) { fmt.Println("3rd True") } }``` #### Output ```go run hello.go 2nd True 3rd True``` ### Key points 1. Boolean, integer, floats, complex values, and strings are equal. 2. Strings are ordered lexically byte-wise. 3. Two pointers are equal if they point to the same variable or if both are nil. 4. Two-channel values are equal if they were built by the same call to make or if both are nil. 5. Two interface values are equal if they have identical dynamic types and equal concrete values or if both are nil. ## Logical Operators Logical operators are used to combine two or more conditions/constraints or to complement the evaluation of the original condition into consideration. 1. Logical AND: The ‘&&’ operator returns boolean True when both conditions in consideration are satisfied. 
Otherwise, it returns false. For example, a && b returns True when both a and b are True (i.e., non-zero). 2. Logical OR: The ‘|| operator returns True when one or both of the conditions in consideration are satisfied. Otherwise, it returns False. For example, a || b returns True if one of a or b is True (i.e., non-zero). Of course, it returns true when both a and b are True. 3. Logical NOT: The ‘!’(NOT) operator returns True, if the condition in consideration is not satisfied. Otherwise, it returns false. For example, !a returns true if a is False, i.e., when a=0. See the following code example of Logical operators. ```// hello.go package main import "fmt" func main() { var x int = 11 var y int = 21 if x != y && x >= y { fmt.Println("1st True") } if x != y || x <= y { fmt.Println("2nd True") } } ``` #### Output ```go run hello.go 2nd True``` ## Bitwise Operators Bitwise operators work on bits and perform the bit-by-bit operation. The truth tables for &, |, and ^ are as follows. 1. & (bitwise AND): It takes two numbers as operands and does AND on every bit of the two numbers. The output of AND is 1 only if both bits are 1. 2. | (bitwise OR): It takes two numbers as operands and does OR on every bit of two numbers. The output of OR is 1 any of the two bits is 1. 3. ^ (bitwise XOR): It takes two numbers as operands and does XOR on every bit of two numbers. The output of XOR is 1 if the two bits are different. 4. << (left shift): It takes two numbers, left shifts the bits of the first operand, the second operand decides a number of places to shift. 5. >> (right shift): It takes two numbers, right shifts the bits of the first operand, the second operand decides a number of places to shift. 6. &^ (AND NOT): The &^ is a bit, clear operator. See the following code example of a bitwise operator in Go. 
```// hello.go package main import "fmt" func main() { p := 19 k := 21 // & (bitwise AND) op1 := p & k fmt.Printf("Result of p & k = %d", op1) // | (bitwise OR) op2 := p | k fmt.Printf("\nResult of p | k = %d", op2) // ^ (bitwise XOR) op3 := p ^ k fmt.Printf("\nResult of p ^ k = %d", op3) // << (left shift) op4 := p << 1 fmt.Printf("\nResult of p << 1 = %d", op4) // >> (right shift) op5 := p >> 1 fmt.Printf("\nResult of p >> 1 = %d", op5) // &^ (AND NOT) op6 := p &^ k fmt.Printf("\nResult of p &^ k = %d", op6) } ``` #### Output ```go run hello.go Result of p & k = 17 Result of p | k = 23 Result of p ^ k = 6 Result of p << 1 = 38 Result of p >> 1 = 9 Result of p &^ k = 2``` ## Assignment Operators See the following assignment operators. Operator Description Example = It is a simple assignment operator, Assigns values from right side operands to left side operand. Z = X + Y will assign value of X + Y into Z += It adds AND assignment operator, It adds right operand to the left operand and assigns the result to left operand. Z += X is equivalent to Z = X + Y -= It subtracts AND assignment operator, It subtracts right operand from the left operand and assigns the result to left operand. Z -= X is equivalent to Z = Z – X *= It multiplies AND assignment operator, It multiplies right operand with the left operand and assigns the result to the left operand. Z *= X is equivalent to Z = Z * X /= It divides AND assignment operator, It divides left operand with the right operand and assigns the result to left operand. Z /= X is equivalent to Z = Z / X %= Modulus AND assignment operator, It takes modulus using two operands and assigns the result to the left operand. Z %= X is equivalent to Z = Z % X <<= Left shift AND assignment operator. Z <<= 2 is same as Z = Z << 2 >>= Right shift AND assignment operator. Z >>= 2 is same as Z = Z >> 2 &= Bitwise AND assignment operator. Z &= Z is same as Z = Z & 2 ^= Bitwise exclusive OR and assignment operator. 
Z ^= 2 is same as Z = Z ^ 2 |= Bitwise inclusive OR and assignment operator. Z |= 2 is same as Z = Z | 2 See the following code example of Assignment Operators. ```// hello.go package main import "fmt" func main() { var x int = 19 var y int = 21 // “=”(Simple Assignment) x = y fmt.Println(x) x += y fmt.Println(x) //“-=”(Subtract Assignment) x -= y fmt.Println(x) // “*=”(Multiply Assignment) x *= y fmt.Println(x) // “/=”(Division Assignment) x /= y fmt.Println(x) // “%=”(Modulus Assignment) x %= y fmt.Println(x) } ``` #### Output ```go run hello.go 21 42 21 441 21``` ## Miscellaneous Operators See the following operators. 1. &: This operator returns the address of the variable. 2. *: This operator provides a pointer to a variable. 3. <-:The name of this operator is received. It is used to receive a value from the channel. See the following code example of Miscellaneous operators. ```// hello.go package main import "fmt" func main() { x := 9 y := &x fmt.Println(*y) *y = 11 fmt.Println(x) } ``` #### Output ```go run hello.go 9 11``` ## Conclusion Operators are one of the building blocks of any programming language. That is why the complete introduction of the Go language is incomplete without the use of operators. We have seen Arithmetic, logical, comparison, bitwise, assignment, and miscellaneous operator. Finally, the Golang operator example is over. Identifiers in Golang Keywords in Golang Golang Variables Golang Constants Introduction to Golang Installing Golang on MacOS This site uses Akismet to reduce spam. Learn how your comment data is processed.
2,406
9,458
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.765625
3
CC-MAIN-2020-10
longest
en
0.865789
https://oeis.org/A175318/internal
1,627,652,359,000,000,000
text/html
crawl-data/CC-MAIN-2021-31/segments/1627046153966.60/warc/CC-MAIN-20210730122926-20210730152926-00569.warc.gz
447,128,610
2,788
The OEIS Foundation is supported by donations from users of the OEIS and by a grant from the Simons Foundation. Hints (Greetings from The On-Line Encyclopedia of Integer Sequences!) A175318 a(n) = Sum_{k<=n} A007955(k), where A007955(m) = product of divisors of m. 0 %I %S 1,3,6,14,19,55,62,126,153,253,264,1992,2005,2201,2426,3450,3467,9299, %T 9318,17318,17759,18243,18266,350042,350167,350843,351572,373524, %U 373553,1183553,1183584,1216352,1217441,1218597,1219822,11297518,11297555,11298999,11300520 %N a(n) = Sum_{k<=n} A007955(k), where A007955(m) = product of divisors of m. %e For n = 4, with b(n) = A007955(n), a(4) = b(1)+b(2)+b(3)+b(4) = 1+2+3+8 = 14. %o (PARI) prd(n) = my(dn = divisors(n)); prod(i=1, #dn, dn[i]); %o a(n) = sum(k=1, n, prd(k)); \\ _Michel Marcus_, Dec 09 2014 %Y Cf. A007955. %K nonn %O 1,2 %A _Jaroslav Krizek_, Apr 01 2010 %E Edited and more terms from _Michel Marcus_, Dec 09 2014 Lookup | Welcome | Wiki | Register | Music | Plot 2 | Demos | Index | Browse | More | WebCam Contribute new seq. or comment | Format | Style Sheet | Transforms | Superseeker | Recent The OEIS Community | Maintained by The OEIS Foundation Inc. Last modified July 30 09:18 EDT 2021. Contains 346359 sequences. (Running on oeis4.)
486
1,260
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.828125
3
CC-MAIN-2021-31
latest
en
0.602091
http://www.nelson.com/nelson/school/elementary/mathK8/math6/parentcentre/parentsurf_ch08_lesson01.html
1,516,138,658,000,000,000
text/html
crawl-data/CC-MAIN-2018-05/segments/1516084886739.5/warc/CC-MAIN-20180116204303-20180116224303-00091.warc.gz
505,276,624
3,095
Mathematics 6 # Surf for More Math ## Lesson 1- Unit Relationships To encourage your child to have fun on the Web while learning about Unit Relationships, here are some games and interactive activities they can do on their own or in pairs. ### Goal Identify relationships between and among linear and square metric units. ### Builds Upon Student Book pages 240-241 ### Instructions for Use Touch Pegs prompts your child to investigate measurable attributes of objects and the units. To use Touch Pegs, select a circle by holding the mouse button down, and drag it to any location on the gird. Release the circle to anchor it. A red node appears at the top of the circle. Click on any part of the circle and drag it in any direction. It will stretch much like a rubber band. Secure the band to any points on the grid. Make squares, triangles, rectangles and many geometrical shapes. Click on the circle again to add other bands. To color the area in the shape, click first on the shape and then on the color buttons to the left. To release a node, hold the Ctrl key down and click on the node. Click on the 'Measure' button to determine the area and perimeter of the shape. Click on the 'Clear' button to start the exercise over again.
271
1,245
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.8125
3
CC-MAIN-2018-05
latest
en
0.889908
http://www.thestudentroom.co.uk/wiki/Revision:Waves_2
1,369,427,270,000,000,000
text/html
crawl-data/CC-MAIN-2013-20/segments/1368705043997/warc/CC-MAIN-20130516115043-00013-ip-10-60-113-184.ec2.internal.warc.gz
751,264,956
27,323
• # Revision:Waves 2 ## 11.1 Traveling waves ### 11.1.1 I assume what we're talking about here is , the equation in the data book. This can be used to describe a traveling wave as follows: • the amplitude is (because sine curves range from 1 to -1, multiplying by will make it range from to ) • is defined as • is defined as . The value of will shift the whole curve to the left or right (assuming + in the middle, increasing moves it to the left, decreasing to the right). The period of the curve will be defined by the wavelength and frequency in the equation...it's a good idea to play around with this on a graphics calculator (or even better, a prog like the PowerMac graphics calculator, which can let you change parameters and graph in real time) - anyway, it's used for modeling waves. ### 11.1.2 This is effectively what I described in the SL bit...In case you forgot Displacement vs Time - This graph tracks the movement of a particle as a wave moves through it. With displacement on the vertical axis, and time on the horizontal, the particle will move up and down in a sine curve type pattern. This graph allows up to find both frequency (which will be the number of crests in 1 sec) and period (which will be the time between crests), but tells us nothing about the wave speed or wavelength. Displacement vs position - This is basically a 'snapshot' of the displacement of all the particles going through the medium at a given time. Displacement is on the vertical axis, and position (or ie distance from an arbitrary origin in the material) is on the . The distance between peaks represents the wavelength. The wave speed can not be calculated directly from this graph, but only by combining the information from this and the previous one ### 11.1.3 Huygens' principle - This is a geometrical representation of how waves move through media. Each wave front is assumed to be an infinite number of point sources, each radiating in a circle. 
After a given period of time, a new wave front is drawn along the edges of these radiated circles, and the process is repeated...to draw it on paper, start with a wave front, place a number of points, and from these, draw the waves being emitted as if each of these were a point source. This results in a series of circles, but obstructions can change this. Waves could be reflected or absorbed by an object, waves entering a medium of higher optical density will slow down (and so won't go as far). After a given period of time (depends on the speed of the wave), draw an new wave front running along the edges of these circles as appropriate for the situation. The process is repeated over and over until it gets so boring that you stop. This helps to explain some of the phenomena of waves...diffraction...a very thin slit will only have a single point source, and so it will radiate in a circle, or wraps around an object, but you really need to draw a diagram to see that...refraction...as it enters the more dense medium, the waves slow down, pulling the wave around, and so on. This model can be applied to any waves but they'll probably be light, water or sound. ### 11.1.4 Partial reflection occurs when ever light changes media...when light goes from water to air, some light is reflected from the boundary, ditto going the other way. Total internal reflection occurs when light enters a boundary (from the more dense side) at an angle greater than the critical angle, and all the light is refracted back into the medium. This critical angle can be found by inserting 90 as the angle of refraction in Snell's law, thus creating . Any angle of incidence above this will totally internally reflect. At this exact angle, the light will run along the boundary, and below it, refraction (and partial reflection) will occur as usual. ### 11.1.5 : Some examples Light through optical fibers - This is used both as a communication system, and as a sort of camera in hard to reach places. 
Light is totally internally reflected through the glass core, which can be bent as long as the light passing through it does not exceed the critical angle (see optics for more info) Prismatic reflectors - Glass has a critical angle above 45, and so it is possible to use a iss, right angled triangular prism as a reflector...light enters the longest side, bounces off one side, off the other, then out the way it came in...this is more effective than using a mirror because 100% of the light is reflected, where as mirrors are never 100% efficient. This set up can also be rearranged to build a periscope (light goes an and out the two short sides, bouncing off the long one) without mirrors. Air near hot surfaces - air's refractive index changes with temperature, and so some refraction can occur when waves travel through an area of hot air, making that shimmering type thing you see near the ground on hot days (at least I suppose that's what this means) ### 11.1.6 Refractive index is dependent on the wavelength thus different wavelengths of light will be refracted different amounts through the same boundary. Short wavelength light will be refracted more, and long wave length less. This means that if white light is shone onto a prism, then the light can be separated out into it's component colors, red being refracted the least, and violet the most. ## 11.2 Interference and Diffraction ### 11.2.1 If, for example, we have two point sources producing waves in a circle, they will interfere differently at different points...the easiest way to do this is to draw circles out from the source representing the crests (Except now we can call this Huygen's principle)...when two of these coincide, constructive interference produces a bigger crest. When two gaps coincide, we get a bigger trough, when one crest and one trough coincide, there is destructive interference, and they add to zero. This allows the interference pattern, and the amplitude at each point to be found. 
Also relevant to the discussion of Huygen's principle is that fact that these point sources effectively produce a wave front, since other parts of the wave destruct, thus demonstrating how exactly the Huygen principle can be accounted for (beyond being a geometric representation). ### 11.2.2 For two sources to be coherent, they must emit the frequency waves, in the same phase (ie when one emits a crest, so must the other). Path difference is the difference between the distances of a certain point from each source. The the path difference is a multiple of the wavelength, then constructive interference (an antinode) is produced), if it's a multiple + 1/2 complete destructive interference occurs (producing an node), and points in between have something between a node and an antinode. The pattern produced is a series of lines pointing away from the point exactly between the sources, and alternating constructive-destructive-constructive out from the center. ### 11.2.3 Light strikes the two slits, and then produces two coherent point sources next to each other. 1) Light striking the center of the screen has an equal path difference from both, and so produces a bright band on the screen level with the slit (since the light is spread over the smallest area). 2) Light traveling out at such an angle that the light from the top source must travel exactly 1/2 a wavelength further than the bottom one to reach the screen. this means they are out of phase, and annul on the screen. As we move further around, the path difference will be 1 wavelength, they will reinforce, and produce a bright band, and so on alternating. This experiment can be defined by the equation: where is the distance between the centers of the two slits, is the bandwidth (distance between consecutive bright bands on the screen) and is the distance to the screen). The bit assumes a curved screen, but it's ok so long at you're not too far from the center. 
I don't know if this is really necessary - but seen Optics for more detail. ### 11.2.4 Thin films - This is straight out of optics, so there might be too much detail...you decide. This films - The classic example of this is a thin layer of oil (assumed to have lower refractive index than water) floating on top of water. (This produces a sort of rainbow effect in the right light conditions). When light enters the oil, some of it is reflected (with a phase change). The remaining light continues down and some is reflected of the oil-water boundary (again with a phase change, meaning the two can be ignored...though if the film is like a soap bubble, only one phase change will occur, and it must be accounted for). This means that if the film is a certain thickness, certain wavelengths will be reinforced will others will destructively interfere (this is how they make those sun glasses which look red from the outside etc...). Nb - the light is always assumed to enter and leave vertically, though it will be easier to draw at an angle, this should be noted with any diagram...It may be necessary to think of the angle involved if the question wants fringes on the film rather than certain wavelengths being reinforced/destructively interfering though. I don't know what to say about newton's rings, since it says no experimental details will be required...well, here's all about it. Newton's rings - In newton's rings, there is a flat glass surface with a curved plate (think of the bottom part of a sphere being cut off) placed on top of it. This means the gap between the two pieces of glass increases going further out from the center. Light is reflect of the bottom of the curved plate (with no phase change) and off the top of the base plate (with a phase change). This means that to reinforce, the actual difference between the two distance traveled must be (k+1/2) x wavelength (where k is an int). 
Nb...this means that at the very center there will be a dark spot, not a bright spot (as with the various slit ones above). ### 11.2.5 A diffraction grating is basically a series of slits, rather than two (as in young's double slit). These slits produce much more precise lines, because rather than just requiring two beams to coincide, they require many to do so. This produces a much sharper pattern, and is more easy to analyze. If white light goes through the diffraction grating, different frequencies will diffract different amounts, and so spectra will be produced. Like this, then, the component colors of light can be found, with there exact wavelengths (because is affects the angle at which the bright bands occur). Calculations can be done with . where is the distance between the center of two consecutive slits, is the bandwidth (distance between consecutive bright bands on the screen) and is the distance to the screen). Also relevant here is a quick explanation of the diffraction pattern for each single slit (as this 'defines an envelope on the interference patters)...ie it shows what it will be under. There is a large wide peak of intensity in the center, dropping to zero, followed by a series of smaller peaks of half the width of the central one. Each minima for this is defined by where is the width of each slit. I don't know if they really want much detail on this... ## 11.3 Source/detector movement ### 11.3.1 Shock waves are generally formed when the source of sound waves is traveling above the speed of sound. as the plane (since it's usually a plane) approaches the speed of sound, the sound waves don't really get away form the plane, but rather build up in front of the plane. Over time, many of these waves constructively interfere, producing what is known as the sound barrier. Once the plane moves faster than this, the sound waves are left behind the plane, creating a shock wave, which follows under the plane. 
The angle of the shock wave can be found by taking one point to be the source, then finding where the source would have been 1 second ago. from this point, calculate how far the wave would have gone out from this point in that second, and draw in the circle. A line can then be drawn from the point to the edge of the circle (in a tangent). This will be at 90 degrees to a line from the center, and since two sides are known, the angle of the shock wave can be calculated. ### 11.3.2 Doppler effect...This effect is seen by the change in frequency of sound when either the source or the observer are moving...this therefore affects the actual number of waves the observer hears per second, and so changes the observed frequency. If the observer and source are moving closer together, then more wavefronts will be observed per second, and so the frequency will be higher. If they are moving apart, then fewer wavefronts will be observed, and so the frequency will be lower. ### 11.3.3 When the source is at rest, the distance between wave crests is lambda. The the frequency is , then the time () between crests is . If we then assume that the source is moving towards the observer at vs, then in time , the first crest has moved a distance (): . In the same time, the source has moved in the same direction. At time , the source emits another wave, and so the distance between these two will be . Therefore, the new wavelength will be . This can be expressed as: ( and since , and ) The new frequency is given by: Unparseable or potentially dangerous latex formula. Error 4: no dvi output from LaTeX. It is likely that your formula contains syntax errors or worse. \displaystyle f' = \frac{V{\lambda '} = \frac{v}{(\lambda (1 - \frac{V_s}{V}))} , and since (which is the same as the one in the data book). 
If the motion is away from the observer, then will be negative, making the sign in the middle positive, but this can be determined as you work out the problem if you know whether the wavelength should be higher or lower ) When the observer is moving towards the source, the problem is slightly different because the wavelength isn't actually changing, but rather the relative velocity of the waves. The speed of the wave, . where is the velocity of sound in air. Thus, . Since , we get (This is for an observer moving towards the source, a sign change will be necessary as above). These can both be applied as appropriate to solve problems. ## 11.4 Standing waves ### 11.4.4 Nothing for this section - what goes here? Can anyone help out? ### 11.4.2 An overall graph of a standing wave will look like a sine curve superimposed over a -sine curve. at any given point in time, though, consecutive antinodes will be on opposite sides, so if one is up, the next will be down, then up and so on. The nodes will divide the string into equal segments, and so calculations can be done with a sort of arithmetic sequence thing. ### 11.4.3 Equation relating fundamental frequency to tension and mass per unit length. First, I should mention the equation . This allows us to calculate the velocity of a wave in a given string based on , the period and , the mass per meter of string. This equation can be equated to: . We can then play around with it, to get various formulae...for example, , and so on... ### 11.4.4 As I've said before, an open end in a pipe will have an antinode, and a closed end will have an node. Therefore, a closed-closed pipe will have a half wavelength, as will an open-open pipe, but an open-closed pipe will have one quarter. These are the fundamental frequencies, then half wavelengths can be added to get the first, then second and so on harmonics. Most of the problems involve relating the length to the wavelength / frequency of the sound produced.
3,428
15,582
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.59375
4
CC-MAIN-2013-20
latest
en
0.925891
https://oeis.org/A165973
1,675,409,004,000,000,000
text/html
crawl-data/CC-MAIN-2023-06/segments/1674764500044.16/warc/CC-MAIN-20230203055519-20230203085519-00013.warc.gz
448,029,553
4,393
The OEIS is supported by the many generous donors to the OEIS Foundation. Hints (Greetings from The On-Line Encyclopedia of Integer Sequences!) A165973 Number of reduced words of length n in Coxeter group on 26 generators S_i with relations (S_i)^2 = (S_i S_j)^10 = I. 1 1, 26, 650, 16250, 406250, 10156250, 253906250, 6347656250, 158691406250, 3967285156250, 99182128905925, 2479553222640000, 61988830565797200, 1549720764139860000, 38743019103369750000, 968575477581075000000 (list; graph; refs; listen; history; text; internal format) OFFSET 0,2 COMMENTS The initial terms coincide with those of A170745, although the two sequences are eventually different. Computed with MAGMA using commands similar to those used to compute A154638. LINKS G. C. Greubel, Table of n, a(n) for n = 0..500 Index entries for linear recurrences with constant coefficients, signature (24,24,24,24,24,24,24,24,24,-300). FORMULA G.f.: (t^10 + 2*t^9 + 2*t^8 + 2*t^7 + 2*t^6 + 2*t^5 + 2*t^4 + 2*t^3 + 2*t^2 + 2*t + 1)/(300*t^10 - 24*t^9 - 24*t^8 - 24*t^7 - 24*t^6 - 24*t^5 - 24*t^4 - 24*t^3 - 24*t^2 - 24*t + 1). MAPLE seq(coeff(series((1+t)*(1-t^10)/(1-25*t+324*t^10-300*t^11), t, n+1), t, n), n = 0..30); # G. C. Greubel, Sep 26 2019 MATHEMATICA coxG[{10, 300, -24}] (* The coxG program is at A169452 *) (* Harvey P. Dale, Mar 03 2016 *) CoefficientList[Series[(1+t)*(1-t^10)/(1-25*t+324*t^10-300*t^11), {t, 0, 25}], t] (* G. C. Greubel, Sep 26 2019 *) PROG (PARI) my(t='t+O('t^30)); Vec((1+t)*(1-t^10)/(1-25*t+324*t^10-300*t^11)) \\ G. C. Greubel, Sep 26 2019 (Magma) R:=PowerSeriesRing(Integers(), 30); Coefficients(R!( (1+t)*(1-t^10)/(1-25*t+324*t^10-300*t^11) )); // G. C. Greubel, Sep 26 2019 (Sage) def A165973_list(prec): P. = PowerSeriesRing(ZZ, prec) return P((1+t)*(1-t^10)/(1-25*t+324*t^10-300*t^11)).list() A165973_list(30) # G. C. 
Greubel, Sep 26 2019 (GAP) a:=[26, 650, 16250, 406250, 10156250, 253906250, 6347656250, 158691406250, 3967285156250, 99182128905925];; for n in [11..30] do a[n]:=24*Sum([1..9], j-> a[n-j]) -300*a[n-10]; od; Concatenation([1], a); # G. C. Greubel, Sep 26 2019 CROSSREFS Sequence in context: A164639 A164964 A165369 * A166420 A166613 A167079 Adjacent sequences: A165970 A165971 A165972 * A165974 A165975 A165976 KEYWORD nonn AUTHOR John Cannon and N. J. A. Sloane, Dec 03 2009 STATUS approved Lookup | Welcome | Wiki | Register | Music | Plot 2 | Demos | Index | Browse | More | WebCam Contribute new seq. or comment | Format | Style Sheet | Transforms | Superseeker | Recents The OEIS Community | Maintained by The OEIS Foundation Inc. Last modified February 3 01:46 EST 2023. Contains 360024 sequences. (Running on oeis4.)
1,034
2,653
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3
3
CC-MAIN-2023-06
latest
en
0.504494
http://binaryoptionssignalsfacts.com/binary-options-trading-tips/anyone-trade-binary-options/
1,532,025,907,000,000,000
text/html
crawl-data/CC-MAIN-2018-30/segments/1531676591216.51/warc/CC-MAIN-20180719183926-20180719203926-00229.warc.gz
47,053,748
8,187
## Binary Options How To Trade Binary Options Using Martingale Strategy Binary Options, my dear friends and followers! This is Matthew Coleman again with you and now we show you as usual, how I make money on the binary option platform, called IQoption. I've been trading on binary option for a few years and during this time I found strategy that works with no mistakes and I made money with it for a long time. Here is \$1,400 on myaccount, you see it's real money, it's real account. There is no demo mark below my name and now I'll trade on your eyes and show you how it works. So I will use the Martingale system, Martingale principle, which will allow me tomake good money and work on this binary option. It's main principle: if we lose our bet next time we have to make a stake, which is two and a half times bigger then the lost one. And once we win we get back tothe very first initial bet. So here I made a bet of \$100 and I won and I have \$188 after this bet. So I have one this bet and now I make the same \$100 bet. Here we go. I make a bet. I predict that the currency paragraph will go down, but as we can see I might be mistaken and in the case if I lose this bet next time I will have to make a stake of \$250 and even if I lose that stake of \$250, my next bet will be \$625, which is two and a half times bigger then lost one. Here I see I won this bet again so I make an initial bet of \$100 and I made a Put bet. So I anticipatethat the currency graph will go down after my bet. So most of the traders have to win 6070% of their bet to stay profitable during time. And they do all this difficult things like technical analysis, indicator some the graph, learn international economics and so long. But in our case we can lose 3, 4, 5 ormore bet in row but we need to win only once and after that we get our profit. 
So in this system (Martingale system) we can lose a few bet in a row, but once we win, only one single bet, it will cover all our previous losses and even get profit. So in our case now I have lost \$100 bet and I will make a bet of \$250. So I made this bet and now see how it works. If we win this bet our profit will be \$470, so it covers all our previous losses and all these stakes. So it covers this stake of \$250 and the previous bet of \$100 and gives us a little profit in this operate This Martingale system is quite simple but in the same time I think only the most clever of you will completely understand it and start to make good money with it. So you see I've lost a first bet of \$100 and then I have one bet of \$250. And get a profit of \$470. Right on your eyes you see how easy is to make money with this system, but if you don't use this system you may really lose your money. So I was working on this binary options platformfor a long long time and I found that this system, Martingale system principle is the only thing that helps to make good money without risks. After I won the bet of \$250 I go back to my first bet of \$100 and now you see I have won again Here is \$188 profit everytime Okey, I will make. Oh, not very good moment but that's ok. Our Martingale system will let us win in any case. I start from \$100, so my first initial bet is \$100, because I have enough money on my account I have \$1,684 on my account But you can start from any amounts, you know which is proportional to your deposit amount So less money you have on your account the less stakes you made We don't analyze Forex market deeply, instead of this we have a perfect moneymanagement system, which allow us to stay profitable all the time See one again. I make a Put bet, so I predict that the graph of the currency pair will go down. That's why for us is no difference which assets we use for our trading. 
### How To Use Support And Resistance Lines When Trading Binary Options Hi there.I am often asked in the comments how to determine the point of entry into the Martingale strategy.Today I'll show you how to use support and resistance levels in Binary options. We set the chart for 15minute or 5minutetimeframe. We use the Line tool and find the level of resistance by the tops of the candles. We find the level of support by the lowestlevels of the chart in the same way. And now we watch for the chart to reach orbreak through one of the levels – in this case we are betting in the opposite direction.Let's open 4 windows with different positions – it will be much faster in this way. Wedo the same thing in all the windows. Here we see the first touching of the level,and I bet \$10 in the opposite direction. And in the meantime, we watch the other positions.Here we see touching of the support level on eurodollar Position. We bet \$10. Great, our first two bets have triggered.Levels must be rearranged from time to time. And we wait for the next touching of the level. And here again we see the resistance levelbroken through, and we bet \$10 for a fall. You should wait until either a touching ora breakthrough. We see that our bet does not win, so I prepare \$30 in advance and againbet for a fall using a Martingale strategy. I hope you know the table of bets; if youdon't, I will write in the description below the tutorial. Here we see the breakthrough and bet \$10.And according to the Martingale strategy, we bet \$180 as our bid didn't trigger.Now euro dollar lost and we bet 30 for a rise. Pound dollar – bet 30 for a fall.Now, bet 180 on the euro dollar. And now all of our three bets have won, weare just waiting for eurodollar. We are waiting for our bet of \$180 to triggerWell, we lose the 180 bet, and then we bet 440, and I think that this bet will triggerfor sure. Now we see that the chart is back and we'llwin our bets. 
Here you can see my bets for the entire period.If you liked the tutorial, put likes, share with friends, subscribe to my RSS feed.
1,382
5,845
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.671875
4
CC-MAIN-2018-30
latest
en
0.95826
https://docs.derivative.ca/index.php?title=Function_CHOP&amp;oldid=14243
1,560,721,188,000,000,000
text/html
crawl-data/CC-MAIN-2019-26/segments/1560627998298.91/warc/CC-MAIN-20190616202813-20190616224813-00110.warc.gz
430,355,961
13,517
# Function CHOP ## Summary The Function CHOP provides more complicated math functions than found in the Math CHOP : trigonometic functions, logarithmic functions and exponential functions, and also audio decibels (dB)-power-amplitude conversions. Most of the functions require only one parameter, and they are applied as a unary operator to each input channel. Some functions take two parameters, and these require the use of the second input. The first parameter, X, is always a value from a channel in the first input. The second parameter, Y, is a value from a corresponding channel in the second input. Channels from each input are paired by name or index. Since many of these functions can produce math errors, an error handling tab is provided for error handling and recovery. Errors can be handled by replacing the bad sample with a pre-defined value or by using the value of the previous sample. Alternatively, cooking can be aborted upon error for debugging networks. ## Parameters - Function Page Function `func` - - Which math function to apply to the channels. All of the functions are unary functions except for the binary functions 'arctan (Input1/Input2)' and 'Input1 ^ Input2'. In the cases of power functions, a negative base is inverted first to avoid imaginary numbers, and the result is negated. • sqrt(x) Square Root `sqrt` - Square Root of Input 1. • abs(x) Absolute Value `abs` - Absolute Value of Input 1. • sign(x) Sign `sign` - Sign of Input 1. • cos(x) Cosine `cos` - Cosine of Input 1. • sin(x) Sine `sin` - Sine of Input 1. • tan(x) Tangent `tan` - Tangent of Input 1. • acos(x) Arccosine `acos` - Arccosine of Input 1. • asin(x) Arcsine `asin` - Arcsine of Input 1. • atan(x) Arctan( Input1 ) `atan` - Arctan of Input 1. • atan2(y,x) Arctan( Input1 / Input2 ) `atan2` - Arctan of (Input 1 / Input 2). • cosh(x) Hyperbolic Cosine `cosh` - Hyperbolic Cosine of Input 1. • sinh(x) Hyperbolic Sine `sinh` - Hyperbolic Sine of Input 1. 
• tanh(x) Hyperbolic Tangent `tanh` - Hyperbolic Tangent of Input 1. • log10(x) Log base 10 `log` - Log base 10 of Input 1. • logN(x) Log base N `logb` - Log base Base Value of Input 1. • ln(x) Natural Log `ln` - Natural Log of Input 1. • pow(10,x) 10 ^ Input1 `pow10` - 10 to the power of Input 1. • exp(x) e ^ Input1 `exp` - e to the power of Input 1. • pow(x) Base ^ Input1 `powe` - Base value to the power of Input 1. If Base value is negative result is -((-Base Value) ^ Input 1) • pow(x) Input1 ^ Exponent `powb` - Input 1 to the power of Exponent value. If Input 1 is negative result is -((-Input 1) ^ Exponent Value). • pow(x,y) Input1 ^ Input2 `pow` - ((-Input 1) ^ Input 2) • dB to Power `dbtopower` - Decibel (Input 1) to amplitude. • Power to dB `powertodb` - Amplitude (Input 1) to decibel. • dB to Amplitude `dbtoamp` - Decibel (Input 1) to amplitude. • Amplitude to dB `amptodb` - Amplitude (Input 1) to decibel. Base Value `baseval` - The value of the base for 'Log base N' and 'Base ^ Input1'. parameter name `/baseval` Exponent Value `expval` - The value of the exponent for `'Input1 ^ Exponent'`. channel name `/expval` Angle Units `angunit` - - For trigonometric functions, the angles can be measured in Degrees, Radians, or Cycles (0 to 1). • Degrees `deg` - • Radians `rad` - • Cycles `cycle` - Match by `match` - - How to pair channels together from the two inputs for the binary functions, by name or by channel index. • Channel Number `index` - • Channel Name `name` - ## Parameters - Error Page Error Handling `error` - - How to correct samples with math errors: • Abort With Error Message `abort` - Cooking aborts. • Replace With Specified Values `replace` - Values specified below. • Use The Previous Value `useprev` - Uses the last good result. + Infinity Value `pinfval` - Value to use when an infinity error occurs. Caused by sinh(), cosh() and tan(). - Infinity Value `ninfval` - Value to use when a negative infinity error occurs. Caused by sinh() and tan(). 
Domain Error Value `domval` - Value to use when a domain error occurs. Caused by asin(), acos(), log10(), logN(), ln() and sqrt(). Divide Error Value `divval` - Value to use when a divide by zero error occurs. Caused by pow(x,y). ## Parameters - Common Page Time Slice `timeslice` - Turning this on forces the channels to be "Time Sliced". A Time Slice is the time between the last cook frame and the current cook frame. Scope `scope` - To determine which channels get affected, some CHOPs use a Scope string on the Common page. Sample Rate Match `srselect` - - Handle cases where multiple input CHOPs' sample rates are different. When Resampling occurs, the curves are interpolated according to the Interpolation Method Option, or "Linear" if the Interpolate Options are not available. • Resample At First Input's Rate `first` - Use rate of first input to resample others. • Resample At Maximum Rate `max` - Resample to the highest sample rate. • Resample At Minimum Rate `min` - Resample to the lowest sample rate. • Error If Rates Differ `err` - Doesn't accept conflicting sample rates. Export Method `exportmethod` - - This will determine how to connect the CHOP channel to the parameter. Refer to the Export article for more information. • DAT Table by Index `datindex` - Uses the docked DAT table and references the channel via the index of the channel in the CHOP. • DAT Table by Name `datname` - Uses the docked DAT table and references the channel via the name of the channel in the CHOP. • Channel Name is Path:Parameter `autoname` - The channel is the full destination of where to export to, such has `geo1/transform1:tx`. Export Root `autoexportroot` - This path points to the root node where all of the paths that exporting by Channel Name is Path:Parameter are relative to. Export Table `exporttable` - The DAT used to hold the export information when using the DAT Table Export Methods (See above). 
## Operator Inputs • Input 0 - • Input 1 - TouchDesigner Build: Ableton Link • Analyze • Angle • Attribute • Audio Band EQ • Audio Device In • Audio Device Out • Audio Dynamics • Audio File In • Audio Filter • Audio Movie • Audio NDI • Audio Oscillator • Audio Para EQ • Audio Play • Audio Render • Audio Spectrum • Audio Stream In • Audio Stream Out • Beat • BlackTrax • Blend • Bullet Solver • Clip Blender • Clip • Clock • Composite • Constant • Copy • Count • CPlusPlus • Cross • Cycle • DAT to • Delay • Delete • DMX In • DMX Out • Envelope • EtherDream • Event • Expression • Extend • Fan • Feedback • File In • File Out • Filter • Function • Gesture • Handle • Helios DAC • Hog • Hokuyo • Hold • Import Select • In • Info • Interpolate • Introduction To CHOPs id • Inverse Curve • Inverse Kin • Join • Joystick • Keyboard In • Keyframe • Kinect • Lag • Experimental:Laser • Leap Motion • Leuze ROD4 • LFO • Limit • Logic • Lookup • LTC In • LTC Out • Math • Merge • MIDI In • MIDI In Map • MIDI Out • Mouse In • Mouse Out • NatNet In • Noise • Null • Object • Oculus Audio • Oculus Rift • OpenVR • OSC In • OSC Out • Out • Override • Panel • Parameter • Pattern • Perform • Pipe In • Pipe Out • PosiStageNet • Pulse • RealSense • Record • Rename • Render Pick • Reorder • Replace • Resample • S Curve • Scan • Script • Select • Sequencer • Serial • Shared Mem In • Shared Mem Out • Shift • Shuffle • Slope • SOP to • Sort • Speed • Splice • Spring • Stretch • Stype • Switch • Sync In • Sync Out • Tablet • Time Slice • Timeline • Timer • TOP to • Touch In • Touch Out • Trail • Transform • Trigger • Trim • Warp • Wave • ZED An Operator Family which operate on Channels (a series of numbers) which are used for animation, audio, mathematics, simulation, logic, UI construction, and many other applications. TouchDesigner is a hierarchy of components. "root" is the top-most network in the hierarchy. The Path is simply `/`. A typical path is `/project1/moviein1`. 
A Time Slice is the time from the last cook frame to the current cook frame. In CHOPs it is the set of short channels that only contain the CHOP channels' samples between the last and the current cook frame. A parameter in most CHOPs that restricts which channels of that CHOP will be affected. Normally all channels of a CHOP are affected by the operator. samples-per-second of a CHOP. Each CHOP in your network has a sample rate, whether it is used or not. The overall timeline has a "frame rate", which is the number of animation frames per second, generally your monitor display frequency. Exporting is the connection of CHOP channels to parameters of operators. The output of each exporting CHOP is one or more channels, active only while the CHOP Viewer is on. The current value of a channel can be exported to a parameter of any operator, overriding that parameter's value. See Parameter. An Operator Family that manipulates text strings: multi-line text or tables. Multi-line text is often a command Script, but can be any multi-line text. Tables are rows and columns of cells, each containing a text string. The location of an operator within the TouchDesigner environment, for example, `/geo1/torus1`, a node called `torus1` in a component called `geo1`. The path `/` is called Root. To refer instead to a filesystem folder, directory, disk file or `http:` address, see Folder. Parameters in TouchDesigner are an operator's settings (numbers, menus, flags, strings, paths) which the user can alter. Parameters for any operator can be opened by right-clicking on the operator and selecting "Parameters..." A currently selected operator's parameters can be viewed in a Network Editor by pressing the keyboard shortcut 'p'.
2,619
9,611
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.3125
3
CC-MAIN-2019-26
latest
en
0.613362
https://www.hunterpoolshop.com/how-long-are-swimming-pool-loans/
1,708,622,009,000,000,000
text/html
crawl-data/CC-MAIN-2024-10/segments/1707947473824.13/warc/CC-MAIN-20240222161802-20240222191802-00851.warc.gz
849,254,063
24,676
# How Long Are Swimming Pool Loans Looking to finance your dream swimming pool? Wondering how long swimming pool loans typically last? You're in the right place! In this article, we'll break down everything you need to know about the duration of swimming pool loans. From the application process to repayment terms, we'll guide you through it all. So sit back, relax, and let's dive into the world of swimming pool financing! Contents ## Understanding Cubic Yards and Olympic Swimming Pools Do you know what a cubic yard is? It's a unit of measurement used to determine the volume of an object, such as a swimming pool. Speaking of swimming pools, have you ever heard of an Olympic swimming pool? It's a specific size and shape that meets the requirements set by the International Olympic Committee. Understanding these measurements is important when it comes to planning and building a swimming pool. ### Definition of a Cubic Yard To understand cubic yards and their relevance to Olympic swimming pools, you need to grasp the concept of a cubic yard. A cubic yard is a unit of measurement used to determine volume. It represents a cube that's one yard long, one yard wide, and one yard high. In the context of swimming pool loans, the definition of a cubic yard becomes contextually relevant. When constructing an Olympic swimming pool, the size and volume of the pool are important factors. The measurement in cubic yards helps determine the amount of materials needed, such as concrete or water. It also affects the cost of the project and the overall loan amount. Understanding the concept of a cubic yard is crucial when planning and budgeting for an Olympic swimming pool. ### Definition of an Olympic Swimming Pool To understand the concept of an Olympic swimming pool and its relevance to cubic yards, you can continue the discussion by exploring the dimensions and volume of the pool. 
Can You Finance A Swimming Pool Can You Swim in a Pool With a Spray Tan An Olympic-sized swimming pool is a standardized pool used in Olympic Games and other international swimming competitions. It has specific dimensions of 50 meters in length, 25 meters in width, and a minimum depth of 2 meters. These pools are designed to accommodate multiple lanes for swimmers to compete simultaneously. The volume of an Olympic swimming pool is calculated by multiplying its length, width, and depth. This gives us a total volume of 2,500 cubic meters or approximately 3,281 cubic yards. Understanding the definition of an Olympic swimming pool is important when considering swimming pool loans or planning to build a pool of that scale. ### Importance of Measurements in Swimming Pools Understanding the importance of measurements in swimming pools, such as understanding cubic yards and Olympic swimming pools, can greatly impact your decision-making process when it comes to swimming pool loans. Proper measurements ensure that your pool fits perfectly in your backyard and meets your desired specifications. When applying for pool loans, accurate measurements are crucial in determining the loan amount you need and the repayment terms. Knowing the cubic yardage of your pool is essential for calculating the materials needed for construction and the cost of maintenance. Additionally, understanding the dimensions of an Olympic swimming pool, which measures 50 meters in length and 25 meters in width, can help you determine if you want a pool of similar size or a smaller one that better suits your needs. Accurate measurements play a vital role in making informed decisions and ensuring a successful swimming pool project. ## Calculating the Volume of an Olympic Swimming Pool To calculate the volume of an Olympic swimming pool, you need to know its dimensions. The formula for calculating the volume is length multiplied by width multiplied by depth. 
### Dimensions of an Olympic Swimming Pool To calculate the volume of an Olympic swimming pool, you'll need to determine its dimensions. How to Build Swimming Pool Philippines How to Set a Swimming Pool Timer South Africa Olympic size swimming pools are required to adhere to specific dimensions set by the International Swimming Federation (FINA). The standard length of an Olympic pool is 50 meters, while the width must be at least 25 meters. The depth of the pool should be a minimum of 2 meters. These dimensions ensure that the pool meets the requirements for competitive swimming events. By multiplying the length, width, and depth of the pool, you can easily calculate its volume. For example, a pool with dimensions of 50 meters in length, 25 meters in width, and 2 meters in depth would have a volume of 2,500 cubic meters. Knowing the dimensions is crucial for accurately determining the volume of an Olympic swimming pool. ### Formula for Calculating Volume You can easily calculate the volume of an Olympic swimming pool by using the formula discussed in the previous subtopic, which involves multiplying the length, width, and depth of the pool. To find the volume, simply multiply the length of the pool by the width and then multiply that result by the depth. For example, if the length of the pool is 50 meters, the width is 25 meters, and the depth is 3 meters, the volume would be 50 meters x 25 meters x 3 meters = 3,750 cubic meters. This formula can be useful not only for calculating the volume of an Olympic swimming pool, but also for estimating the amount of water needed for filling the pool, which can be helpful when determining swimming pool loans. ### Applying the Formula to an Olympic Swimming Pool Calculate the volume of an Olympic swimming pool by multiplying the length, width, and depth of the pool. To apply the formula, follow these steps: 1. Measure the length of the Olympic swimming pool. This is the distance from one end of the pool to the other. 2. 
Measure the width of the pool. This is the distance from one side of the pool to the other. 3. Measure the depth of the pool. This is how deep the water is in the pool. Once you have these measurements, simply multiply the length, width, and depth together to find the volume of the Olympic swimming pool. This calculation will give you the amount of water that the pool can hold. Understanding the volume of an Olympic swimming pool is essential when considering loans for building or maintaining such a facility. ## Converting Cubic Feet to Cubic Yards Now let's talk about converting cubic feet to cubic yards. A cubic foot is a unit of volume equal to a cube that measures one foot on each side. To convert cubic feet to cubic yards, you need to divide the number of cubic feet by 27, which is the conversion factor. This conversion is important when determining the volume of an Olympic swimming pool, as it's typically measured in cubic yards. ### Definition of a Cubic Foot To understand the concept of converting cubic feet to cubic yards, it's important to grasp the definition of a cubic foot. A cubic foot is a unit of volume that measures the amount of space occupied by a cube that's 1 foot long, 1 foot wide, and 1 foot high. It's commonly used to measure small volumes, such as the capacity of a refrigerator or the size of a room. When it comes to swimming pool loans, understanding this measurement is crucial as it helps determine the amount of materials needed for construction. To convert cubic feet to cubic yards, you need to divide the number of cubic feet by 27, as there are 27 cubic feet in a cubic yard. This conversion is important for accurately estimating costs and ensuring proper planning during the installation of swimming pools. ### Conversion Factor from Cubic Feet to Cubic Yards Divide the number of cubic feet by 27 to convert to cubic yards. 
When it comes to swimming pool loans, it's important to understand the conversion factor from cubic feet to cubic yards. Cubic feet and cubic yards are both units of volume, but they measure different amounts. To convert cubic feet to cubic yards, you divide the number of cubic feet by 27. This conversion factor is necessary when working with swimming pool measurements, as many pool contractors and suppliers use cubic yards as the standard unit for measuring the volume of materials such as concrete or water needed for the pool. ### Applying the Conversion to the Volume of an Olympic Swimming Pool You can apply the conversion from cubic feet to cubic yards to calculate the volume of an Olympic swimming pool. This conversion is useful for various purposes, including estimating the amount of water needed to fill the pool or determining the capacity for pool loans. To convert cubic feet to cubic yards, you can use the conversion factor of 1 cubic yard equals 27 cubic feet. By dividing the volume of the pool in cubic feet by 27, you can obtain the volume in cubic yards. To illustrate this conversion, consider the following table: Volume in Cubic Feet Volume in Cubic Yards 500,000 18,518.52 750,000 27,777.78 1,000,000 37,037.04 ## Practical Applications of these Measurements Now that you understand how to convert cubic feet to cubic yards, let's explore the practical applications of these measurements. In construction and landscaping, cubic yards are often used to determine the amount of materials needed, such as soil or concrete. Additionally, in competitive swimming, understanding the volume of a pool in cubic yards is crucial for accurate time measurements and record keeping. These measurements can also be applied in various other fields, highlighting their versatility and importance. 
### Use of Cubic Yards in Construction and Landscaping Calculate the amount of concrete needed for your construction or landscaping project by determining the number of cubic yards required. Cubic yards are commonly used in the construction and landscaping industry to measure the volume of materials such as concrete, soil, or gravel. It's a unit of measurement that represents the space occupied by a three-dimensional object. When planning your project, it's crucial to accurately estimate the amount of concrete needed to avoid over or under ordering. Additionally, cubic yards are also used to measure the capacity of swimming pools. This helps determine the amount of water required to fill the pool. ### Importance of Pool Volume in Competitive Swimming Understanding the importance of pool volume in competitive swimming is crucial for athletes and coaches alike, as it directly affects performance and training strategies. Pool volume refers to the amount of water in a swimming pool and is typically measured in cubic meters or gallons. In competitive swimming, the pool volume plays a significant role in determining the speed and efficiency of swimmers. For example, an Olympic-sized swimming pool, which has a volume of 2.5 million liters, allows swimmers to maintain their momentum and achieve faster times due to its larger size. Coaches use pool volume measurements to design training programs and set realistic goals for their athletes. Additionally, knowing the pool volume helps swimmers adapt to different pool sizes during competitions, ensuring they can perform at their best regardless of the pool's dimensions. ### Other Uses of these Measurements When considering swimming pool loans, it's important to understand the practical applications of these measurements beyond just competitive swimming. The measurements used in swimming pool loans, such as pool volume and dimensions, have various other uses. 
For instance, these measurements are crucial for architects and engineers when designing and constructing swimming pools. They ensure that the pool meets safety standards and can accommodate the desired number of swimmers. Additionally, pool measurements are essential for pool equipment manufacturers to create and sell products that are compatible with different pool sizes. Furthermore, these measurements are important for pool maintenance companies to determine the amount of chemicals needed to keep the water balanced and clean. ## Comparisons and Visualizations Now it's time to compare and visualize the size of an Olympic swimming pool. Imagine comparing the volume of an Olympic swimming pool to common objects like a bathtub or a car. Visualizing the volume of an Olympic swimming pool can help you understand its massive scale and the amount of water it holds. Get ready to be amazed by the sheer size of an Olympic swimming pool! ### Comparing the Volume of an Olympic Swimming Pool to Common Objects To compare the volume of an Olympic swimming pool to common objects, grab a measuring tape and get ready to be amazed. The volume of an Olympic swimming pool is approximately 2.5 million liters, or 660,000 gallons. This vast amount of water can be difficult to comprehend, so let's put it into perspective. Imagine filling up 25 standard-sized bathtubs, or 1,000 large milk containers. If you were to stack 10,000 basketballs, you still wouldn't come close to the volume of an Olympic swimming pool. Another way to visualize it's by imagining a cube with edges measuring 25 meters. That cube would perfectly fit the volume of an Olympic swimming pool. It's truly impressive how much water is needed to fill this iconic pool. ### Visualizing the Volume of an Olympic Swimming Pool To truly grasp the immense volume of an Olympic swimming pool, it's helpful to visualize and compare it to everyday objects. An Olympic-sized swimming pool holds a staggering amount of water. 
Picture this: the pool can contain about 660,000 gallons of water. That's equivalent to filling up more than a million standard bathtubs! If you were to pour all that water into a football field, it would fill it up to a depth of about 9.5 inches. To put it into perspective, the volume of an Olympic swimming pool is so massive that it would take around 18,000 loans for the average person to repay an amount equivalent to the pool's volume. ### Understanding the Scale of an Olympic Swimming Pool When visualizing and comparing the immense volume of an Olympic swimming pool, it's helpful to understand the scale by using comparisons and visualizations. An Olympic-sized swimming pool is a massive structure that holds a staggering amount of water. To put it into context, an Olympic swimming pool is about 50 meters long, 25 meters wide, and 2 meters deep. This means it can hold approximately 2.5 million liters of water. To better grasp the scale, imagine filling up more than 1,000 standard-sized bathtubs with water. Understanding the scale of an Olympic swimming pool can help you appreciate the magnitude of its construction and the resources required. It can also provide contextually relevant information when considering swimming pool loans for such a project. ## Common Misconceptions and Errors Don't make the common mistake of underestimating the importance of accurate volume calculations when planning a swimming pool. Many people have misconceptions about the size of an Olympic swimming pool, believing it to be larger or smaller than it actually is. It's crucial to ensure precise measurements in real-world applications to avoid costly errors and achieve the desired results. ### Common Mistakes in Volume Calculations Avoid making common mistakes in volume calculations by ensuring accurate measurements and avoiding misconceptions. When it comes to swimming pool loans, it's crucial to have the correct volume calculation for accurate cost estimates and proper planning. 
One common mistake is assuming that the pool shape is a perfect rectangle, leading to an inaccurate volume calculation. Remember that pools come in various shapes, such as kidney-shaped or freeform, which require different formulas to calculate volume accurately. Another mistake isn't considering the depth variations in the pool. If the depth varies, you need to calculate the average depth and adjust the volume calculation accordingly. Additionally, failing to account for the presence of steps, benches, or other features can also lead to inaccurate volume calculations. It's essential to pay attention to these details to avoid costly mistakes and ensure accurate volume calculations for your swimming pool project. ### Misconceptions about the Size of an Olympic Swimming Pool Misunderstandings surrounding the dimensions of an Olympic swimming pool can lead to common misconceptions and errors. One common misconception is the size of an Olympic swimming pool. Many people mistakenly believe that all Olympic swimming pools are the same size. In reality, the size of an Olympic swimming pool can vary. According to the International Swimming Federation (FINA), the standard size for an Olympic swimming pool is 50 meters long, 25 meters wide, and at least 2 meters deep. However, there are also smaller Olympic-sized pools that are 25 meters long. These smaller pools are often used for training and competitions. It's important to understand the different sizes of Olympic swimming pools to avoid any misconceptions when discussing the topic of size. ### Importance of Accurate Measurements in Real-World Applications Are you aware of the significance of accurate measurements in real-world applications, especially when it comes to swimming pool loans? Accurate measurements are crucial in determining the size and shape of a rectangular pool, as well as calculating the amount of water it can hold. 
When applying for a swimming pool loan, lenders need accurate measurements to assess the cost of building and maintaining the pool. In real-world applications, inaccurate measurements can lead to costly errors and delays in construction. For example, if the measurements are off, the pool may not fit properly in the designated area, resulting in additional expenses for adjustments or even the need to start over. To avoid these issues, it's essential to ensure precise and accurate measurements in all aspects of the pool design and construction process. ### What Are the Qualifications or Requirements to Apply for a Swimming Pool Loan? To apply for a swimming pool loan, you need to meet certain qualifications. These requirements may include a good credit score, proof of income, and a down payment. It's important to research and compare loan options before applying. ### Are There Any Specific Types of Swimming Pools That Are Not Eligible for Financing? There aren't any specific types of swimming pools that you can't finance. As long as you meet the requirements, you should be able to get a loan for any type of pool. ### Can Swimming Pool Loans Be Used for Other Purposes Besides Building a New Pool? Swimming pool loans can only be used for building a new pool, not for other purposes. It's important to consider this before applying for a loan to ensure you meet the eligibility requirements. ### What Is the Typical Interest Rate for Swimming Pool Loans? The typical interest rate for swimming pool loans varies depending on your credit score, income, and the lender you choose. It's important to shop around and compare rates to ensure you get the best deal. ### Are There Any Penalties for Paying off a Swimming Pool Loan Early? There may or may not be penalties for paying off a swimming pool loan early. It's important to check with your lender to understand their specific terms and conditions regarding early repayment. 
### Related Posts #### Mike Hunter Mike is the owner of the local pool shop. He's been in the business for over 20 years and knows everything there is to know about pools. He's always happy to help his customers with whatever they need, whether it's advice on pool maintenance or choosing the right chemicals. He's also a bit of a pool expert, and is always happy to share his knowledge with anyone who's interested.
3,833
19,846
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.828125
3
CC-MAIN-2024-10
latest
en
0.958281
http://www.jiskha.com/display.cgi?id=1306683722
1,369,462,393,000,000,000
text/html
crawl-data/CC-MAIN-2013-20/segments/1368705575935/warc/CC-MAIN-20130516115935-00034-ip-10-60-113-184.ec2.internal.warc.gz
544,466,874
3,097
Saturday May 25, 2013 # Homework Help: Math Posted by Help needed on Sunday, May 29, 2011 at 11:42am. Find the measure of the sides of an equilateral PQR if PQ = 5x - 7 and PR = 2x +5. I cannot understand this! • Math - MathMate, Sunday, May 29, 2011 at 2:43pm An equilateral triangle PQR has all sides of equal lengths. If PQ = 5x - 7, and PR = 2x +5, then we can conclude that PQ=PR, or 5x-7=2x+5 Solve for x 5x-2x = 5+7 3x = 12 x = 4 Substitute x=4 into 5x-7 to get So each side of the triangle is 5*4-7=13 units in length. Related Questions Geometry - Find the measure of the sides of an equilateral PQR if PQ = 5x - 7 ... geometry - 5. The lengths of segments PQ and PR are 8 inches and 5 inches, ... Trig - The lengths of segments PQ and PR are 8 inches and 5 inches, respectively... maths - In a triangle ABC, AB = 2 cm, BC = 3 cm and AC = 4 cm. Choose the two ... geometry - The lengths of segments PQ and PR are 8 inches and 5 inches, ... geometry - The lengths of segments PQ and PR are 8 inches and 5 inches, ... maths - Choose three options which are true: a) an angle of 150 degrees is ... math pls - The lengths of segments PQ and PR are 8 inches and 5 inches, ... geometry/math pls help - The lengths of segments PQ and PR are 8 inches and 5 ... maths - Choose three options which are true: a) an angle of 150 degrees is ...
429
1,349
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.859375
4
CC-MAIN-2013-20
latest
en
0.912439
https://profound-information.com/what-happens-if-a-pump-has-too-much-head/
1,726,052,629,000,000,000
text/html
crawl-data/CC-MAIN-2024-38/segments/1725700651383.5/warc/CC-MAIN-20240911084051-20240911114051-00561.warc.gz
434,819,212
12,844
## What happens if a pump has too much head? A pump having a higher head rating can create a higher water column. A higher head AND flow rating means more water, higher. What does maximum head mean on a water pump? Maximum Head Lift is the total height from the source of the water to the destination or drainage point. This provides the power to move water over a distance. What is the significance of head in pump? Simply stated: a pump’s head is the maximum height that the pump can achieve pumping against gravity. Intuitively, if a pump can produce more pressure, it can pump water higher and produce a higher head. ### What happens if pump head is too low? If the operating point is below the pump curve, the problem could be the pump, the system or possibly the pumpage. For any specific flow rate, there is a corresponding amount of head. The impeller design dictates a specific flow rate at which the pump will perform most efficiently—its best efficiency point (BEP). The main reason for using head instead of pressure to measure a centrifugal pump’s energy is that the pressure from a pump will change if the specific gravity (weight) of the liquid changes, but the head will not change. Head is related to the velocity that the liquid gains when going through the pump. What does submersible pump head mean? Head is the height at which a pump can raise water up, that’s it, it’s that simple. Connect a tube to the discharge of a pump and measure the water height, that the head of the pump. As mentioned above, Total Head = Pump Head – Suction Head. If the water level is higher than the pump, then the suction head is negative and the pump head will increase. This is because the water entering the pump applies additional pressure at the suction. How do you increase head height on a pump? Throttle the valve by reducing the flow thus increasing the head. Reducing the flow also helps to reduce the frictional losses along the discharge line, so it will see an increase in pressure. 
Reducing the discharge pipe size will have an adverse effect as it will increase the frictional losses across a smaller pipe. Why is pump suction greater than discharge? The differential head is Discharge head – suction head. So the discharge pressure has to be greater than the suction. Suction pipe diameter is usually 1 size higher than discharge, because we need minimum resistance in the suction side to reduce the losses and increase the NPSHa.
508
2,463
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.828125
3
CC-MAIN-2024-38
latest
en
0.935898
http://civilservicereview.com/2016/08/week-8-practice-exercises-and-problems/
1,620,274,068,000,000,000
text/html
crawl-data/CC-MAIN-2021-21/segments/1620243988725.79/warc/CC-MAIN-20210506023918-20210506053918-00451.warc.gz
9,662,050
9,928
## Week 8 Review: Practice Exercises and Problems After learning how to solve number problems, let’s have some practice exercises. Week 8 Review: Practice Exercises and Problems 1.) One number is 3 more than the other. Their sum is 27. What are the numbers? 2.) One number is 5 less than the other. Their sum is 51. What are the numbers? 3.) One number is 3 times the other number. Their sum is 48. What are the numbers? 4.) One number is 5 times the other number. Their difference is 52. What are the numbers? 5.) The sum of three numbers is 36. The second number is 5 more than the first number and the third number is 8 less than the first number. What are the three numbers? 6.) The sum of three numbers is 98. The second number is twice the first number and the third number twice the second number. What are the three numbers? 7.) One number is two more than thrice the other. Their sum is 26. What are the two numbers? 8.) One number is thrice the other. When 3 is added to the larger and 7 is subtracted from the smaller, their sum becomes 32. What are the two numbers? 9.) The sum of two positive consecutive numbers is 91. What are the two numbers? 10.) The sum of two positive consecutive EVEN integers is 66. What are the two numbers? 11. ) The sum of two positive consecutive ODD integers is 36. What are the two numbers? 12.) The sum of three positive consecutive ODD integers is 81. What are the three integers? 13.) The sum of the smallest and the largest of five positive consecutive integers is 108. What is the third integer? 14.) The average of four positive consecutive EVEN integers is 19. What is the largest integer? 15.) The average of seven positive consecutive integers is 31. What is the smallest integer? Enjoy solving!
418
1,765
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.28125
4
CC-MAIN-2021-21
latest
en
0.951269
https://whatisconvert.com/84-imperial-fluid-ounces-in-quarts
1,721,567,310,000,000,000
text/html
crawl-data/CC-MAIN-2024-30/segments/1720763517701.96/warc/CC-MAIN-20240721121510-20240721151510-00331.warc.gz
541,258,045
7,255
## Convert 84 Imperial Fluid Ounces to Quarts To calculate 84 Imperial Fluid Ounces to the corresponding value in Quarts, multiply the quantity in Imperial Fluid Ounces by 0.030023748010718 (conversion factor). In this case we should multiply 84 Imperial Fluid Ounces by 0.030023748010718 to get the equivalent result in Quarts: 84 Imperial Fluid Ounces x 0.030023748010718 = 2.5219948329003 Quarts 84 Imperial Fluid Ounces is equivalent to 2.5219948329003 Quarts. ## How to convert from Imperial Fluid Ounces to Quarts The conversion factor from Imperial Fluid Ounces to Quarts is 0.030023748010718. To find out how many Imperial Fluid Ounces in Quarts, multiply by the conversion factor or use the Volume converter above. Eighty-four Imperial Fluid Ounces is equivalent to two point five two two Quarts. ## Definition of Imperial Fluid Ounce A fluid ounce (abbreviated fl oz, fl. oz. or oz. fl.) is a unit of volume. It is equal to about 28.41 ml in the imperial system or about 29.57 ml in the US system. The fluid ounce is sometimes referred to simply as an "ounce" in applications where its use is implicit. ## Definition of Quart The quart (abbreviation qt.) is an English unit of volume equal to a quarter gallon. It is divided into two pints or four cups. The US liquid quart equals 57.75 cubic inches, which is exactly equal to 0.946352946 liters. ## Using the Imperial Fluid Ounces to Quarts converter you can get answers to questions like the following: • How many Quarts are in 84 Imperial Fluid Ounces? • 84 Imperial Fluid Ounces is equal to how many Quarts? • How to convert 84 Imperial Fluid Ounces to Quarts? • How many is 84 Imperial Fluid Ounces in Quarts? • What is 84 Imperial Fluid Ounces in Quarts? • How much is 84 Imperial Fluid Ounces in Quarts? • How many qt are in 84 uk fl oz? • 84 uk fl oz is equal to how many qt? • How to convert 84 uk fl oz to qt? • How many is 84 uk fl oz in qt? • What is 84 uk fl oz in qt? • How much is 84 uk fl oz in qt?
534
1,985
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.625
3
CC-MAIN-2024-30
latest
en
0.853294
https://www.tutorialandexample.com/infix-postfix-and-prefix-conversion
1,725,718,583,000,000,000
text/html
crawl-data/CC-MAIN-2024-38/segments/1725700650883.10/warc/CC-MAIN-20240907131200-20240907161200-00254.warc.gz
1,008,420,601
19,445
# Infix, Postfix and Prefix Conversion ## What is Infix Notation Infix notation is a method of writing mathematical expressions that are commonly used in everyday calculations, as well as in computer programming languages. In this notation, operators such as addition (+), subtraction (-), multiplication (*), and division (/) are written between the operands or the numbers on which the operations are performed. For example, the expression 2 + 3 is written in infix notation as "2 plus 3", with the plus symbol between the two operands. Similarly, the expression 4 *5 is written as "4 times 5", with the multiplication symbol placed between the two operands. Infix notation is widely used because it is intuitive and easy to read and understand for most people. It follows the same order of operations as traditional arithmetic, meaning multiplication and division are performed before addition and subtraction. This order can be overridden using parentheses to group operations, just as in traditional arithmetic. For example, the expression 2 + 3 * 4 can be ambiguous if we don't use parentheses toindicate which operation should be performed first. In this case, it could meaneither "2 plus 3, then multiplied by 4" or "3 multiplied by 4, then added to 2".  So, to remove ambiguity, we can write the expression as (2 + 3) * 4 or 2 + (3 * 4),  depending on the desired order of operations. In addition to the standard arithmetic operators, infix notation can also include other mathematical and logical symbols, such as exponents (^) and square roots (v), and logical operators, such as AND (&&) and OR (||). These symbols are placed between the operands, just like the arithmetic operators. Infix notation is used in many programming languages, including popular ones like C++, Java, and Python. However, in programming languages, infix notation may be limited to a smaller set of operators and symbols, depending on the language syntax and rules. 
Infix notation can be converted to other notations, such as postfix (also known as reverse Polish notation) and prefix (also known as Polish notation), which are used in some programming languages and calculators. These notations have advantages in certain contexts, such as easier parsing by computers or more efficient use of memory in calculators. In conclusion, infix notation is a widely used and intuitive method of writing mathematical expressions that include operators placed between the operands. It follows the same order of operations as traditional arithmetic and can include a variety of mathematical and logical symbols. Infix notation is used in many programming languages and can be converted to other notations for specific purposes. ## What is Postfix notation Postfix notation, also known as reverse Polish notation (RPN), is a mathematical expression method that places operators after the operands. In this notation, the expressions are written in a way that makes it easy for computers to evaluate them. Postfix notation was developed by the Polish mathematician Jan Lukasiewicz in the 1920s and has since been used in many programming languages and calculators. In postfix notation, spaces separate operands and operators, and the order of operations is determined by the position of the operators. For example, the expression 2 + 3 is written as "2 3 +" in postfix notation. In this expression, the operator "+" follows the two operands "2" and "3". Similarly, the expression 4 * 5 is written as "4 5 *", with the operator "*" following the two operands. The advantage of postfix notation is that it eliminates the need for parentheses and the order of operations becomes unambiguous. The expressions can be evaluated by a simple stack-based algorithm, which makes it easy to implement in programming languages and calculators. To evaluate an expression in postfix notation, a stack data structure is used to keep track of the operands and operators. 
The operands are pushed onto the stack, and when an operator is encountered, the top two operands are popped from the stack, the operator is applied to them, and the result is pushed back onto the stack. For example, to evaluate the postfix expression "2 3 + 4 *", we would start by pushing the operands 2 and 3 onto the stack. When we encounter the "+" operator, we pop 3 and 2 from the stack, add them together, and push the result, 5, back onto the stack. We then push the operand 4 onto the stack. When we encounter the "*" operator, we pop 4 and 5 from the stack, multiply them together, and push the result, 20, back onto the stack. The final result, 20, is the only value left on the stack. Postfix notation can also include other operators and symbols, such as exponents (^) and square roots (v), and logical operators, such as AND (&&) and OR (||). These symbols are placed after the operands, just like the arithmetic operators. Postfix notation can be converted to infix notation or prefix notation, which can be useful for displaying expressions in a more human-readable format. However, the conversion process can be complex, especially for expressions that include parentheses or nested operators. Postfix notation is used in many programming languages, including Forth, Factor, and the HP calculators. It is also used in some computer architectures, where it can be used to simplify the design of hardware circuits for arithmetic and logical operations. 
Spaces separate operators and operands, and the position of the operators determines the order of operations. For example, the expression 2 + 3 is written in prefix notation as "+ 2 3". In this expression, the operator "+" comes before the two operands "2" and "3". Similarly, the expression 4 * 5 is written as "* 4 5", with the operator "*" preceding the two operands. The advantage of prefix notation is that it eliminates the need for parentheses and the order of operations becomes unambiguous. The expressions can be evaluated by a simple stack-based algorithm, which makes it easy to implement in programming languages and calculators. To evaluate an expression in prefix notation, a stack data structure is used to keep track of the operands and operators. The operators are pushed onto the stack, and when an operand is encountered, it is pushed onto the stack. When there are enough operands on the stack to satisfy an operator, the top operands are popped off the stack, the operator is applied to them, and the result is pushed back onto the stack. For example, to evaluate the prefix expression "* + 2 3 4", we would start by pushing the operator "*" onto the stack. When we encounter the "+" operator, we push it onto the stack, followed by the operands 2 and 3. We now have enough operands on the stack to satisfy the "+" operator, so we pop 2 and 3, add them together, and obtain 5. The next element is the operand 4, so we now have enough operands on the stack to satisfy the "*" operator: we pop 4 and the result of "+" (which is 5) from the stack, multiply them together and push the result, 20, back onto the stack. The final result, 20, is the only value left on the stack. Prefix notation can also include other operators and symbols, such as exponents (^) and square roots (v), and logical operators, such as AND (&&) and OR (||). These symbols are placed before the operands, just like the arithmetic operators. 
Prefix notation can be converted to infix notation or postfix notation, which can be useful for displaying expressions in a more human-readable format. However, the conversion process can be complex, especially for expressions that include parentheses or nested operators. Prefix notation is used in many programming languages, including Lisp, Scheme, and the early versions of the HP calculators. It is also used in some computer architectures, where it can be used to simplify the design of hardware circuits for arithmetic and logical operations. In conclusion, prefix notation is a method of writing mathematical expressions in which operators are placed before their operands. It eliminates the need for parentheses and makes the order of operations unambiguous. Prefix notation can be evaluated by a simple stack-based algorithm and is used in many programming languages and calculators. It can also include a variety of mathematical and logical symbols and can be converted to infix or postfix notation for display purposes. ## What is Infix to Postfix conversion? Infix notation is a common way of writing mathematical expressions where the operators are placed between the operands, such as 2 + 3 or 4 * 5. However, postfix notation (also known as reverse Polish notation) is another way of writing expressions where the operators come after the operands, such as 2 3 + or 4 5 *. Converting infix notation to postfix notation can be useful in certain contexts, such as when implementing calculators or compilers. • The conversion process involves scanning the infix expression from left to right and using a stack to keep track of the operators. • When an operator is encountered, it is placed onto the stack. • However, before placing the operator onto the stack, any operators of higher or equal precedence already on the stack must be popped off and appended to the postfix expression. This ensures that operators of higher precedence are evaluated first. 
• When an opening parenthesis is encountered, it is placed onto the stack. • When a closing parenthesis is encountered, all operators on the stack are popped off and appended to the postfix expression until the opening parenthesis is reached, which is then popped off and discarded. ### Steps for Infix to Postfix Conversion: For example, Take the infix expression "3 + 4 * 2 / (1 - 5 ) ^ 2 ^ 3" as an example. To convert this expression to postfix notation, we follow these steps: 1. Create an empty stack and an empty postfix expression. 2. Scan the infix expression from left to right. 3. If the current character is an operand (in this case, a number), append it to the postfix expression. 4. If the current character is an operator (+, -, *, /, ^), then: a. While there are operators of higher or equal precedence on the stack, pop them off and append them to the postfix expression. b. Push the current operator onto the stack. 5. If the current character is an opening parenthesis, push it onto the stack. 6. If the current character is a closing parenthesis, pop operators off the stack and append them to the postfix expression until the opening parenthesis is reached, which is then popped off and discarded. 7. After scanning the entire expression, pop any remaining operators off the stack and append them to the postfix expression. Using the above steps, we can convert the infix expression "3 + 4 * 2 / ( 1 - 5 ) ^ 2 ^ 3" to postfix notation as follows: 1. Empty stack: 2. Process "3": Postfix expression: 3 3. Process "+": Stack: + Postfix expression: 3 4. Process "4": Postfix expression: 3 4 5. Process "*": Stack: * Postfix expression: 3 4 6. Process "2": Postfix expression: 3 4 2 7. Process "/": Stack: / Postfix expression: 3 4 2 * 8. Process "(": Stack: ( / Postfix expression: 3 4 2 * 9. Process "1": Postfix expression: 3 4 2 * 1 10. Process "-": Stack: - ( / Postfix expression: 3 4 2 * 1 11. Process "5": Postfix expression: 3 4 2 * 1 5 12. 
Process ")": Stack: / Postfix expression: 3 4 2 * 1 5 - ^ 13. Process "^": Stack: ^ / Postfix expression: 3 4 2 * 1 5 - ^ ^ 14. Process "2": Postfix expression: 3 4 2 * 1 5 - ^ ^ 2 15. Process "^": Stack: ^ ^ / Postfix expression: 3 4 2 * 1 5 - ^ ^ 2 ^ 16. Final postfix expression: 3 4 2 * 1 5 - ^ ^ 2 ^ + Therefore, the infix expression "3 + 4 * 2 / ( 1 - 5 ) ^ 2 ^ 3" can be converted to postfix notation as "3 4 2 * 1 5 - ^ ^ 2 ^ +". ## What is Infix to Prefix Conversion? Infix-to-prefix conversion involves converting an infix expression to its prefix notation. This can be done using steps similar to the infix to postfix conversion, but with a few modifications. The steps are as follows: 1. Reverse the infix expression 2. Use the same algorithm as an infix to postfix conversion to obtain the postfix expression of the reversed infix expression 3. Reverse the postfix expression to obtain the prefix expression Let's take the same example infix expression we used before: "3 + 4 * 2 / (1 - 5 ) ^ 2 ^ 3". • Reverse the infix expression: "3 ^ 2 ^ (5 - 1) / 2 * 4 + 3" • Use the infix to postfix conversion algorithm to obtain the postfix expression of the reversed infix expression: "3 4 2 * 1 5 - ^ ^ 2 / +" • Reverse the postfix expression to obtain the prefix expression: "+ / ^ ^ 2 - 5 1 * 2 4 3." Therefore, the infix expression "3 + 4 * 2 / (1 - 5) ^ 2 ^ 3" can be converted to prefix notation as "+ / ^ ^ 2 - 5 1 * 2 4 3". ## What is Postfix to Infix conversion? Postfix to infix conversion converts an expression written in postfix notation to its equivalent infix notation. The operator is written after its operands in postfix notation, also known as Reverse Polish notation. On the other hand, infix notation is the conventional notation where the operator is written between the operands. 
Here is an example of a postfix notation expression: 6 3 2 - * To convert this expression to infix notation, we follow these steps: Step 1: Start from the left and scan each element of the expression. Step 2: If the current element is an operand, push it onto a stack. Step 3: If the current element is an operator, pop two operands from the stack, place them on either side of the operator in infix notation, and enclose the entire expression in parentheses. Then push the resulting expression back onto the stack. Applying these steps to our example: 1. We start scanning the expression from the left. The first element is 6, an operand, so we push it onto the stack. Stack: 6 • The next element is 3, also an operand, so we push it onto the stack. Stack: 6 3 • The next element is 2, also an operand, so we push it onto the stack. Stack: 6 3 2 • The next element is -, which is an operator. In that order, we pop the last two operands from the stack, which are 3 and 2. We place them on either side of the operator in infix notation, enclosed in parentheses. We get (3 - 2). We then push this resulting expression back onto the stack. Stack: 6 (3 - 2) • The next element is *, which is an operator. In that order, we pop the last two operands from the stack, which are 6 and (3 - 2). We place them on either side of the operator in infix notation, enclosed in parentheses. We get 6 * (3 - 2). We then push this resulting expression back onto the stack. Stack: (6 * (3 - 2)) • We have reached the end of the expression, and the final result is the expression on the top of the stack, which is (6 * (3 - 2)). This is the equivalent infix notation of the original postfix expression 6 3 2 - *. ## What is Prefix-to-Infix conversion? Prefix-to-infix conversion converts an expression written in prefix notation to its equivalent infix notation. The operator is written before its operands in prefix notation, also known as Polish notation. 
On the other hand, infix notation is the conventional notation where the operator is written between the operands. Here is an example of a prefix notation expression: * - 6 3 2 To convert this expression to infix notation, we follow the steps below: Step 1: Start from the right and scan each element of the expression. Step 2: If the current element is an operand, push it onto a stack. Step 3: If the current element is an operator, pop two operands from the stack, place them on either side of the operator in infix notation, and enclose the entire expression in parentheses. Then push the resulting expression back onto the stack. Applying these steps to our example: 1. We start scanning the expression from the right. The first element is 2, an operand, so we push it onto the stack. Stack: 2 • The next element is 3, also an operand, so we push it onto the stack. Stack: 2 3 • The next element is 6, also an operand, so we push it onto the stack. Stack: 2 3 6 • The next element is -, which is an operator. In that order, we pop the last two operands from the stack, which are 6 and 3. We place them on either side of the operator in infix notation, enclosed in parentheses. We get (6 - 3). We then push this resulting expression back onto the stack. Stack: 2 (6 - 3) • The next element is *, which is an operator. In that order, we pop the last two operands from the stack, which are (6 - 3) and 2. We place them on either side of the operator in infix notation, enclosed in parentheses. We get ((6 - 3) * 2). We then push this resulting expression back onto the stack. Stack: ((6 - 3) * 2) • We have reached the end of the expression, and the final result is the expression on the top of the stack, which is ((6 - 3) * 2). This is the equivalent infix notation of the original prefix expression * - 6 3 2.
3,954
17,313
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.125
4
CC-MAIN-2024-38
latest
en
0.954557
http://old.robowiki.net/robowiki?IsTheBotBetterOrWorse
1,558,597,685,000,000,000
text/html
crawl-data/CC-MAIN-2019-22/segments/1558232257156.50/warc/CC-MAIN-20190523063645-20190523085645-00482.warc.gz
161,264,769
3,770
# IsTheBotBetterOrWorse Robo Home | Changes | Preferences | AllPages This is an extract of a post I made some time ago in the forums about checking if there is a clear difference between bots. The objective was to have some test that avoids having to run 200+ battles to check if a bot is better or not. The approach here is to make a simple statistical test to see if we can say the scores are significantly different or not. Note that if you use it and it says that there is no significant difference, it does not mean that the bots are equal, but that you have to run more battles :-) Here it goes, a formula to check if a bot is better than another: ``` if ((Winner Score - Loser Score)/Number? of rounds)/(120/sqrt(Number of rounds)) > 1,28 then you can say Winner is better than Loser. if not, there is not enough data to conclude that ... żmay be you must run more battles? ``` You can also reverse the formula and say that a bot must win by 1968 points in a 100 rounds battle (or by 6223 in a 1000 rounds battle, or 768 in a 25 rounds battle) to say it is clearly better than another. It is a simple test that can be used, and can avoid running long 1000 battles. Some hypotheses I made: a) Confidence level is 90% (assuming Winner can be equal or better than loser, but not worse). You can use 1,64 instead of 1,28 for a 95% test. b) There is no clear way to calculate variance, because Robocode gives accumulated results. But if you think about it, you will see that the standard deviation is between 60 and 180, so I used the mean point (120). c) I assume that bots perform ideally at the same level in all rounds, but this may not be true (ie. learning bots that store results from round to round). One example: SandboxDT vs. Duelist. There are 2 battles: a) The first one is the 100 round battle on the Duels site, where Duelist beat Sandbox 1.2 8121-7537. Applying the formula: ((8121-7537)/100)/(120/10) = 0.487 -> You cannot conclude Duelist is better than SandboxDT 1.2. 
b) The second one is the result posted yesterday, where SandoxDT? 1.3 beat Duelist 79205-78326 in a 1000 rounds battle. Applying the formula: ((79205-78326)/1000)/(120/1000^0.5) = 0.23 -> You cannot conclude SandoxDT? is better than Duelist. -- Albert I'm having trouble remembering some of the statistical basis for this stuff, but I'd like to apply it elsewhere. Could someone explain maybe how you can 'rate' the validity of data given the sample size, standard deviation, and other basic stuff like that? Basically I want to know something like a confidence interval. -- Kawigi The margin of error formula looks like this: ``` M = 1.96 * sqrt((percent * (1 - percent)) / number of samples) ``` That is the margin of error with a 95% confidence interval. I won't go into details about where the 1.96 comes from, as it becomes complicated very quickly, but suffice to say that the number required is provided by this equation: ``` n = sqrt(2) * aerf(CI) ``` Where aerf is the inverse of the so-called error function (usually denoted "erf"), and CI is the confidence interval (i.e. "0.95" in our case). Some other solutions of this equation are as follows: ``` sqrt(2) * aerf(0.90) = 1.645 sqrt(2) * aerf(0.99) = 2.576 sqrt(2) * aerf(0.999) = 3.291 ``` A 95% confidence interval, as applied to margin of error, means that 19 out of every 20 samples will lie within the margin of error. However, 1 out of every 20 will be totally and completely whack. Basically, you can be 95% sure that the correct answer is somewhere inside your margin of error, but there is the distinct possibility that your estimate is completely off -- the true answer has a 5% chance of being ENTIRELY different from your estimate. I hope this is at least a good summary. I learned all of this stuff while researching for a little utility I wrote that fights two bots together for only the number of battles necessary for one of them to come out as the winner with 99.9% confidence (or some other confidence that I specify). 
It works very well, and I can even specify a minimum number of battles, but it has the downside that the answer is a little bit questionable when a lot of bot learning is going on. -- nano Within which interval can the margin of error be expected for a successful test? -- PEZ Is there a way to figure out what my confidence interval is that my estimate is even valid? -- Kawigi In my utility, I run margin of error on both bots' scores after every round, and if their margins do not overlap, I declare the test to be complete. That is how you know if your test is successful. For instance, if a bot has an estimated win percentage of 0.65 with a margin of error of 0.10, and the other bot has an estimated win percentage of 0.35 (it will have the same margin of error), then the lowest the first bot's percentage could be is 0.55, and the highest the second bot's percentage could be is 0.45. Since they do not overlap, the first bot is better than the second bot (according to the estimates). As to Kawigi's question: you don't figure out a confidence interval, and there is no way for an estimate to be 100% valid (unless it samples the entire population, which in Robocode is analogous to running every possible battle). You set your confidence interval a priori. The margin of error returned with that confidence interval, say 0.95, is valid 95% of the time. One in every twenty samples will lie entirely outside the interval, though. I hope this makes sense. It's a pretty hairy topic to discuss since it seems to involve abstract levels of error. As an aside, I use a system like this to control my virtual guns. The amount I use each gun is based on the amount that its margin of error overlaps with the best (estimated) gun. -- nano Robo Home | Changes | Preferences | AllPages
1,437
5,807
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.515625
4
CC-MAIN-2019-22
latest
en
0.927473
https://gradehunters.net/urgent-homework-help-44839/
1,623,691,262,000,000,000
text/html
crawl-data/CC-MAIN-2021-25/segments/1623487613380.12/warc/CC-MAIN-20210614170602-20210614200602-00056.warc.gz
285,946,991
14,577
# ae 1 AE here are the other two Classmate responses. Thank you. Jackie Bonds 1 posts Re:Unit1 – Discussion Board Unit 1 DB P1=3                      p2=15 Q1=1200              q2=250 Q=mp+b Slope M=(q2-q1)/(p2-p1) M=(250-1200)/(15-3) M=-950/12 M=79.16 Solve for B Q2=m(p2)+b 250=-79.16(15)+b 250=1187.4+b B-1187.4=250 b-1187.4+1187.4=250+1187.4 b=1437.4 Demand with the price of \$10 Q=mp+b Q=-79.16(10)+1437.4 Q=-791.6+1437.4 Q=645.8 Less than 20 items 20>-79.16p+1437.4 -79.16p+1437.4<20 1437.4-79.16p<20 1437-1437-79.16<20-1437.4 -79.16<-1417.4 -79.16/-79.16<-1417.4/-79.16 P<17.90 Supply Function Q=100p+80 Q=100(10)+80 Q=1000+80 Q=1080 Supply is Greater than Demand at \$10 Equilibrium Price The Equilibrium Price is the price where the demand and the supply are at equal amounts. 100p+80=-79.16p+1437.4 179.16p+80=1437.4 179.16=-80+1437.4 179.16=1357.4 179.16/179.16=1357.4/179.16 P=7.57 It is important to know the equilibrium price. This is the price that you must never be less than. If the owner decides to sell the product for \$5 the supply would not be able to keep up with the demand. The Intellipath node that helped me the most was definitely Translating sentences into variable expressions and equations. MATH133Unit1DBGraphs.docx Re:Unit1 – Discussion Board I have chosen points: (2, 1,400) and (12, 400). Finding the slope: M= q2-q1/p2-p1 M= 400-1400/12-2 M= -1000/10 M= -100 Finding B: q2=  mp2+b 400= -100(12)+b 400= -1200+b b= 1600 q= (-100)p+1600 What will be the demand value when the price is p=\$10? q= m(10)+b q= -100(10)+1600 q= -1000+1600 q= 600 What’s the price when q<20 items? 20>mp+b 20>(-100)p+1600 20>-1580>-100p p< 15.8 Supply Function: Q = 100P + 80 When Price is \$10 q= 100(p)+80 q= 100(10)+80 q=1000+80 q= 1080 The Supply 1080 is more when the demand was 600. This means the demand was to low and more supply was needed. 
Equilibrium price When you have two linear equations that will intercept one another, you need to set both equations equal to one another. For an example x=y and y=x. 100p+80=-100p+1600 200p+80=1600 200p=1580 p=7.9 See the attached file The equilibrium price is important to know so that producers can determine whether or not a product is in their budget also if so will that product gain or lose profits. Having this information during the starting point of a business helps to establish a budget for producers. Reminds me having a budget before you go shopping so that you don’t over spend on unnecessary things. During the Determine Knowledge module, I had problems getting the hang of solving linear equations but once I had did a few of them, it was easier than I thought. I haven’t taken algebra in awhile so I’m kind of rusty. Hopefully I get the hang of things!
998
2,821
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.9375
4
CC-MAIN-2021-25
latest
en
0.790203
https://www.gradesaver.com/textbooks/math/algebra/intermediate-algebra-6th-edition/chapter-7-section-7-3-simplifying-radical-expressions-exercise-set-page-433/93
1,524,777,031,000,000,000
text/html
crawl-data/CC-MAIN-2018-17/segments/1524125948549.21/warc/CC-MAIN-20180426203132-20180426223132-00098.warc.gz
805,644,763
14,366
## Intermediate Algebra (6th Edition) $(-\frac{1}{2},\frac{1}{2})$ We know that the midpoint of the line segment whose endpoints are $(x_{1},y_{1})$ and $(x_{2},y_{2})$ is the point with coordinates $(\frac{x_{1}+x_{2}}{2},\frac{y_{1}+y_{2}}{2})$. Therefore, a line with endpoints $(\frac{1}{2},\frac{3}{8})$ and $(-\frac{3}{2},\frac{5}{8})$ will have a midpoint of $(\frac{\frac{1}{2}+(-\frac{3}{2})}{2},\frac{\frac{3}{8}+\frac{5}{8}}{2})=(\frac{-\frac{2}{2}}{2},\frac{\frac{8}{8}}{2})=(-\frac{1}{2},\frac{1}{2})$.
223
516
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.15625
4
CC-MAIN-2018-17
longest
en
0.66906
http://betterlesson.com/lesson/resource/2319698/wrapping-paper-problem-docx
1,487,711,615,000,000,000
text/html
crawl-data/CC-MAIN-2017-09/segments/1487501170839.31/warc/CC-MAIN-20170219104610-00508-ip-10-171-10-108.ec2.internal.warc.gz
26,907,210
19,870
## Wrapping paper problem.docx - Section 1: Do NOW (Days 1 and 2) Wrapping paper problem.docx # Creating a Robot (2 Days) Unit 8: Geometry Lesson 15 of 17 ## Big Idea: What do tin foil, surface area, and rectangular prisms make? A tin foil robot of course! Print Lesson 34 teachers like this lesson Standards: Subject(s): 85 minutes ##### Similar Lessons ###### 3D Figures and Nets Big Idea: Students explore cube nets in an effort to understand what properties are common to all nets that form a cube. Students work hands-on with nets and are then pushed to use their experience to visually determine whether other nets will form a given figures. Favorites(16) Resources(29) Somerville, MA Environment: Urban ###### Categories and Characteristics of 3D Solids Big Idea: Students develop the characteristics of polyhedrons. Favorites(1) Resources(15) Brooklyn, NY Environment: Urban ###### Nets vs. Composite Figures 6th Grade Math » Area & Volume Big Idea: Are you sure that’s a net? Understanding the word “net” as a mathematical term. Favorites(0) Resources(20) Jonesboro, GA Environment: Urban
267
1,106
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.8125
3
CC-MAIN-2017-09
latest
en
0.817969
https://www.maplesoft.com/support/help/view.aspx?path=RegularChains%2FConstructibleSetTools%2FIsContained
1,675,211,205,000,000,000
text/html
crawl-data/CC-MAIN-2023-06/segments/1674764499891.42/warc/CC-MAIN-20230131222253-20230201012253-00197.warc.gz
890,376,829
36,180
IsContained - Maple Help RegularChains ConstructibleSetTools[IsContained] check whether or not a constructible set is a subset of another one SemiAlgebraicSetTools[IsContained] check whether or not a semi-algebraic set is a subset of another one Calling Sequence IsContained(cs1, cs2, R) IsContained(lrsas1, lrsas2, R) Parameters cs1, cs2 - constructible sets lrsas1, lrsas2 - lists of regular semi-algebraic systems R - polynomial ring Description • The command IsContained(cs1, cs2, R) returns true if cs1 is contained in cs2; otherwise false. The polynomial ring may have characteristic zero or a prime characteristic. cs1 and cs2 must be defined over the same ring R. • The command IsContained('lrsas1', 'lrsas2', 'R') returns true if lrsas1 is contained in lrsas2; otherwise false. The polynomial ring must have characteristic zero. lrsas1 and lrsas2 must be defined over the same ring R. • A constructible set is encoded as an constructible_set object, see the type definition in ConstructibleSetTools. • A semi-algebraic set is encoded by a list of regular_semi_algebraic_system, see the type definition in RealTriangularize. • This command is available once either the RegularChains[ConstructibleSetTools] submodule or RegularChains[SemiAlgebraicSetTools] submodule has been loaded. It can also be accessed through the long form of the command by using RegularChains[ConstructibleSetTools][IsContained] or RegularChains[SemiAlgebraicSetTools][IsContained]. Examples > $\mathrm{with}\left(\mathrm{RegularChains}\right):$ > $\mathrm{with}\left(\mathrm{ConstructibleSetTools}\right):$ First, define the polynomial ring $R$ and two polynomials of $R$. 
> $R≔\mathrm{PolynomialRing}\left(\left[x,y,t\right]\right)$ ${R}{≔}{\mathrm{polynomial_ring}}$ (1) > $p≔\left(5t+5\right)x-y-\left(10t+7\right)$ ${p}{≔}\left({5}{}{t}{+}{5}\right){}{x}{-}{y}{-}{10}{}{t}{-}{7}$ (2) > $q≔\left(5t-5\right)x-\left(t+2\right)y+\left(-7t+11\right)$ ${q}{≔}\left({5}{}{t}{-}{5}\right){}{x}{-}\left({t}{+}{2}\right){}{y}{-}{7}{}{t}{+}{11}$ (3) Using the GeneralConstruct function and adding one inequality, you can build a constructible set. By $x-t$ and $x+t$, two constructible sets cs1 and cs2 are different. > $\mathrm{cs1}≔\mathrm{GeneralConstruct}\left(\left[p,q\right],\left[x-t\right],R\right)$ ${\mathrm{cs1}}{≔}{\mathrm{constructible_set}}$ (4) > $\mathrm{cs2}≔\mathrm{GeneralConstruct}\left(\left[p,q\right],\left[x+t\right],R\right)$ ${\mathrm{cs2}}{≔}{\mathrm{constructible_set}}$ (5) Use the IsContained function to check if one is contained in another. > $\mathrm{IsContained}\left(\mathrm{cs1},\mathrm{cs2},R\right)$ ${\mathrm{false}}$ (6) > $\mathrm{IsContained}\left(\mathrm{cs2},\mathrm{cs1},R\right)$ ${\mathrm{false}}$ (7) > $\mathrm{IsContained}\left(\mathrm{Intersection}\left(\mathrm{cs2},\mathrm{cs1},R\right),\mathrm{cs2},R\right)$ ${\mathrm{true}}$ (8) The empty constructible set is contained in any other constructible set. 
> $\mathrm{emcs}≔\mathrm{EmptyConstructibleSet}\left(R\right)$ ${\mathrm{emcs}}{≔}{\mathrm{constructible_set}}$ (9) > $\mathrm{IsContained}\left(\mathrm{emcs},\mathrm{cs2},R\right)$ ${\mathrm{true}}$ (10) > $\mathrm{IsContained}\left(\mathrm{emcs},\mathrm{emcs},R\right)$ ${\mathrm{true}}$ (11) Semi-algebraic case: > $\mathrm{lrsas1}≔\mathrm{RealTriangularize}\left(\left[{p}^{2}+{q}^{2}\right],\left[\right],\left[\right],\left[x-t\right],R\right)$ ${\mathrm{lrsas1}}{≔}\left[{\mathrm{regular_semi_algebraic_system}}\right]$ (12) > $\mathrm{lrsas2}≔\mathrm{RealTriangularize}\left(\left[p,q\right],\left[\right],\left[\right],\left[x+t,x-t\right],R\right)$ ${\mathrm{lrsas2}}{≔}\left[{\mathrm{regular_semi_algebraic_system}}{,}{\mathrm{regular_semi_algebraic_system}}\right]$ (13) > $\mathrm{IsContained}\left(\mathrm{lrsas1},\mathrm{lrsas2},R\right)$ ${\mathrm{false}}$ (14) > $\mathrm{IsContained}\left(\mathrm{lrsas2},\mathrm{lrsas1},R\right)$ ${\mathrm{true}}$ (15) References Chen, C.; Golubitsky, O.; Lemaire, F.; Moreno Maza, M.; and Pan, W. "Comprehensive Triangular Decomposition". Proc. CASC 2007, LNCS, Vol. 4770: 73-101. Springer, 2007. Chen, C.; Davenport, J.-D.; Moreno Maza, M.; Xia, B.; and Xiao, R. "Computing with semi-algebraic sets represented by triangular decomposition". Proceedings of 2011 International Symposium on Symbolic and Algebraic Computation (ISSAC 2011), ACM Press, pp. 75--82, 2011. Compatibility • The RegularChains[SemiAlgebraicSetTools][IsContained] command was introduced in Maple 16. • The lrsas1 parameter was introduced in Maple 16. • For more information on Maple 16 changes, see Updates in Maple 16.
1,528
4,653
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 36, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.8125
3
CC-MAIN-2023-06
longest
en
0.553907
https://web2.0calc.com/questions/geometric-series-and-applications_1
1,642,720,026,000,000,000
text/html
crawl-data/CC-MAIN-2022-05/segments/1642320302706.62/warc/CC-MAIN-20220120220649-20220121010649-00476.warc.gz
669,281,432
5,411
+0 # Geometric Series and Applications -1 94 1 +23 What is the sum of the series? 3+12+48+192+...+49,152 Oct 14, 2021 #1 +1 12/3 ==4 - this is the common ratio.
70
166
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.203125
3
CC-MAIN-2022-05
longest
en
0.853256
https://getrevising.co.uk/revision-tests/aqa-as-level-physics-materials?game_type=flashcards
1,547,663,092,000,000,000
text/html
crawl-data/CC-MAIN-2019-04/segments/1547583657557.2/warc/CC-MAIN-20190116175238-20190116201238-00276.warc.gz
507,137,214
20,113
# AQA AS-Level Physics: Materials What is density? Mass(kg)/volume(m3) 1 of 21 What is Hooke's law? Hooke's Law is the idea that extension is directly proportional to force applied up to the limit of proportionality. 2 of 21 What is a materials elastic limit? The maximum amount of load applied to a material before it can no longer return to its original shape. 3 of 21 What is a materials yield point? The point where the material begins to stretch without any extra load. 4 of 21 How do we calculate the spring constant? Spring constant = Force/extension 5 of 21 What is the combined spring constant of two springs in series? 1/Ka+1/Kb 6 of 21 What is the total spring constant if two springs in series are identical? K/2 7 of 21 What is the combined spring constant of two springs in parallel? Ka+Kb 8 of 21 What is the total spring constant of two springs in parallel are identical? 2K 9 of 21 How do we calculate energy stored in a spring? 1/2 x (Kinetic Energy ^2) 10 of 21 How do modern cars use the idea of deformation to reduce the impact on the driver if a car crash happens? Cars contain crumple zones which deform plastically to reduce the amount of energy transferred to the people in the crash. 11 of 21 What are compressive and tensile forces? Compressive forces are forces used to squash a material, tensile forces are forces used to stretch a material. 12 of 21 What is elastic deformation? A temporary change in shape of an object once a force is applied. The object returns to its original shape once the load/force is removed. 13 of 21 What is plastic deformation? A permanent change in shape of an object because it has passed its elastic limit. So much force has been applied that the object will no longer return to its original shape. 14 of 21 What is tensile stress? State the formula used to calculate it. The force applied per unit cross-sectional area (pressure). It causes strain. Tensile stress = Force/Area 15 of 21 What is tensile strain? 
State the formula used to calculate it. A measure of how the stretched material compares to its original length, because it is a ratio, it has no units. Tensile strain = Extension/Original length 16 of 21 What is the Young Modulus? Give two equations used to calculate it. The Young Modulus is a measure of how stiff a material is. Young Modulus = Stress (Nm-2 OR Pa)/Strain OR Young Modulus = (Applied force*original length)/(cross sectional area*extension) 17 of 21 How can elastic strain energy be calculated from a stress/strain graph? It is the total area underneath the graph. 18 of 21 The stiffness constant remains the same. 19 of 21 In a force-extension graph, why do we often find that the unloading line does not pass through the origin? Because the material has passed its elastic limit and been plastically deformed. The distance between the loading and unloading line is the work done to permanently deform the material 20 of 21 They fracture as they do not undergo plastic deformation. 21 of 21 ## Other cards in this set ### Card 2 #### Front What is Hooke's law? #### Back Hooke's Law is the idea that extension is directly proportional to force applied up to the limit of proportionality. ### Card 3 #### Front What is a materials elastic limit? ### Card 4 #### Front What is a materials yield point? ### Card 5 #### Front How do we calculate the spring constant?
799
3,369
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4
4
CC-MAIN-2019-04
latest
en
0.922685
https://winnerscience.com/2323/
1,680,163,585,000,000,000
text/html
crawl-data/CC-MAIN-2023-14/segments/1679296949107.48/warc/CC-MAIN-20230330070451-20230330100451-00710.warc.gz
690,487,209
18,040
# Galilean transformation equations derivation Let there are two inertial frames of references S and S’. S is the stationary frame of reference and S’ is the moving frame of reference. At time t=t’=0 that is in the start, they are at the same position that is Observers O and O’ coincides. After that S’ frame starts moving with a uniform velocity v along x axis. Let an event happen at position P in the frame S’. The coordinate of the P will be x’ according to the observer in S’ and it will be x according to O in S. The frame S’ has moved a distance “vt” in time t (refer figure). What should be the relation between x and x’. As we can see from the figure that x = x’ + vt’ But here the t = t’ thus x = x’ + vt                                (1) Where t and t’ are the time measured from S and S’ frames respectively. But what should be x’ = ? Yes you are right, it is x’ = x – vt                       (2) It can be achieved by just exchanging the sides of the equation (1). But there is more to it. It is just not by exchanging the sides. If we see equation 1, we will find that it is the position measured by O when S’ is moving with +v velocity. But if the same thing is measured by O’ then velocity of S should be –v. (For example, when we travel in a train, then according to the outside observers, we are travelling in x direction (suppose), but the outside objects, according to me travel in the opposite direction with the same but negative velocity). What should be the relation of y with y’? It will be y = y’                                                   (3) or y’ = y                                               (4) because there is no movement of frame along y-axis. 
Similarly z = z’                                      (5) And z’ = z                                             (6) And here t = t’                                      (7) And t’ = t                                             (8) Equations 1, 3, 5 and 7 are known as Galilean inverse transformation equations for space and time. Equations 2, 4, 6 and 8 are known as Galilean transformation equations for space and time.
505
2,138
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.09375
4
CC-MAIN-2023-14
longest
en
0.919407
http://www.ask.com/answers/210531/how-much-money-would-i-need-in-a-bank-to-draw-30-000-00-a-year-on-interest
1,394,994,357,000,000,000
text/html
crawl-data/CC-MAIN-2014-10/segments/1394678703273/warc/CC-MAIN-20140313024503-00047-ip-10-183-142-35.ec2.internal.warc.gz
185,065,182
22,180
Submit a question to our community and get an answer from real people. how much money would i need in a bank to draw 30,000.00 a year on interest ? Report as This would depend on what the annual interest rate is. The formula that you would use to solve this equation with the information that is needed is: Interest= Principle x interest rate x term (time) In your case you would put the amount of interest first, then leave the principle amount as unknown. Once you know the interest rate you can fill that in next, along with the time, 12 months. Your equation would look something like this: \$30,000=p x ?(interest rate) x 12 Then you would times the principle amount times the term (length of time), divide both sides of the equation by your answer to solve for "P".
175
778
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.625
4
CC-MAIN-2014-10
longest
en
0.974124
http://www.coderanch.com/t/584846/java/java/recursion
1,464,181,563,000,000,000
text/html
crawl-data/CC-MAIN-2016-22/segments/1464049274985.2/warc/CC-MAIN-20160524002114-00178-ip-10-185-217-139.ec2.internal.warc.gz
426,471,139
10,276
Win a copy of Design for the Mind this week in the Design forum! # problem with recursion Randall Twede Ranch Hand Posts: 4369 3 i am trying to solve project euler problem 76. "How many different ways can one hundred be written as a sum of at least two positive integers?" i tried a recursive approach but i get incorrect answers. recursion has never been easy for me and i don't see what i am doing wrong. this problem should ideally be solved using dynamic programming since the recursive solution will probably be slow, so any suggestions in that regard are welcome also. here is what i have. Steve Luke Bartender Posts: 4181 21 Recursion and looping is a pretty heady topic to just walk through and figure out what is going wrong wrong. What I would do (were I you) would be: 2) walk through the process by hand. Write each iteration so I can see how I got to the answer. 3) add print statements so I can see what is being counted in code. Or add break points and run in a debugger so I can look at different values as the code executes. 4) Compare #3 to #2 and see where the mistake is coming from 5) Scale up toe 100. Randall Twede Ranch Hand Posts: 4369 3 that is how i am trying to figure it out. they gave me the answer for 5 and i found the answer for 6 by hand. i have been trying System.out.println but havent figured it out yet. i will keep trying. Stephan van Hulst Bartender Posts: 5562 53 Randall, recursion becomes much easier if your code is self-documenting. I wouldn't call recurse() a particularly descriptive method name. What problem does it solve? What if you have a method int waysToWrite(int number, int limit)? It will determine in how many ways you can write number, with none of the terms of the sums exceeding limit. 
Let's take 7 as an example: 7 6 1 5 2 5 1 1 4 3 4 2 1 4 1 1 1 3 3 1 3 2 2 3 2 1 1 3 1 1 1 1 2 2 2 1 2 2 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 That gives the following number of ways to write 7, you just need to add them together: 7: 1 6: waysToWrite(1,6) 5: waysToWrite(2,5) 4: waysToWrite(3,4) 3: waysToWrite(4,3) 2: waysToWrite(5,2) 1: waysToWrite(6,1) My implementation of waysToWrite() didn't use dynamic programming, and took about 10 seconds to come up with the answer for waysToWrite(100, 99). You can add a HashMap that stores answers you already computed in a previous run, which will probably cause the program to come up with the final answer instantaneously.
694
2,419
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.34375
3
CC-MAIN-2016-22
longest
en
0.93762
http://www.mathworks.com/help/stats/raylcdf.html?nocookie=true
1,398,348,606,000,000,000
text/html
crawl-data/CC-MAIN-2014-15/segments/1398223206147.1/warc/CC-MAIN-20140423032006-00409-ip-10-147-4-33.ec2.internal.warc.gz
704,315,012
9,895
Accelerating the pace of engineering and science # Documentation Center • Trial Software # raylcdf Rayleigh cumulative distribution function ## Syntax p = raylcdf(x,b) p = raylcdf(x,b,'upper') ## Description p = raylcdf(x,b) returns the Rayleigh cdf at each value in x using the corresponding scale parameter, b. x and b can be vectors, matrices, or multidimensional arrays that all have the same size. A scalar input for x or b is expanded to a constant array with the same dimensions as the other input. p = raylcdf(x,b,'upper') returns the complement of the Rayleigh cdf at each value in x, using an algorithm that more accurately computes the extreme upper tail probabilities. The Rayleigh cdf is ## Examples expand all ### Compute and Plot Rayleigh Distribution cdf Compute the cdf of a Rayleigh distribution with parameter B = 1. ```x = 0:0.1:3; p = raylcdf(x,1); ``` Plot the cdf. ```figure; plot(x,p) ```
235
929
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.6875
3
CC-MAIN-2014-15
longest
en
0.699494
https://stacks.math.columbia.edu/tag/00K4
1,723,250,350,000,000,000
text/html
crawl-data/CC-MAIN-2024-33/segments/1722640782288.54/warc/CC-MAIN-20240809235615-20240810025615-00828.warc.gz
428,633,651
8,702
## 10.59 Noetherian local rings In all of this section $(R, \mathfrak m, \kappa )$ is a Noetherian local ring. We develop some theory on Hilbert functions of modules in this section. Let $M$ be a finite $R$-module. We define the Hilbert function of $M$ to be the function $\varphi _ M : n \longmapsto \text{length}_ R(\mathfrak m^ nM/{\mathfrak m}^{n + 1}M)$ defined for all integers $n \geq 0$. Another important invariant is the function $\chi _ M : n \longmapsto \text{length}_ R(M/{\mathfrak m}^{n + 1}M)$ defined for all integers $n \geq 0$. Note that we have by Lemma 10.52.3 that $\chi _ M(n) = \sum \nolimits _{i = 0}^ n \varphi _ M(i).$ There is a variant of this construction which uses an ideal of definition. Definition 10.59.1. Let $(R, \mathfrak m)$ be a local Noetherian ring. An ideal $I \subset R$ such that $\sqrt{I} = \mathfrak m$ is called an ideal of definition of $R$. Let $I \subset R$ be an ideal of definition. Because $R$ is Noetherian this means that $\mathfrak m^ r \subset I$ for some $r$, see Lemma 10.32.5. Hence any finite $R$-module annihilated by a power of $I$ has a finite length, see Lemma 10.52.8. Thus it makes sense to define $\varphi _{I, M}(n) = \text{length}_ R(I^ nM/I^{n + 1}M) \quad \text{and}\quad \chi _{I, M}(n) = \text{length}_ R(M/I^{n + 1}M)$ for all $n \geq 0$. Again we have that $\chi _{I, M}(n) = \sum \nolimits _{i = 0}^ n \varphi _{I, M}(i).$ Lemma 10.59.2. Suppose that $M' \subset M$ are finite $R$-modules with finite length quotient. Then there exists a constants $c_1, c_2$ such that for all $n \geq c_2$ we have $c_1 + \chi _{I, M'}(n - c_2) \leq \chi _{I, M}(n) \leq c_1 + \chi _{I, M'}(n)$ Proof. Since $M/M'$ has finite length there is a $c_2 \geq 0$ such that $I^{c_2}M \subset M'$. Let $c_1 = \text{length}_ R(M/M')$. 
For $n \geq c_2$ we have \begin{eqnarray*} \chi _{I, M}(n) & = & \text{length}_ R(M/I^{n + 1}M) \\ & = & c_1 + \text{length}_ R(M'/I^{n + 1}M) \\ & \leq & c_1 + \text{length}_ R(M'/I^{n + 1}M') \\ & = & c_1 + \chi _{I, M'}(n) \end{eqnarray*} On the other hand, since $I^{c_2}M \subset M'$, we have $I^ nM \subset I^{n - c_2}M'$ for $n \geq c_2$. Thus for $n \geq c_2$ we get \begin{eqnarray*} \chi _{I, M}(n) & = & \text{length}_ R(M/I^{n + 1}M) \\ & = & c_1 + \text{length}_ R(M'/I^{n + 1}M) \\ & \geq & c_1 + \text{length}_ R(M'/I^{n + 1 - c_2}M') \\ & = & c_1 + \chi _{I, M'}(n - c_2) \end{eqnarray*} which finishes the proof. $\square$ Lemma 10.59.3. Suppose that $0 \to M' \to M \to M'' \to 0$ is a short exact sequence of finite $R$-modules. Then there exists a submodule $N \subset M'$ with finite colength $l$ and $c \geq 0$ such that $\chi _{I, M}(n) = \chi _{I, M''}(n) + \chi _{I, N}(n - c) + l$ and $\varphi _{I, M}(n) = \varphi _{I, M''}(n) + \varphi _{I, N}(n - c)$ for all $n \geq c$. Proof. Note that $M/I^ nM \to M''/I^ nM''$ is surjective with kernel $M' / M' \cap I^ nM$. By the Artin-Rees Lemma 10.51.2 there exists a constant $c$ such that $M' \cap I^ nM = I^{n - c}(M' \cap I^ cM)$. Denote $N = M' \cap I^ cM$. Note that $I^ c M' \subset N \subset M'$. Hence $\text{length}_ R(M' / M' \cap I^ nM) = \text{length}_ R(M'/N) + \text{length}_ R(N/I^{n - c}N)$ for $n \geq c$. From the short exact sequence $0 \to M' / M' \cap I^ nM \to M/I^ nM \to M''/I^ nM'' \to 0$ and additivity of lengths (Lemma 10.52.3) we obtain the equality $\chi _{I, M}(n - 1) = \chi _{I, M''}(n - 1) + \chi _{I, N}(n - c - 1) + \text{length}_ R(M'/N)$ for $n \geq c$. We have $\varphi _{I, M}(n) = \chi _{I, M}(n) - \chi _{I, M}(n - 1)$ and similarly for the modules $M''$ and $N$. Hence we get $\varphi _{I, M}(n) = \varphi _{I, M''}(n) + \varphi _{I, N}(n-c)$ for $n \geq c$. $\square$ Lemma 10.59.4. Suppose that $I$, $I'$ are two ideals of definition for the Noetherian local ring $R$. 
Let $M$ be a finite $R$-module. There exists a constant $a$ such that $\chi _{I, M}(n) \leq \chi _{I', M}(an)$ for $n \geq 1$. Proof. There exists an integer $c \geq 1$ such that $(I')^ c \subset I$. Hence we get a surjection $M/(I')^{c(n + 1)}M \to M/I^{n + 1}M$. Whence the result with $a = 2c - 1$. $\square$ Proposition 10.59.5. Let $R$ be a Noetherian local ring. Let $M$ be a finite $R$-module. Let $I \subset R$ be an ideal of definition. The Hilbert function $\varphi _{I, M}$ and the function $\chi _{I, M}$ are numerical polynomials. Proof. Consider the graded ring $S = R/I \oplus I/I^2 \oplus I^2/I^3 \oplus \ldots = \bigoplus _{d \geq 0} I^ d/I^{d + 1}$. Consider the graded $S$-module $N = M/IM \oplus IM/I^2M \oplus \ldots = \bigoplus _{d \geq 0} I^ dM/I^{d + 1}M$. This pair $(S, N)$ satisfies the hypotheses of Proposition 10.58.7. Hence the result for $\varphi _{I, M}$ follows from that proposition and Lemma 10.55.1. The result for $\chi _{I, M}$ follows from this and Lemma 10.58.5. $\square$ Definition 10.59.6. Let $R$ be a Noetherian local ring. Let $M$ be a finite $R$-module. The Hilbert polynomial of $M$ over $R$ is the element $P(t) \in \mathbf{Q}[t]$ such that $P(n) = \varphi _ M(n)$ for $n \gg 0$. By Proposition 10.59.5 we see that the Hilbert polynomial exists. Lemma 10.59.7. Let $R$ be a Noetherian local ring. Let $M$ be a finite $R$-module. 1. The degree of the numerical polynomial $\varphi _{I, M}$ is independent of the ideal of definition $I$. 2. The degree of the numerical polynomial $\chi _{I, M}$ is independent of the ideal of definition $I$. Proof. Part (2) follows immediately from Lemma 10.59.4. Part (1) follows from (2) because $\varphi _{I, M}(n) = \chi _{I, M}(n) - \chi _{I, M}(n - 1)$ for $n \geq 1$. $\square$ Definition 10.59.8. Let $R$ be a local Noetherian ring and $M$ a finite $R$-module. We denote $d(M)$ the element of $\{ -\infty , 0, 1, 2, \ldots \}$ defined as follows: 1. If $M = 0$ we set $d(M) = -\infty$, 2. 
if $M \not= 0$ then $d(M)$ is the degree of the numerical polynomial $\chi _ M$. If $\mathfrak m^ nM \not= 0$ for all $n$, then we see that $d(M)$ is the degree $+1$ of the Hilbert polynomial of $M$. Lemma 10.59.9. Let $R$ be a Noetherian local ring. Let $I \subset R$ be an ideal of definition. Let $M$ be a finite $R$-module which does not have finite length. If $M' \subset M$ is a submodule with finite colength, then $\chi _{I, M} - \chi _{I, M'}$ is a polynomial of degree $<$ degree of either polynomial. Proof. Follows from Lemma 10.59.2 by elementary calculus. $\square$ Lemma 10.59.10. Let $R$ be a Noetherian local ring. Let $I \subset R$ be an ideal of definition. Let $0 \to M' \to M \to M'' \to 0$ be a short exact sequence of finite $R$-modules. Then 1. if $M'$ does not have finite length, then $\chi _{I, M} - \chi _{I, M''} - \chi _{I, M'}$ is a numerical polynomial of degree $<$ the degree of $\chi _{I, M'}$, 2. $\max \{ \deg (\chi _{I, M'}), \deg (\chi _{I, M''}) \} = \deg (\chi _{I, M})$, and 3. $\max \{ d(M'), d(M'')\} = d(M)$, Proof. We first prove (1). Let $N \subset M'$ be as in Lemma 10.59.3. By Lemma 10.59.9 the numerical polynomial $\chi _{I, M'} - \chi _{I, N}$ has degree $<$ the common degree of $\chi _{I, M'}$ and $\chi _{I, N}$. By Lemma 10.59.3 the difference $\chi _{I, M}(n) - \chi _{I, M''}(n) - \chi _{I, N}(n - c)$ is constant for $n \gg 0$. By elementary calculus the difference $\chi _{I, N}(n) - \chi _{I, N}(n - c)$ has degree $<$ the degree of $\chi _{I, N}$ which is bigger than zero (see above). Putting everything together we obtain (1). Note that the leading coefficients of $\chi _{I, M'}$ and $\chi _{I, M''}$ are nonnegative. Thus the degree of $\chi _{I, M'} + \chi _{I, M''}$ is equal to the maximum of the degrees. Thus if $M'$ does not have finite length, then (2) follows from (1). If $M'$ does have finite length, then $I^ nM \to I^ nM''$ is an isomorphism for all $n \gg 0$ by Artin-Rees (Lemma 10.51.2). 
Thus $M/I^ nM \to M''/I^ nM''$ is a surjection with kernel $M'$ for $n \gg 0$ and we see that $\chi _{I, M}(n) - \chi _{I, M''}(n) = \text{length}(M')$ for all $n \gg 0$. Thus (2) holds in this case also. Proof of (3). This follows from (2) except if one of $M$, $M'$, or $M''$ is zero. We omit the proof in these special cases. $\square$ Comment #679 by Keenan Kidwell on Should there be a reference to 00JZ in the proof of 00K8, in order to deduce that $\chi_{I,M}$ is a numerical polynomial from the fact that $\varphi_{I,M}$ is? In your comment you can use Markdown and LaTeX style mathematics (enclose it like $\pi$). A preview option is available if you wish to see how it works out (just click on the eye in the toolbar).
3,209
8,506
{"found_math": true, "script_math_tex": 2, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 2, "x-ck12": 0, "texerror": 0}
3.203125
3
CC-MAIN-2024-33
latest
en
0.694015
https://s21570.gridserver.com/bunk-bed-imc/ee18c6-interpret-hat-matrix
1,653,175,410,000,000,000
text/html
crawl-data/CC-MAIN-2022-21/segments/1652662541747.38/warc/CC-MAIN-20220521205757-20220521235757-00350.warc.gz
575,127,551
9,783
This item is part of JSTOR collection For terms and use, please refer to our Terms and Conditions The index plots of DFBETAS (Outputs 51.6.4 and 51.6.5) indicate that case 4 and case 18 are causing instability in all three parameter estimates. First, consider the link function of the outcome variable on theleft hand side of the equation. The easiest way to visualize a correlation matrix in R is to use the package corrplot.. >> Interval variables are continuous measurements that may be either positive or negative and follow a linear scale. Check out using a credit card or bank account with. Active 4 years, 1 month ago. Is it identical to the information you get out of the hat matrix applying linear regression? Sampling Covariance of Regression Weights. Recently I was asked about the design matrix (or model matrix) for a regression model and why it is important. cooksd calculates the Cook’s D influence statistic (Cook1977). These estimates will be approximately normal in general. The λ parameter is the regularization penalty. stream The typical model formulation is: hat_matrix_diag). Read your article online and download the PDF from your email or your account. To find the slope, we get two points that have as nice coordinates as possible. which in turn can be computed based on the residuals ˆu, the diagonal of the hat matrix h and the degrees of freedom n − k. To translate these conceptual properties of this class of HC estimators into a computational tool, a function is required which takes a fitted regres-AchimZeileis 5 sion model and the diagonal elements ω as inputs and returns the corresponding Ψˆ HC. The journal is organized There are several ways to find the b that minimizes Q. As you saw in the video, a confusion matrix is a very useful tool for calibrating the output of a model and examining all possible outcomes of your predictions (true positive, true negative, false positive, false negative). 
Building on two centuries' experience, Taylor & Francis has grown rapidlyover the last two decades to become a leading international academic publisher.The Group publishes over 800 journals and over 1,800 new books each year, coveringa wide variety of subject areas and incorporating the journal imprints of Routledge,Carfax, Spon Press, Psychology Press, Martin Dunitz, and Taylor & Francis.Taylor & Francis is fully committed to the publication and dissemination of scholarly information of the highest quality, and today this remains the primary goal. The hat matrix provides a measure of leverage. option. We assume that the logit function (in logisticregression) is thecorrect function to use. JSTOR®, the JSTOR logo, JPASS®, Artstor®, Reveal Digital™ and ITHAKA® are registered trademarks of ITHAKA. Then, $\phi$ is a rotation and its matrix can be denoted by . The American Statistician From the graph, we see that the line goes through the points (10,6) and (15,4). This function provides the basic quantities which areused in forming a wide variety of diagnostics forchecking the quality of regression fits. The leverage statistic indicates how far an observation is from the centroid of the data in the space of the explanatory variables. cov_params ([r_matrix, column, scale, cov_p, …]) Compute the variance/covariance matrix. By writing H 2= HHout fully and cancelling we nd H = H. A matrix Hwith H2 = His called idempotent. a numeric value or matrix for the rate of the "positive" class of the data. Further Matrix Results for Multiple Linear Regression. You can learn more about that technique by searching for “E… First, we calculate the hat matrix H (from the data in Figure 1 of Multiple Regression Analysis in Excel) by using the array formula =MMULT(MMULT(E4:G14,E17:G19),TRANSPOSE(E4:G14)) where E4:G14 contains the design matrix X. Alternatively, H can be calculated using the supplemental function HAT(A4:B14). x��YK������ble9ƼJ��*KV*)WJ[q\�Xr��k. 
(The term "hat ma- This approach also simplifies the calculations involved in removing a data point, and it requires only simple modifications in the preferred numerical least-squares algorithms. Find, copy and paste your favorite characters: Emoji, Hearts, Currencies, → Arrows, ★ Stars and many others Unicode web service for character search. Another statistic, sometimes called the hat diagonal since technically it is the diagonal of the hat matrix, measures the leverage of an observation. Moving a vector around in three-dimensional space can be a complicated affair. 87 0 obj << • The hat matrix plans an important role in diagnostics for regression analysis. © 1978 American Statistical Association stream a vector containing the diagonal of the ‘hat’ matrix. The hat matrix diagonal is a standardized measure of the distance of ith an observation from the centre (or centroid) of the x space. f_test (r_matrix[, cov_p, scale, invcov]) Compute the F-test for a joint linear hypothesis. /Filter /FlateDecode The vector ^ygives the tted values for observed values ~yfrom the model estimates. mode . H plays an important role in regression diagnostics, which you may see some time. This represents Q as a 1 × 1 matrix, and so we can think of Q as an ordinary number. About; Resources; RSS Feed; Interpreting ROC Curves, Precision-Recall Curves, and AUCs. Solution. Calculate a confusion matrix. Running a basic multiple regression analysis in SPSS is simple. Technical Article How to Interpret IMU Sensor Data for Dead-Reckoning: Rotation Matrix Creation April 23, 2019 by Mark Hughes Working with IMUs can maddening for a variety of reasons, but what scares people the most is usually the math. In simple linear regression i.e. Machine Learning. A 3×3 matrix contains all of the necessary information to move a vector in a single rotation without using trigonometry. 
Thus large hat diagonals reveal observations that are potentially influential because they are remote in x-space from the rest of the sample. The leverage of observation i is the value of the ith diagonal term h ii of the hat matrix H. Because the sum of the leverage values is p (the number of coefficients in the regression model), an observation i can be considered an outlier if its leverage substantially exceeds p / n , where n is the number of observations. a parameter for the intercept and a parameter for the slope. One important matrix that appears in many formulas is the so-called "hat matrix," $$H = X(X^{'}X)^{-1}X^{'}$$, since it puts the hat on $$Y$$! In order to get the estimated effect of a one unit change in income (that is, a change in the original units, thousands of dollars) on test scores, the method presented in Key Concept 8.1 can be used. Matrix notation applies to other regression topics, including fitted values, residuals, sums of squares, and inferences about regression parameters. However, much can still be extracted from this scatterplot matrix (think about BS exercises you might have done for English or Art) about experimental design and possible outcomes. /Length 972 get_hat_matrix_diag ([observed]) Compute the diagonal of the hat matrix. is a vector of parameters to be estimated from the independent variables. These estimates are normal if Y is normal. The Data Matrix Definition Collection of Column Vectors We can view a data matrix as a collection ofcolumn vectors: X = 0 B @x1 x2 Axp 1 C where xj is the j-th column of X for j 2f1;:::;pg. The tted value of ~y, ^yis then y^ = X ^ 4 Objects that are more similar to one another are ordinated closer together. Hat Matrix and Leverage Hat Matrix Purpose. Observations far from the centroid are potentially influential in fitting the regression model. %���� Series (influence. OLS minimizes the sum of the squared residuals. 
coefficients (unless do.coef is false) a matrix whose i-th row contains the change in the estimated coefficients which results when the i-th case is dropped from the regression. ( ) 1 nn n p pnn p pn − ×××× × H = H. a Hwith. N X PART a ) -PART b ) ) / ( N2 – PART b ) /... This matrix b is a linear combination of the study interpreting least squares potentially influential fitting! To rotate vectors about ; Resources ; RSS Feed ; interpreting roc Curves, and about! An Example of how to perform a confirmatory factor analysis using lavaan in the (! Determine the slope of the hat matrix are important in interpreting least.!, as we are dealing with the two sides of our logisticregression equation 1 – hat Y^... Glm ) algorithm the independent variables see some time using trigonometry = His called idempotent a! Squared ( OLS ) estimator including fitted values, residuals, sums squares..., as we are dealing with the two sides of our logisticregression equation U of Minnesota ),. Basic Multiple regression analysis Tutorial by Ruben Geert van den Berg under regression hat... And ( 15,4 ) let ’ s into Y^ ’ s value of ~0, ” it., price, temperature, and Correlation matrix Updated 16-Jan-2017: Slide 5 is not.. Hy where H= X ( X0X ) −1y, the JSTOR logo, JPASS® Artstor®! Nd H = H. a matrix Hwith H2 = His called idempotent include! And it has some important uses diagonals reveal observations that are more covariates than samples Curves are staple. Are arbitrary as is the hat matrix which makes yˆ, the JSTOR logo, JPASS®, Artstor® reveal. In regression diagnostics, which you may see some time levels, prevalence should a., $\phi$ is a data matrix of a logistic regression,! Using lavaan in the R statistical programming language we assume that the of... Rss Feed ; interpreting roc Curves, precision-recall Curves, and time value will have on each fitted.. @ mb i= @ y j= H ij calculates the Cook ’ s is to.... 
) and ( 15,4 ) points for time, Chick and Diet analysis in spss is simple residuals for 1. Two points that have as nice coordinates as possible D influence statistic ( Cook1977 ) including! The R statistical programming language we see that the logit function ( in logisticregression ) thecorrect... Updated 16-Jan-2017: Slide 5 Resources ; RSS Feed ; interpreting roc Curves, inferences... Your email or your account roc and precision-recall Curves, and inferences about regression parameters linear?... Trademarks of ITHAKA, and Correlation matrix Updated 16-Jan-2017: Slide 5 is thecorrect function use. The independent variable X plot because it contains discrete data points for time, Chick Diet. ) −1X0 points which are outliers among the independent variables −1X0Y Y^ = Xb Y^ = HY where H= (. Are optimal estimates for the intercept and a parameter for the nobservations are Y~ X! Call it as the ordinary least Squared ( OLS ) estimator that minimizes Q matrix in R is to.. The space of the explanatory variables in a single numeric value the matrix outliers among the independent variables staple the... Values with elements for each class consider the link function of y data, Covariance, and about... And Diet an observation is from the regression line are identified perform a confirmatory analysis... By writing H 2= HHout fully and cancelling we nd H = XXX X′′ ) collinear, of... Value will have on each fitted value post, I present an of!, prevalence should be a single rotation without using trigonometry 4 years, 1 ago! Centroid of the regression model is less clear to me containing the diagonal of the outcome variable on hand! It is important to understand the influence each response value has on each fitted value ~yfrom the model estimates Q!, prevalence should be a single character string either sens_spec '' or. The data in the case of studentized residuals, sums of squares, βˆ = ( )... 
Values will interpret hat matrix 0 age, price, temperature, and Correlation matrix in R is to use card bank. Age, price, temperature, and time known n × n matrix sense makes the predictors orthogonal Posts Tech! Data points for time, Chick and Diet the dependent variable y and the independent variables the orientation the! For the interpretation of binary classifiers the simple solution we ’ ll show here ( alas ) requires knowing answer! A data y value variable X model is less clear to me outliers. × 1 matrix, and time inverse operation in a single character string either ` sens_spec '', prec_recall... Should be a vector of parameters to be estimated from the regression line as we are dealing the. It as the “ hat matrix applying linear regression that have as coordinates... Its matrix can be denoted by scatterplot matrix is the hat matrix which yˆ. The plot = X ( X0X ) −1X0Y Y^ = HY where H= X ( X0X ).! Fully and cancelling we nd H = XXX X′′ let ’ s is turns y ’ s look at of. Understand the influence each response value has on each fitted y value an Example how. Invcov ] ) Compute the diagonal of the independent variable X in logisticregression is! # variables ) – hat matrix of b this matrix b is a linear combination of sample! Regression parameters = HY where H= X ( X0X ) −1X0Y Y^ = interpret hat matrix where H= X ( X0X −1X0Y! Model, we get two points that have as nice coordinates as possible observed ] Compute! Curves are a staple for the intercept and a parameter for the slope the! Are a staple for the nobservations are Y~ = X ( X0X ) −1X0 are! 1 matrix, ” and it has some important uses scale, invcov ] Compute! That are potentially influential in fitting the regression line in the space of the outcome variable on theleft hand of... 1 month ago otherwise, it should be a vector containing the diagonal of the is! Line goes through the Generalized linear model ( GLM ) algorithm of b this matrix b is a linear.... 
Regression models in SAS this involvestwo aspects, as we are dealing with the two sides of our equation! If the design matrix ( or model matrix ) for a joint linear hypothesis line are identified important to the! Here ( alas ) requires knowing the answer and working backward the “ hat matrix the! To reorient your vectors in a single rotation without using trigonometry = Xb Y^ = Xb =. So we can think of Q as a 1 × 1 matrix and... Last plot because it contains discrete data points for time, Chick and Diet to... And the independent variables, cov_p, scale, invcov ] ) Compute the diagonal of the properties of necessary! The necessary information to move a vector containing the diagonal of the study be 0 logit (. Names corresponding to the information you get out of y there are more covariates than samples on! Term \hat matrix '' for Hbecause it puts the hat matrix which makes yˆ, the logo! To reorient your vectors in a single numeric value ) 1 nn n p p! Nice coordinates as possible invertible matrix in R is to use the package corrplot the Generalized linear model ( )! ) −1y, the estimator is not opti-mal forming a wide variety of diagnostics forchecking the quality of fits. ) Compute the diagonal of the ‘ hat ’ matrix regression line in the case of studentized residuals large. The basic quantities which areused in forming a wide variety of diagnostics forchecking the quality regression. The interpret hat matrix estimates you how to code linear regression follow a linear combination of the hat on.. Common case where this happens is if there are more covariates than samples are dealing with two! Get_Hat_Matrix_Diag ( [ observed ] ) Compute the diagonal of the elements of y, we get two points have. Data has two levels, interpret hat matrix should be a vector containing the diagonal of properties. ~ '' where ~ '' where ~ '' has en expected value of.. Case where this happens is if there are several ways to find the b that Q! 
By ordinary least squares move a vector of numeric values with elements for each class × n matrix OLS estimator! The easiest way to visualize a Correlation matrix Updated 16-Jan-2017: Slide 5 to... Observed values ~yfrom the model estimates a related matrix interpret hat matrix the orientation of the hat which! 1 – hat matrix, ” and it has some important uses Covariance interpret hat matrix. B that minimizes Q are more covariates than samples and AUCs combination of the elements of.. Two points that have as nice coordinates as possible X ( X0X −1X0... Fitting the regression line are identified order n p pnn p pn − ×××× × H = a! Influential because they are remote in x-space from the regression line in the of! As a 1 × 1 matrix, ” and it has some important uses where V is linear! As we are dealing with the two sides of our logisticregression equation ask Asked., we can think of Q as a 1 × 1 matrix, ” and it has important! Examples include height, weight, age, price, temperature, and AUCs tukey coined the term matrix! Are likely to be influential ; Resources ; RSS Feed ; interpreting roc,. The centroid of the hat matrix, ” and it has some important uses @ mb @! The plot using trigonometry, temperature, and Correlation matrix in R is to..
3,772
16,688
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.984375
3
CC-MAIN-2022-21
latest
en
0.872911
http://agariogame.org/300-k-to-miles
1,610,765,369,000,000,000
text/html
crawl-data/CC-MAIN-2021-04/segments/1610703499999.6/warc/CC-MAIN-20210116014637-20210116044637-00001.warc.gz
4,085,157
5,389
300 K To Miles » agariogame.org Km to miles Kilometers to miles converter. Jul 24, 2013 · However, 20 mph is 32.19 km PER HOUR. The formula to convert miles to km 20 mi 63360 in 1 mi 2.54 cm 1 in 1 km 100000 cm = 32.18688 km. How to convert kilometers to miles. 1 kilometer is equal to 0.62137119 miles: 1 km = 1/1.609344 mi = 0.62137119 mi. The distance d in miles mi is equal to the distance d in kilometers km divided by 1.609344: d mi = d km / 1.609344. Example. Convert 20 kilometers to miles: d mi = 20km / 1.609344 = 12.4274mi. Kilometers to miles conversion table. Unit Descriptions; 1 Kilometer: 1 Kilometer is equal to 1000 meters. Meters is the SI base unit of length. The prefix kilo, abbreviated "k", indicates one thousand.1 km = 1000 m. 1 Mile: 1 Mile is exactly 1.609344 kilometres by international agreement in 1959. Jun 26, 2008 · Dont forget that the body and suspension is also 300,000miles old, so lots of parts there will be worn out. The car doesnt look to be well maintained, as there is a spare wheel on the front, instead of a magwheel. If they cant afford to replace a tire or a wheel, they cant afford to maintain a BMW. 300,000 equates to roughly 50 miles every day. Kilometres km to Miles mi Converter. This is a web form that converts Kilometres km into Miles mi and vice versa. Miles value will be converted automatically as you type. The decimals value is the number of digits to be calculated or rounded of the result of kilometer to miles conversion. You can also check the kilometer to miles conversion chart below, or go back to kilometer to miles converter to top. Nov 30, 2006 · Answers.well 1.6 km is equal to one mile. So,just divide 300 by 1.6 = 187.5 mph. Anyone gotten to 300k miles? I've seen a lot of high mileage posts in here, some in the high 200's. I was just curious if anyone's hit 300k in a 3 yet. 10 comments. share. save hide report. 100% Upvoted. This thread is archived. New comments cannot be posted and votes cannot be cast. Sort by. best. 
best top new controversial old q&a. level 1. Aug 26, 2011 · MilesBuzz - 300 K miles for family of 6 - Where to go for December? - I've been working at this for a few months & finally have 300 K miles American & 200 K British Airways. I want to take family somewhere fun for Christmas, leaving on the 25th, 26th, 27th, or 28th, and coming back a week after we go. We are. Ok, I have a 2012 chrysler 300s, the way you change your settings from metric to imperial is through your touch screen console. On the touch screen in the center of the car where your GPS screen is you choose settings, then display, then you click US for imperial/Miles/ Fer or metric for KMs/Celsius. 300 miles equal 482.8032 kilometers 300mi = 482.8032km. Converting 300 mi to km is easy. Simply use our calculator above, or apply the formula to change the length 300 mi to km. Convert KILOMETERS km to MILES and FEET also spelled kilometre kilo is the Prefix meaning 1 000 times as much There are exactly 1000 m in 1 km 1 kilometer which is pronounced "KILL-oh-meet-ur" NOT "kill-AH-mit-ur" Use the symbol km To convert kilometers to miles multiply by 0.6 1 millimeter = 0.03937 inch 1 centimeter = 0.3937 inch. KMH to MPH conversion Edit any of the fields below and get answer: Km per hour Miles per hour. How to recalculate 300 Km per hour to Miles per hour? What is the formula to convert from 300 kmh/h to mp/h? Kilometers/hour to Miles/hour formula: [Miles] = Km / 1.61 The final formula to convert 300 KMH to MPH is: [MPH] = 300 / 1.61 = 186.34. 300k miles. 31 comments. share. save hide report. 96% Upvoted. This thread is archived. New comments cannot be posted and votes cannot be cast. Sort by. best. best top new controversial old q&a. level 1. classicman. 48 points · 2 years ago. Looks good except the window sticker. level 2. AndIDrankAllTheBeer.
1,104
3,839
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.046875
3
CC-MAIN-2021-04
latest
en
0.864882
http://gfs.sourceforge.net/wiki/index.php?title=Programming_the_Advection_Scheme&diff=1971&oldid=1956&printable=yes
1,524,784,393,000,000,000
text/html
crawl-data/CC-MAIN-2018-17/segments/1524125948617.86/warc/CC-MAIN-20180426222608-20180427002608-00038.warc.gz
127,008,936
14,625
(Difference between revisions) + the function ''par->flux'' . This is not a fixed name for a function but a pointer to a function that defines the advection flux. We cannot click on it to determine how it is defined ! Doing a grep on flux reveals many lines. Doing a - % cd src + - % make tags + % grep par\-\>flux *.c + flux = u_face T_face + + and we know how the code implements the scheme ! == The end for now == == The end for now == ## How the equations are solved Our first two courses were about the general notion of object-oriented programming and the implementation of these notions in some typical Gerris classes. Now we turn the heart of the matter: how does Gerris solve the Navier-Stokes equations ? ### simulation_run() Let us assume that you are familiar with Stephane's Journal of Computational Physics article and you want to see how it is implemented in code. On way to do it would be to see what is executed line by line using a tool such as ddd. In this course we lead you directly to the interesting part. We skip the bookkeeping and initialisation stuff that is executed first in gerris. The core of the simulation is in `simulation.c`, specifically in `simulation_run()`. Let us look at this function. 
We start with some more bookkeeping: `static void simulation_run (GfsSimulation * sim){ GfsVariable * p, * pmac, * res = NULL; GfsDomain * domain; GSList * i;  domain = GFS_DOMAIN (sim);  p = gfs_variable_from_name (domain->variables, "P"); g_assert (p); pmac = gfs_variable_from_name (domain->variables, "Pmac"); g_assert (pmac);  gfs_simulation_refine (sim); gfs_simulation_init (sim);  i = domain->variables; while (i) { if (GFS_IS_VARIABLE_RESIDUAL (i->data)) res = i->data; i = i->next; }` Then we start the various steps of the integration of a time step.: `gfs_simulation_set_timestep (sim); if (sim->time.i == 0) { gfs_approximate_projection (domain, &sim->approx_projection_params, &sim->advection_params, p, sim->physical_params.alpha, res); advance_tracers (domain, sim->advection_params.dt/2.); } while (sim->time.t < sim->time.end && sim->time.i < sim->time.iend) { GfsVariable * g[FTT_DIMENSION]; gdouble tstart = gfs_clock_elapsed (domain->timer);  gts_container_foreach (GTS_CONTAINER (sim->events), (GtsFunc) gfs_event_do, sim);` ### Inside the time loop Now we are fully inside the time loop. 
`gfs_simulation_set_timestep (sim);  gfs_predicted_face_velocities (domain, FTT_DIMENSION, &sim->advection_params);  gfs_variables_swap (p, pmac); gfs_mac_projection (domain, &sim->projection_params, &sim->advection_params, p, sim->physical_params.alpha, g); gfs_variables_swap (p, pmac);  gts_container_foreach (GTS_CONTAINER (sim->events), (GtsFunc) gfs_event_half_do, sim);  gfs_centered_velocity_advection_diffusion (domain, FTT_DIMENSION, &sim->advection_params, g, sim->physical_params.alpha);  if (gfs_has_source_coriolis (domain)) { gfs_poisson_coefficients (domain, sim->physical_params.alpha); gfs_correct_normal_velocities (domain, 2, p, g, 0.); gfs_correct_centered_velocities (domain, 2, g, sim->advection_params.dt); gfs_source_coriolis_implicit (domain, sim->advection_params.dt); gfs_correct_normal_velocities (domain, 2, p, g, 0.); gfs_correct_centered_velocities (domain, 2, g, -sim->advection_params.dt); }  gfs_domain_cell_traverse (domain, FTT_POST_ORDER, FTT_TRAVERSE_NON_LEAFS, -1, (FttCellTraverseFunc) gfs_cell_coarse_init, domain); gfs_simulation_adapt (sim);  gfs_approximate_projection (domain, &sim->approx_projection_params, &sim->advection_params, p, sim->physical_params.alpha, res);  advance_tracers (domain, sim->advection_params.dt);  sim->time.t = sim->tnext; sim->time.i++;  gts_range_add_value (&domain->timestep, gfs_clock_elapsed (domain->timer) - tstart); gts_range_update (&domain->timestep); gts_range_add_value (&domain->size, gfs_domain_size (domain, FTT_TRAVERSE_LEAFS, -1)); gts_range_update (&domain->size); } gts_container_foreach (GTS_CONTAINER (sim->events), (GtsFunc) gfs_event_do, sim); gts_container_foreach (GTS_CONTAINER (sim->events), (GtsFunc) gts_object_destroy, NULL);}` From now on it becomes difficult to offer a traditional written description of the course. 
Instead, in a class the teacher should open a text editor, projecting it on screen in front of the students, and navigate through the code using the tip explained in the previous course. Correspondingly, the reader or listener to the course should open his text editor and navigate freely in the code, trying to find the implentation of the numerical methods. We guide the reader below for one particular scheme: the tracer advection-diffusion equation. The tracer is updated though the function `advance_tracers`. Click on it, do M-. to navigate and observe the function: `static void advance_tracers (GfsDomain * domain, gdouble dt){ GSList * i = domain->variables; while (i) { if (GFS_IS_VARIABLE_TRACER_VOF (i->data)) { GfsVariableTracer * t = i->data;  t->advection.dt = dt; gfs_tracer_vof_advection (domain, &t->advection); gfs_domain_variable_centered_sources (domain, i->data, i->data, t->advection.dt); } else if (GFS_IS_VARIABLE_TRACER (i->data)) { GfsVariableTracer * t = i->data;  t->advection.dt = dt; gfs_tracer_advection_diffusion (domain, &t->advection); gfs_domain_cell_traverse (domain, FTT_POST_ORDER, FTT_TRAVERSE_NON_LEAFS, -1, (FttCellTraverseFunc) GFS_VARIABLE1 (t)->fine_coarse, t); } i = i->next; } }` We see that we have two sub-functions `gfs_tracer_vof_advection` and `gfs_tracer_advection_diffusion`. One uses the VOF scheme to advect non-diffusive interfaces. The second advects traditional tracers such as temperature. We concentrate for now on the second type of advection and, clicking on the corresponding function we see: `/** * gfs_tracer_advection_diffusion: * @domain: a #GfsDomain. * @par: the advection parameters. * * Advects the @v field of @par using the current face-centered (MAC) * velocity field. 
*/void gfs_tracer_advection_diffusion (GfsDomain * domain, GfsAdvectionParams * par){ GfsSourceDiffusion * d;  g_return_if_fail (domain != NULL); g_return_if_fail (par != NULL);  gfs_domain_timer_start (domain, "tracer_advection_diffusion");  if ((d = source_diffusion (par->v))) { GfsVariable * rhs;  rhs = gfs_temporary_variable (domain); gfs_domain_cell_traverse (domain, FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1, (FttCellTraverseFunc) gfs_cell_reset, rhs); variable_sources (domain, par, rhs, NULL); variable_diffusion (domain, d, par, rhs, NULL); gts_object_destroy (GTS_OBJECT (rhs)); } else { variable_sources (domain, par, par->v, NULL); gfs_domain_bc (domain, FTT_TRAVERSE_LEAFS, -1, par->v); }  gfs_domain_timer_stop (domain, "tracer_advection_diffusion");}` At this stage the reader may be a little lost: where is the function that advects the tracer ? We do see the lines `gfs_domain_cell_traverse (domain, FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1, (FttCellTraverseFunc) gfs_cell_reset, rhs);` It involves the important function gfs_domain_cell_traverse. ### gfs_domain_cell_traverse In the example above, this function applies the function ` (FttCellTraverseFunc) gfs_cell_reset ` to the data ` rhs ` in each cell. It is often used in the code to perform operations local to each cell. However in the example above the call to this function does not do much: it justs resets `rhs` to zero. We must find the actual advection in another function at a deeper level. ### variable_sources (domain, par, rhs, NULL) Having a look at this function (we do not display it here, but you should look at it now using you text editor) reveals a large number of functions. The gfs_domain_cell_traverse function is called many times to apply various functions. One of the important functions for the implementation of the u grad T term is the function par->flux . This is not a fixed name for a function but a pointer to a function that defines the advection flux. 
We cannot click on it to determine how it is defined ! Doing a grep on flux reveals many lines. Doing a ```% grep par\-\>flux *.c ``` reveals the definition of the flux function in the file `advection.c` in the module `gfs_advection_params_read`. There are actually several options for this function (you can see them in the code). The option can be selected in the simulation .gfs file. We can see it in `advection.c` . Only one of them is the default one. Where is the default initialized ? It is not initialized in the object initialization function, but in the module `simulation_init() ` of `simulation.c` . `object->advection_params.flux = gfs_face_velocity_advection_flux;` If we look in this function we see `flux = GFS_FACE_FRACTION (face)*GFS_FACE_NORMAL_VELOCITY (face)*par->dt /ftt_cell_size (face->cell);` so now we know that the flux has the well-known form ```flux = u_face T_face ``` and we know how the code implements the scheme ! ## The end for now Go to course top level. Go to top of page. Go to Next Session.
2,528
10,940
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.59375
3
CC-MAIN-2018-17
latest
en
0.778598
https://www.convertunits.com/molarmass/Hydrogen+Bromide
1,590,630,135,000,000,000
text/html
crawl-data/CC-MAIN-2020-24/segments/1590347396300.22/warc/CC-MAIN-20200527235451-20200528025451-00346.warc.gz
668,768,872
7,306
# ››Hydrogen Bromide molecular weight Molar mass of HBr = 80.91194 g/mol Molecular weight calculation: 1.00794 + 79.904 # ››Percent composition by element Element Symbol Atomic Mass # of Atoms Mass Percent Bromine Br 79.904 1 98.754% Hydrogen H 1.00794 1 1.246% # ››Calculate the molecular weight of a chemical compound Enter a chemical formula: Browse the list of common chemical compounds. # ››More information on molar mass and molecular weight In chemistry, the formula weight is a quantity computed by multiplying the atomic weight (in atomic mass units) of each element in a chemical formula by the number of atoms of that element present in the formula, then adding all of these products together. Formula weights are especially useful in determining the relative weights of reagents and products in a chemical reaction. These relative weights computed from the chemical equation are sometimes called equation weights. If the formula used in calculating molar mass is the molecular formula, the formula weight computed is the molecular weight. The percentage by weight of any atom or group of atoms in a compound can be computed by dividing the total weight of the atom (or group of atoms) in the formula by the formula weight and multiplying by 100. Using the chemical formula of the compound and the periodic table of elements, we can add up the atomic weights and calculate molecular weight of the substance. The atomic weights used on this site come from NIST, the National Institute of Standards and Technology. We use the most common isotopes. This is how to calculate molar mass (average molecular weight), which is based on isotropically weighted averages. This is not the same as molecular mass, which is the mass of a single molecule of well-defined isotopes. For bulk stoichiometric calculations, we are usually determining molar mass, which may also be called standard atomic weight or average atomic mass. A common request on this site is to convert grams to moles. 
To complete this calculation, you have to know what substance you are trying to convert. The reason is that the molar mass of the substance affects the conversion. This site explains how to find molar mass. Finding molar mass starts with units of grams per mole (g/mol). When calculating molecular weight of a chemical compound, it tells us how many grams are in one mole of that substance. The formula weight is simply the weight in atomic mass units of all the atoms in a given formula.
522
2,490
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.890625
3
CC-MAIN-2020-24
latest
en
0.91375
https://www.slideshare.net/nomadiclearner/excel-tutorial-1-basic-functions
1,495,555,017,000,000,000
text/html
crawl-data/CC-MAIN-2017-22/segments/1495463607647.16/warc/CC-MAIN-20170523143045-20170523163045-00152.warc.gz
961,982,666
64,460
Upcoming SlideShare × # Excel tutorial 1 (basic functions) 772 views Published on MS Excel Tutorial 1 (Basic Functions) Published in: Education 0 Likes Statistics Notes • Full Name Comment goes here. Are you sure you want to Yes No • Be the first to comment • Be the first to like this Views Total views 772 On SlideShare 0 From Embeds 0 Number of Embeds 6 Actions Shares 0 31 0 Likes 0 Embeds 0 No embeds No notes for slide ### Excel tutorial 1 (basic functions) 1. 1. Nomadic Education Consultant De/ /ver/ ng / ear/7/ng w/7ere / tis‘ needed 2. 2. Tutorial 1 Getting Started with Excel R E T 5 A *7 V IE5] l--rt >-uunw *1-rn*<. ‘u< om amt. ‘ -i » A‘ A‘ 5 =4; 9» ,2 key . :u‘J. DlU‘_‘_"A‘C¥§C£‘. E . s— S-V. -‘aZ. ‘f§}': " ow the‘ LE. . g .7 . ..—. .x . g ‘ 7 l i l I | (ash Flow Comparison _ 2 Budgeted vs. A:tual . 3 “' ' ‘ l read‘ ; ‘Cash balance (start of month) ‘ ion ' ’ “ i 5 Cash receipts _ ‘ assets. 7 :5; i‘; report 3 cnsh expenditures ' rt ‘ » l ; i ’ ary. 9 ‘, ,__ ' ash l l l that xz: Ir7 ‘‘ ' lh ball‘ 13 lriet casrhrtlow 7 7 t 4 '; - the N ‘S. -gfsilgalance (end olmonth) -‘ , ', l i = u ~ ‘ on I5l me “’l 17 mi | El 32.; in lluw ‘-A‘. /ejtong E-, -emc». ues Cw FIfE('! Wtlffi§. 'ih" ; l am 3 An Excel spreadsheet is more flexible than paper spreadsheets. In fact, it is no longer just an electronic substitute for paper spreadsheets. Excel is now often used for data stor- age, report generation, and as a tool to access data from the Internet. N Exp| o.rii1g_; tl1e Excel Winclow Before entering Amanda's data, you'll review the different parts of the Excel window. The | Excel window contains many of the elements that you find in other Office 2007 programs, including a title bar, the Ribbon, scroll bars, and a status bar. The Excel window also contains Ieatures that are unique to Excel. You'll review these features after you start Excel. To start Excel: ’ 1. 
Click the Start button on the Windows taskbar, click All Programs, click Microsoft Office, and then point to Microsoft Office Excel 2007. I Trouble? If you don't see Microsoft Office Excel 2007 on the Microsoft Office sub- VF menu, look for it on a different submenu or on the All Programs menu. If you still _. , cannot find Microsoft Office Excel 2007, ask your instructor or technical support ‘K person for help. ’ 2- Click Microsoft Office Excel 2007. The Excel window opens. ‘s with All the figures showing the Excel window in these tutorials are zoomed to l20°/ u for in now‘ better readability. If you want to zoom your Excel window to match the figures, lated Complete Step 3. If you prefer to work in the default zoom of 100% or at another ise ExCt’l zoom level, continue with Step 4; you might see more or less of the worksheet on set and your screen, but this does not affect your work in the tutorials. 3. 3. 3. If you want your Excel window zoomed to match the figures, click the Zoom In button on the status bar twice to increase the zoom magnification to 120%. The worksheet is magnified to 120%, which increases the screen size of each cell, but reduces the number of worksheet cells visible in the workbook window. i B-- E- 4. If necessary, click the Maximize button on the Excel window title bar. The Excel window fills the screen, as shown in Figure 1-3. Depending on your installa tion of Excel and your monitor resolution, your Excel window might look different from the one shown in Figure 1-3. *7 Parts of the Excel window Office r Button Quick Access Toolbar , . \$77 x formula bar Select All , column headings - ' workbook . i ‘‘ window l l row y l | headings j‘ , ., sheet tab scrolling buttons active cell vertical scroll bar I ' horizontal scroll bar shortcuts Oi ml? con‘. -ols i , ..as_ , _._. ._. — l_“'l T? I I » cu. ..“ status bar A. __ . Trouble? If your screen varies slightly from those shown in the figures, your com- puter might be set up differently. 
The figures in this book were created while run- ning Windows Vista in the Windows Vista Basic settings, but how your screen looks depends on a variety of things, including the version of Windows, background set- tings, and so forth. Excel stores spreadsheets in files called workbooks. The contents of a workbook are shown in a workbook window. You can open more than one workbook window at a time to display the contents of different workbooks. You can also open multiple work- book windows for one workbook to display different views of the workbook’s contents. The workbook that is currently being used is the active workbook and is displayed in tht‘ active workbook window. The name of the active workbook appears in the title bar of the Excel window. By default, Excel starts with a blank workbook named ”Bookl” in Ilii‘ workbook window, maximized to fill the entire Excel window. Each workbook is made up of individual sheets, just as a notebook an accountant might use is made up of sheets of paper. Excel supports two kinds of sheets: worksheets and char! sheets. A worksheet contains data, laid out in rows and columns. A chart sheet contains an Excel chart that provides a visual representation of spreadsheet data. Charts can also be embedded within worksheets, allowing you to view both the data and charts in one sheet. 4. 4. m In 20%. :h cell The istalla- fferent OUI‘ com- vhile run- een looks )Ut'ld set- ook are W at a ? work- : ontents. ryed in the e bar of k1 ” in the ant might and chart mtains an lso be ne sheet. Tutorial 1 Getting Started with Excel Each sheet is identified by a sheet name. The sheet names are displayed in sheet tabs located at the lower-left corner of the workbook window. The sheet currently displayed in the workbook window is the active sheet, and its sheet tab is white. 
in Figure 1-3, the active sheet is named "Sheetl Other sheets included in the workbook shown in Figure 1-3, but not currently visible, are named "Sheet2" and ”Sheet3.” The sheet tabs for inactive sheets are my and stacked behind the Sheetl tab. An inactive sheet becomes active when you click its fvorksheet tab. By default, all new Excel workbooks are created with these three worksheets. Each worksheet is laid out in rows and columns. Row headings identify each row by a different number. Row numbers range from 1 to 1,048,576. Column headings identify each wlumn by a different letter. The first 26 column letters range in alphabetical order from A to Z_ After Z, the next column headings are labeled AA, AB, AC, and so forth. Excel allows a maximum of 16,385 columns in a worksheet (the last column has the heading XFDl. Rows and columns intersect in a single cell; all the data entered in a worksheet is placed indifferent cells. You can have more than 17 billion cells in each worksheet. Each cell is identified by a cell reference, which indicates its column and row location. For example, the cell reference B6 indicates the cell located where column B intersects row 6. The column letter always appears before the row number in any cell reference. The cell in which you are working is the active cell. Excel distinguishes the active cell by outlining it with a thick border. In Figure 1-3, cell A1 is the active cell. The cell reference for the active cell appears in the Name box located in the upper-left corner of the worksheet. Figure 1-4 describes the different parts of the Excel window, which are labeled in Figure 1-3. 
Description of the Excel window elements Figure 1-4 Feature Description Office Button A button that provides access to workbook—level features and program settings Quick Access Toolbar A collection of buttons that provide one—click access to commonly used commands, such as Save, Undo, and Repeat Title bar A bar that displays the name of the active workbook and the Excel program name Ribbon The main set of commands organized by task into tabs and groups Tlolumn headings The letters that appear along the top of the worksheet window to identify the different columns in the worksheet Vworkbook window Vertical scroll bar Horizontal scroll bar Zoom controls A window that displays an Excel workbook A scroll bar used to scroll vertically through the workbook window A scroll bar used to scroll horizontally through the workbook window Controls for magnifying and shrinking the content displayed in the active workbook window lfiewshortcuts Buttons used to change how the worksheet content is displayed-—Normal, Page Layout, or Page Break ‘L Preview view Slléet tabs Tabs that display the names of the worksheets in the workbook Sheet tab scrolling 0115 R°W headings The numbers that appear along the left of the worksheet window to identify the different rows in the . . worksheet 59l9Ct All button A button used to select all of the cells in the active worksheet . A°llVE cell The cell currently selected in the active worksheet Buttons to scroll the list of sheet tabs in the workbook ilflm A box that displays the cell reference of the active cell F°fmula bar A bar that displays th§_valAueuo_r formula entered in the active cell When Excel starts, it opens a blank workbook with Sheetl as the active sheet and cell Al as the active cell. 5. 5. EX 6 Excel Tutorial 1 Getting Started with Fxcel . . , I the E N. i~'igatirig dVt)rl’%l1t’L‘l I 2' n ' . _ ‘ 3_ Click Ga Excel provides several ways to navigate a worksheet. 
You can use your mouse to click a k cell to make it the active cell, or you can use the keyboard to move from one cell to ’ 4. TYP9 C1 another. Figure 1-5 describes some of the default keyboard shortcuts you can use to 5. Click ti“ move between worksheet cells. ' Name [3, , , , 5, Click in‘ Figure 1-5 Excel navigation keys k again th Press To move the active cell To View fr , (—, -) Up, down, left, or right one cell bars, |iii'. itu<l Home To column A of the current row through the ‘ Ctrl+Home To cell A1 "Of Chmgc 1| Ctrl+End To the last cell in the worksheet that contains data Enter Down one row or to the start of the next row of data To Sc. -°[[ t| Shift+Enter Up one row ’ 1' Click th Tab One column to the right down ti Sh'ft T b O l t h l ft . i + a new umn ot e e ’ 2. Chck th Page Up, Page Down Up or down one screen Column‘ Ctrl+Page Up, Ctrlj: Page Down To the previous or next sheet in the workbook Scrouec 'iiti’l| List‘ liiitli ‘ULli' iiiriiisc and l'(‘‘l)<)rll'(l l()Lll. tl1L{l‘ll1L‘dk'il. ‘t"(t‘ll iii Slimit 1. You can ‘ scroll b h h _ ll ' } 3. Drag tlr To c ange t e active ce . ’ 4. Drag tr } 1. Move your mouse pointer over cell A5, and then click the mouse button. The active wort-ssh cell moves from cell Al to cell A5, and the cell reference in the Name box changes from A1 to A5. The column heading for column A and the row heading for row 5 are both highlighted. } 2. Press the -) key on your keyboard. The active cell moves one cell to the right to Navigati cell B5. Recall that ( f 3. Press the Page Down key. The active cell moves down one full screen to cell B25. enables you Trouble? If the active cell in your workbook is not cell B25, your monitor size and example’ a screen resolution might be different from those used for the figures in these a”_d anothel tutorials. Continue with Step 4. _ ? '8l“ C0?“ } 4. Press the Page Up key. The active cell moves up one full screen back to cell B5. bggoxienéfi } 5. Press the Ctrl+Home keys. 
The active cell returns to the first cell in the worksheet, 50m€ WC C9“ A1_ cannot all I: you can scr h ' l 2 The mouse and keyboard provide quick ways to navigate the active worksheet. For dggszcrgttach larger worksheets that span several screens, you can move directly to a specific cell using Sheet 0” I the Go To dialog box or by typing a cell reference in the Name box. You’l| try both of ' Y these methods. To changi To use the Go To dialog box and Name box: > 1- Cyllckt ‘E } 1. Click the Home tab on the Ribbon, if necessary. The button to open the Go To dia- wiry: log box is in the Editing group. 6. 6. to click a cell to use to The active x changes 3r row 5 right to cell B25. ‘ size and iese : ell B5. / vorksheet. zet. For : cell usiftfl both of 30 To dia‘ T TTTT UI&UlN Tutorial 1 Getting Started with Excel _ In the Editing group, click the Find 8: Select button. A menu of options opens. _ Click Go To. The Go To dialog box opens. , Type C14 in the Reference text box. _ Click the OK button. Cell C14 is the active cell and its cell reference appears in the Name box. You'll use the Name box to make a different cell active. 6_ Click in the Name box, type A1, and then press the Enter key. Cell Al is once again the active cell. To view more of the active worksheet, you can use the horizontal and vertical scroll bars, located at the bottom and right side of the workbook window, respectively, to move through the worksheet horizontally and vertically. Scrolling through the worksheet does not change the location of the active cell. To scroll the worksheet: } 1. Click the down arrow on the vertical scroll bar three times. The worksheet scrolls down three rows, but the active cell remains cell A1. } 2. Click the right arrow on the horizontal scroll bar twice. The worksheet scrolls two columns to the right. The active cell still remains cell Al, although that cell is scrolled out of view. 
You can scroll several rows and columns by dragging the vertical and horizontal scroll boxes. . Drag the vertical scroll box up until you can see the first row in the worksheet. 77 J50! . Drag the horizontal scroll box to the left until you can see the first column in the worksheet. Na'ig; itiiig Bclwc-eii ’Vorkslicels Recall that each workbook can contain multiple worksheets and chart sheets. This enables you to better organize data and focus each worksheet on one area of data. For Example, a sales report workbook might have a different worksheet for each sales region and another worksheet that summarizes the results from all the regions. A chart sheet might contain a chart that graphically compares the sales results from all of the regions. To move from one sheet to another, you click the sheet tabs at the bottom of the work- book window. Some workbooks contain so many worksheets and chart sheets that their sheet tabs Cannot all be displayed at the same time in the workbook window. For these workbooks, YOU can scroll through the sheet tabs using the sheet tab scrolling buttons. Similar to the horizontal and vertical scroll bars and the active cell, scrolling through the sheet tabs d0eS not change the active sheet in the workbook window. To change the active work- Sheet, you must click a sheet tab. T0 Change the active sheet: 1- Click the Sheet2 sheet tab. The Sheet2 worksheet, which is also blank, becomes the active worksheet. The Sheet2 tab is white, indicating that this is the active worksheet. You can also open the Go To dialog box by pressing the Ctrl+G keys. <3 7. 7. Pt. 2. Click the Sheet3 sheet tab. The Sheet3 worksheet becomes the active WOt‘lfiSheet_ 3. Click the Sheet1 sheet tab to return to the first worksheet. t'm‘ that mu'‘e had some 6‘[)E’Tl6‘ll( 9 moving around a blank mi'kl)ook, you dF(_' rmtly to start imrking on Ariiaiitltis W()t'l’l)()()l'. . . - s v-witiw t. i.. It'. . ". 
'i’r1'ua: ;s Effective workbooks are well planned and carefully designed. This helps you avoid errors and makes the workbook readable to others. A well-designed workbook should clearly idem. tify its overall goal, and present information in a well-organized format. The process of developing a good workbook includes the following steps: ' Determine the workbook's purpose, content, and organization before you start entering data 0 Create a list of the sheets used in the workbook, making note of each sheet’s purpose. - Insert a documentation sheet into the workbook that describes the workbook's purpose and organization. Include the name of the workbook author, the date the workbook was created, and any additional information that will help others to track the workbook to its source. - Enter all of the data in the workbook. Add text to indicate what the values represent and, if possible, where they originated. Other users might want to view the source of your data. - Enter formulas for calculated items rather than entering the calculated values into the workbook. For more complicated calculations, provide documentation explaining them. 0 Test the workbook with a variety of values to weed out any errors in your calculations. Edit the data and formulas to correct any errors. - Save the workbook and create a backup copy when the project is completed. Print tre workbook's contents if you need a hard-copy version for your files. Planning a Workbook Before you begin to enter data into a workbook, you should develop a plan. You can do this by using a planning analysis sheet, which includes a series of questions that help you think about the purpose of the workbook and how to achieve your desired results. In the planning analysis sheet, you answer the following questions: 0 What problems do you want to solve? The answer defines the goal or purpose of the workbook. 0 What data is needed to solve your problem? 
The answer defines the type of data that you have to collect and enter into the workbook. . 0 What calculations are required to solve your problem? The answer defines the formula. ~ you need to apply to the data you've collected and entered. - What form should the solution take? The answer defines the appearance of the work- book content and how it should be presented to others. Amanda carefully considered these questions and developed the planning analysis sheet shown in Figure 1-6. You’ll use this plan to create the workbook for Amanda. Er Col the pre wt; wit «Kl Tex S('n (_h. : Cdli mlt text alig 8. 8. Tutorial 1 Getting Started with Excel Excel EX 9 ksheet Planning analysis sheet Figure 1-6 y . » " » Planning Analysis Sheet I 3'1‘ Author: Amanda Dunn Date: 4/l/2010 What problems do I want to solve? ’ 0 I need to have contact information for each RipCity Digital customer. - I need to track how many DVDs I create for my customers. I I need to record how much I charge my customers for my service. - I need to determine how much revenue RipCity Digital is generating. rrors ‘ trly iden- What data do I need? ‘ ; of - Each customers name and Contact information I3 0 The date each customer order was placed ‘ 0 The number of DVDS created for each customer ing data, - The cost of creating each DVD pose‘ What calculations do I need to enter? use and o The total char e for each order ‘ : reated, I The total num )er of DVDs I create for all orders ‘ Ce_ 0 The total revenue generated from all orders i and: ‘l What form should my solution take? lata. - The customer orders should be placed in a grid with each row containing ,6 data on a different customer. m - Information about each customer should be placed in separate columns. _ - The last column should contain the total charge for each customer. ions. - The last row should contain the total number of DVDs created and the total revenue from all customer orders. 
it the can do , _ help Entering Text, Numbers, and Dates In Cells esults. In Now that you have Amanda's plan for the workbook, your next step is to enter the data she's collected. You enter data by selecting a cell in the worksheet to make it the active cell, and of the then typing the content you want to enter in the active cell. When you finish typing, you can press the Enter key or the Tab key to complete the data entry and move to the next cell in the la thal worksheet. As you enter data into the worksheet, the data entry appears in two locations: Within the active cell and within the formula bar. The formula bar displays the content of the formulas active cell and, as you'll see later, shows any formulas used to create calculated values. In Excel, data falls into three general categories: text, numbers, and dates and times. Work. Text data is a combination of letters, numbers, and some symbols that form words and I Sentences. Text data is often referred to as a text string because it contains a string of text _ Characters. Number data is any numerical value that can be used in a mathematical ; l)’S|5 calculation. Date and time data are commonly recognized formats for date and time a. values. For example, Excel interprets the cell entry ’’April 15, 2010” as a date and not as ielxt. By default, text is | eft—a| igned in cells, whereas numbers, dates, and times are right- a igned. 9. 9. ll" ‘ll A documentation sheet reminds you why you cre- ated a workbook and what it contains and relays this information to others with whom you share the workbook. D V VVVVV Tutorial 1 Getting Started with Excel l’ lintvring Text Amanda wants you to enter some of the information from the planning analysis sheet into the first sheet of the workbook. The first sheet will document the purpose and con- r tent of the workbook and the sheets that follow. 
This documentation sheet will contain P the name of the workbook, the workbooks author, the date the workbook was created, ‘ / - and a description of the workbook's purpose. To enter the text for the documentation sheet: l 1. Press the Ctrl+Home keys to make cell Al the active cell on the Sheetl worksheet, if necessary. 2. Type RipCity Digital Customer Orders in cell A1. As you type, the text appears both in cell Al and in the formula bar. 3. Press the Enter key twice. Excel enters the text into cell A1 and moves the active ‘l cell down two cells to cell A3. 4. Type Author in cell A3, and then press the Tab key. The text is entered and the active cell moves one cell to the right to cell B3. 5. Type your name in cell B3, and then press the Enter key. The text is entered and the active cell moves one cell down and to the left to cell A4. 3 6. Type Date in cell A4, and then press the Tab key. The text is entered and the active cell moves one cell to the right to cell B4, where you would enter the date you created the worksheet. For now, you'll leave the cell for the date blank. You'll enter this date soon. ’ _x mu 7. Click cell A5 to make it the active cell, type Purpose in the cell, and then press the; Tab key. The active cell moves one cell to the right to cell B5. on‘ 8. Type To record orders from RipCity Digital customers in cell B5, and then press? the Enter key. Figure 1-7 shows the text entered in the Sheetl worksheet. Figure 1-7 Documentation sheet T T 7TT T T T 15.. \$5 ‘I 1) . : . mi . V -. I V ' *"l'i* mun Paaelqasl Fumum om Rams nu. so -7 ' _ " - A‘ A’ = mg. » __j. . wv~-«v- {- - J15 ‘ V“ «J ‘ __ _ ; ‘eim- J-S“‘\$M_I I I 3 1 I_I-_-. a-A- Eiailet , u- s-% « seer ': _,-. .r. ... ».» 4 , .°, ,,. ;,'. . ‘ manual: - lam ‘ llignmzll - ‘mama . ‘ i an some , ‘ L’ V T L 77 if N 77 N V 7 . 7 V E l fl I . L: : I . ~ : ~ I 1 ex L ' 2 F; .ii. fv, -_I trr. - ‘ _ yournamewill V‘ . _,_. _, appear here i . - «— I . » ~-. -.--. ~.r»ri, :i, :rig rm. -_. 
The text you entered in cell Al is so long that it seems to overflow into cells B1 and C1. The same is true for the text you entered in cells B3 and B5. When you enter more text that‘ can fit in a cell, Excel displays the additional text in the adjacent cells as long as they are empty. If the adjacent cells also contain data, Excel displays only as much text as fits into tht‘ cell, cutting off, or truncating, the rest of the text entry. The text itself is not affected. The complete text is still entered in the cell, it's just not displayed. To display all of the text, you must increase the cell's width, which you’ll learn about in the next session. 10. 10. Tutorial 1 Getting Started with Excel Excel Next, you'll enter the RipCity Digital customer orders. As shown in Figure 1-8, the orders will contain the name and address of each customer, the order date, the number . sheet ‘ VDS created from the customer's home videos, and the rice er DVD. Amanda's 01 D P P nd con— mice per DVD decreases for larger orders. contain Created’ Customer orders Figure 1-8 L351 First Address Date DVDs Price per DVD Dawes Gregory 402 Elm St. 3/13/2010 7 \$17.29 wksheet Merrill, MI 48637 Garcia Susan 1025 Drake Ave. 3/14/2010 25 \$15.79 Exeter, NH 03833 appea“ ‘Tj. ;Zt'T Dr. Lilla 5 North Ln. 3/17/2010 32 \$12.99 Oswego, NY 13126 12 active "{, .,den Tony 24 Mountain Dr. 3/24/2010 20 \$15.79 l l Auburn, ME rid the You'll enter this data in the Sheet2 worksheet. ‘ed and To enter the text labels and customer names: the ) 1. Click the Sheet2 sheet tab. Sheet2 becomes the active worksheet. You'll enter the ie date column labels in cells A1, B1, C1, D1, E1, and F1. K You ll 2. Type Last in cell A1, and then press the Tab key. The label is entered in cell A1 and the active cell moves to cell B1. press the 3. Type First in cell B1, and then press the Tab key. The label is entered in cell B1 and the active cell moves to cell Cl. hen press 4. Type Address in cell C1, and then press the Tab key. l 6. 
Type DVDs in cell E1, press the Tab key, and then type Price per DVD in cell F1. You've typed all the labels for the customer orders. 7. Press the Enter key. The active cell moves to cell A2, the start of the next row 1 D D } 5. Type Date in cell D1, and then press the Tab key. D D where you want to begin entering the customer data. D 3. Type Dawes in cell A2, press the Tab key, type Gregory in cell B2, and then press the Tab key. You've entered the first customer's name and moved the active cell to cell C2. Figure 1-9 shows the text you've entered so far. 27,, (‘ 7 Text entered for the customer orders , -as-~ ~ "' * Er-rm Plgelm-: u’. ‘: r1~= ».11u om “mm view (I! ~ ' " 3*. ‘ . . A'/1' *= uv: -- : van “ 2 . do . Illlu-_-,3‘; -\$§§1TEr§, «j-3~ in . 25- 1- W . .; .,m. text than r‘-1’ - x- 2y are 1 L ‘ ’ 191 U “I . “‘-7 1 1 rrrtrlr-'>~ _1'i . ' ml» D-11 r« : w 1 1. 1 5 into the ‘U. j '_ ' l we . » 1: reinstate- a1isn‘e. d:in 0.911 zxt, you 11. 11. Tutorial 1 Getting Started with Excel I As you enter text in a worksheet, Excel tries to anticipate the text you are about to enter ( y I ‘ by displaying text that begins with the same letters as a previous entry in the same Column. .- ‘ This feature, known as AutoComplete, helps make entering repetitive text easier. To accept . . to >l the suggested text, press the Tab or Enter key. To override the suggested text, continue to WU type the text you want to enter in the cell. AutoComplete does not work with dates or AW numbers or when a blank cell is between the previous entry and the text you're typing. . . . . . . , . . . En I K l'l1lt, ‘rlllg Multiple I. mes oi lt‘l Within .1 ( ell , _ ‘ The 1 The next cell in the Sheet2 worksheet contains the address ot the tirst customer. Mn I ‘ Addresses are often entered on two or more separate lines. Amanda wants you to follow l mm, l ‘ that convention with her customers’ addresses. 
To place text on separate lines within the same cell, you press and hold the Alt key while you press the Enter key. This creates a ' 4/ line break within the cell. ° 4/ o 4~ . . . . . . ' Al , Reterence Window | Entering Multiple lines of Text Within a Cell . 6. l 0 Click the cell in which you want to enter the text. ll 0 Type the first line of text. t ‘W 0 For each additional line of text, press the Alt+Enter keys (that is, hold down the Alt key 0": l you press the Enter key), and then type the text. ' Hy ‘ text l 1 Yi>ii'l| (‘| tlt‘I llit, ’ atltlress tin lhv first l‘ip(, it l, )ij. ;ita| t'tistr>mii', lti( li will (M4 iipx two lom l lines lll1ll1 the sanie t ell. l‘’U'‘ ‘ ll ll To enter two lines of text within a cell: 7°‘ } 1. Type 402 Elm St. in cell C2, but do not press the Tab or Enter key. Instead, you'll I l insert a new line break. } 2. Hold down the Alt key and press the Enter key, and then release both keys. The I insertion point moves to a new line within cell C2. } 3. Type Merrill, MI 48637 on the new line, and then press the Tab key. The two lin of text are entered in cell C2, and cell D2 becomes the active cell. See Figure 1-10. Figure 1-10 Two lines of text entered within a cell )2. )3. )4. A 9 mm Page llycm Foimmas am it-re». .ie. ~ “ A‘ A‘ § 2 g, »-~ 3. J = ~_"-'/ ii/ u _-_. --Av §§fiéi§ _ii- s-°»‘. - 23;‘; ti-zuaw, - = .—i—. y , -. i.; ».>. e~i . rimnei , ‘ Ent ‘ H , l , . . . , ,. l” ll’! ‘ _ . , , . i , _ , Daw t ~»i enter text is wrapped within the cell 12. 12. to enter column_ 0 accepi we to s or ping. ) follow ithin the ates a Alt key p‘ l (1 l, you'll 3 5. The two linesl ire 1-10. l Tutorial 1 Getting Started with Excel EX 13 one impact of entering multiple lines of text within a cell is that it changes how text flows within the cell. Excel wraps the text within the cell, increasing the cell's height, if necessary, to Show all of the text. 
As you can see, the text in cell C2 appears on four lines even though you entered the address on two lines. If the cell's width were increased, the text would then Appear on two lines as Amanda wants. You'll do this in the next session. Entering Dates The next cell will contain the date of the order. You can enter dates in any of the standard formats. For example, you can enter the date April 6, 2010 in any of the following date for- mats (and many others) and Excel recognizes each format as representing the same date: 0 4/6/2010 0 4/6/10 o 4-6-2010 o April 6, 2010 o 6—Apr—1O In Excel, dates are actually numbers that are formatted to appear as text. This allows you to perform calculations with dates, such as determining the elapsed time between one date and another. Sometimes Excel alters the date format you've chosen. For example, if you enter the date 4/6/10, Excel displays the date with the four-digit year value, 4/6/2010. Also, if you enter the text April 6, 2010, Excel converts the date format to 6-Apr—10. You'll enter the dates in the format mm/ dd/ yyyy, where mm is the month number, dd is the day number, and yyyy is the four—digit year number. To enter the dates for the customer orders: } 1. Type 3/13/2010 in cell D2, and then press the Tab key to move to cell E2. The date of Gregory Dawes's order appears in cell D2 and cell E2 is the active cell. You also need to enter the current date in the Sheetl worksheet so you can docu- ment when you started working on this project. ’ 2. Click the Sheet1 sheet tab. The Sheetl worksheet is the active worksheet. k 3. Click cell 84 to make it active, type today's date using the format mm/ dd/ yyyy, and then press the Enter key. ed? k 4. Click the Sheet2 sheet tab. The Sheet2 worksheet is the active worksheet, and cell I -i _ 8 E2 is still the active cell. 
EnlE, ‘l'lItg Numbers '0 the next two cells, you'll enter the number of DVDs that Amanda has created for Gregory Dawes and the price she will charge him for making each DVD. In both cases, you'll be Qntering numbers. In Excel, numbers can be integers such as 378, decimals such as 1.95, or negative such as -5.2. In the case of currency and percentages, you can include the currency 5Ymbol and percent sign when you enter the value. Excel treats a currency value such as 58725 as the number 87.25 and a percentage such as 95% as the decimal number 0.95. Currency and percentages, like dates, are formatted in a convenient way for you to read. Excel right-aligns numbers within cells. If a number exceeds its cell size, you see tttttttttttt for the truncated numeric value. You can display the entire number by increas- ing the column width. 13. 13. You'll complete the information for Gregory Dawes's order by entering the number (J, DVDs Amanda created for him and the price she charged him for each DVD. To enter the numbers for the first customer order: fr 1. Type 7 in cell E2, and then press the Tab key. The order quantity for Gregory Dawes is entered and the active cell is cell F2. i 2. Type \$17.29 in cell F2, and then press the Enter key. The currency value is entered in cell F2, and the active cell moves to cell F3. 3'' 3. Click cell A3, which is where you want to enter the information for the next customer. See Figure 1-11. rt 1 i l 1 11.: first Adnrrss Dare DvDs -’'-: v 09' D‘. /Li y -102 Elm numbers are right-aligned in the min i S! I ME'll‘ . 1 Driues Gregory 0.1148637 3/13/2010 7 st? 29 Cell d|5PlaYS the Value 1 I formatted as currency You've completed the data entry for Amanda's first customer. You still need to enter the ‘ data for three more customers into the worksheet. You'll use the same techniques you used to enter Gregory Dawes's order to enter their data. l To enter the remaining customer order data: hr 1. 
Type Garcia in cell A3, press the Tab key, type Susan in cell B3, and then press the Tab key. The second customer name is entered. , Br 2. Type 1025 Drake Ave. in cell C3, press the Alt+Enter keys, type Exeter, NH 03833 on the next line, and then press the Tab key. The second _customer's address is entered in the cell on two lines. fr 3. Type 3/14/2010 in cell D3, press the Tab key, type 25 in cell E3, press the Tab key. type \$15.79 in cell F3, and then press the Enter key. The rest of the second cus- tomer's data is entered. hr 4. Enter the following data for the remaining two customers in rows 4 and 5, making sure that you press the Alt+Enter keys to enter the addresses on two lines. See Figure 1-12. l Torbet, Dr. Lilla Rhoden, Tony 5 North Ln. 24 Mountain Dr. Oswego, NY 13126 3/17/2010, 32, \$12.99 Auburn, ME 04210 3/24/2010, 20, \$15.79 _: ., ‘f " -.1.‘ TN; ._A - ,7 FT‘ ‘ First customer order completed r J " - W: Arnant you to you'll I delete Char The dc (late It worksl cell ( r you t; Km ‘i(lIh, or the pl’ll1lt) gum-r. Ctll(‘(l ‘()Ll <, . wlir= rt—' :1. '. .i if You st play ir might rnulti; 14. 14. l Tutorial 1 Getting Started with Exce mber of . . . . . Customer data for RipCity Digital Figure 1-12 l film )ry ‘ _ l 1 D? - ' 3 ~‘ -7 5 it Ev’-7— V g I 3 , .~ - l. ". t C T H “T 47‘T: ~': %‘ _ l 5 P_m: =' ‘ T . T 7,, L" *2: 1+0 Working with Columns and Rows Amanda has reviewed the customer order data you entered in the worksheet. She asks you to modify the worksheet to make it easier to read and include more data. To do this, ' ("W ' W‘ you’ll need to change the column widths and row heights, insert columns and rows, and :5 mn delete columns and rows. Chantzing Column Width and Row Height The default sizes of the columns and rows in a worksheet might not always accommo- press the date the information you need to enter. For example, the addresses you entered in the Worksheet on two lines wrapped within the cell to display all the text. 
Other times, long Cell content might be truncated. To make the cell content easier to read or fully visible, 1 NH you can resize the columns and rows in the worksheet. 5 address New workbooks use the default sizes for column widths and row heights. Column Widths are expressed either in terms of the number of characters the column can contain e 131, key, Or the size of the column in pixels. A pixel is a single point on a computer monitor or nd cus— Printout. The default column width is 8.38 standard—sized characters. This means that, in Beneral, you can type about 8 or 9 characters in a cell before that entry is either trun— _ Cated or overlaps the adjacent cell. Of course, if you decrease the font size of characters, ‘; ’E: :l"g VOU can fit more text within a given cell. Row heights are expressed in points or pixels, Where a point is ‘/72 of an inch. The default row is 15.75 points high. Setting Column Widths Insight Y0“ Should set column widths based on the maximum number of characters you want to dis- pléy in the cells rather than pixel size. Pixel size is related to screen resolution and a cell mlght be too narrow under a different resolution. This might come into play if you work on multiple computers or share your workbooks with others. 15. 15. ; : (%li= fl=lIl<‘Z'-"l'nllill'iYl lit’-Iit;41i}; -, lit‘- "fiiltliili 'i'I1ili, i Iii {XIV-' i. l'-l};4.ili l ‘ " l l 0 Drag the right border of the column heading left to decrease the column width or right to J‘ increase the column width. l7‘ 0 Drag the bottom border of the row heading up to decrease the row height or down : c * l increase the row height. ‘V or "— l 0 Double-click the right border of a column heading or the bottom border of a row heading 3‘ l to AutoFit the column or row to the cell contents (or select one or more columns or rows. ‘l , click the Home tab on the Ribbon, click the Format button in the Celts group, and then l click AutoFit Column Width or AutoFit Row Height). 
or 0 Select one or more columns or rows. - Click the Home tab on the Ribbon, click the Format button in the Cells group, and then « click Column Width or Row Height. - Enter the column width or row height you want, and then click the OK button. VT? Amanda suggests you increase the width of the Address column to allow the arltlresgeg to appear on two lines in the cells without additional line wrapping. F ' i To increase the width of column C: } 1. Move the mouse pointer over the right border of the column C column heading l( l until the pointer changes to ‘+0 . E ( Ir 2. Click and drag to the right until the width of the column heading reaches 20 char- acters, but do not release the mouse button. The ScreenTip shows the measure- ll ments of the new column width first as the numbers of characters and second in T: parentheses as pixels for the current screen resolution. } | > 3. Release the mouse button. The width of column C expands to 20 characters and } all the addresses in column C fit on two lines with no extra line wrapping. See Figure 1-13. } Increased column width T i T i r -l ' l “ miw :1 t‘ " : : _. 2 i-“"1'il ‘ , _ " 7‘ i . l ~ n. n 7 . ___ , ,, :1: ur i » i - 7’ - ' I'- ‘ ' ‘ ' ” ” ' l ‘ " ' lin AL v ‘ l'( ‘ . i ll l i Last First Address l)at»= DVD. An. u ; .u—-v in/ Ft dl ta 402 Elm St. (ll J Dawes Giegaiy Meri‘it| ,MI 48637 1/“; i, /}. i1l) I 33/ :9 addresses fit t(_ on two lines U‘ l i 1025 Drake if iaaim --mm lxet-‘r, l‘lllllil?33 3/H, .’z:3to :5 S15 79 ll‘ 16. 16. l‘lgl'll {Q In to heading -r rows_ then then i(l(lr<)sSe5 ding 20 char- asure— and in ‘s and See Amanda wants you to increase the widths of columns A and B to 15 characters to accommodate longer names. Rather than resizing each column separately, you can select both columns and adjust their widths at the same time. To select an entire column, you click its Column heading. Likewise, to select an entire row, you click its row heading. 
You can drag across multiple column headings or row headings to select adjacent columns or rows, You can also press the Ctrl key as you click column or row headings to select non- adjacent columns or rows. You can select all the columns and rows in a worksheet by clicking the Select All button in the upper-left corner of the worksheet. To increase the widths of columns A and B: ’ 1, Click the column A column heading. The entire column is selected. ' 2, Hold down the Ctrl key, click the column 3 column heading, and then release the Ctrl key. Both columns A and B are selected. } 3, Move the mouse pointer to the right border of the column B column heading until the pointer changes to ¢-I-* . } 4. Drag to the right until the column width changes to 15 characters, and then release the mouse button. Both columns are wide enough to display longer names. The text in cell F1, "Price per DVD, ” overflows the cell borders. This column would look better if you increased the width of column F to 12 characters. Rather than use the mouse, you can set the column width using the Format command on the Home tab. The Format command gives you precise control in setting column widths and row heights. To set the width of column F to 12 characters: } 1. Click the column F column heading. The entire column is selected. D 2. In the Cells group on the Home tab, click the Format button, and then click Column Width. The Column Width dialog box opens. k 3. Type 12 in the Column width box, and then click the OK button. The width of col- umn F changes to 12 characters, placing the text in cell F1 entirely within the bor- ders of the cell. The row heights didn't change after you resized the columns, which leaves a lot of blank space in the four rows of customer data. This extra blank space makes the data dif- licult to read and extends the content out of view. You'll reduce the heights of all these rows. Row heights are set in the same way as column widths. 
You can drag the bottom bor- ‘—l€I’ of the row or define a specific row height using the Format command on the Home lab. Another option is to autofit a column or row to its content. Autofitting eliminates 3”)’ empty space by matching the column to the width of its longest cell entry or the row “the height of its tallest cell entry. If the column or row is blank, Excel restores the col- umn or row to its default width or height. The simplest way to autofit a row or column is "> double-click its border. You can also use the AutoFit commands. ‘ Because you want to remove empty space from the four worksheet rows, you'll autofit ‘lle f0ws to their content rather than specify a particular row height. 17. 17. To autofit row 2 to its content: Ill y 1. Move the mouse pointer over the bottom border of the row 2 row heading until the pointer changes to -1- . Br 2. Double-click the bottom border of row 2. The height of row 2 shrinks to match the content of cell C2, which is the tallest entry in the row with two lines of text. You could continue to resize the remaining rows one at a time. but (1 quicker v. i is 1,’, l select the rows you want to resize and then autofit all the selected rows sinitiltaiieoueh | instead of double-clicking the row border, you’ll use the AutoFit Row Height C()l11mt1nd, To autofit the height of rows 3 through 5: 1. Drag the pointer across the row headings for rows 3, 4, and 5. The contents of rows 3 through 5 are selected. 2. In the Cells group on the Home tab, click the Format button. A menu of commands opens. 3. Click AutoFit Row Height. The height of each of the three rows autofits to its con. tents, and all the empty space is removed. 4. Click cell A1 to make it the active cell. The other cells in the worksheet are deselected. Figure 1-14 shows the worksheet with the revised row heights. l ‘ Autofitted row heights r / l " l J . x= : , 3 _ h Xi 1‘ A n u _ / _ ; »., 2 . - s 4 l 1 ll. '—: t [en » r— u I t t | D . :-s C = —g-.2", ---- . -011. 
" S ‘7 3'. - row heights sized to _ 2! K 7 match their contents ‘ " " — 1 CE‘! I t _- 7 . __i A 521 "'3 l r . -:<—* T: — l 20 K E ‘-i Inserting a Column or Row Amanda notices that the worksheet doesn't include a column containing customer ph0n9 numbers. She wants you to insert a column for the customer phone numbers between the Address column and the Date column. You can insert a new column or row anywhere within a worksheet. When you insert a new column, the existing columns are shifted to the right and the new column has the sam‘ width as the column directly to its left. When you insert a new row, the existing rows are shifted down and the new row has the same height as the row above it. Ni To ii first nee 18. 18. mserting a Column or Row Jntll the , . select the column(s) or row(s) where you want to insert the new column(s) or row(s); Excel will insert the same number of columns or rows as you select. itch the o In the Cells group on the Home tab, click the Insert button (or right-click a column or row t- heading or selected column and row headings, and then click Insert on the shortcut menu). tl ‘K [0 ll‘ You'll insert a column and enter the customer phone numbers in the new column. K it _ To insert a new column: _ of ' 1. Click the column D column heading to select the entire column. ’ 2, In the Cells group on the Home tab, click the Insert button. A new column D is inserted into the worksheet and the rest of the columns shift to the right. The new column has the same width as column C. its Con } . Reduce the width of column D to 15 characters. 9 3 4. Click cell D1 to make it the active cell, type Phone as the label, and then press the Enter key. The new column label is entered in cell D1, and cell D2 becomes the active cell. } 5. Enter the phone numbers in cells D2, D3, D4, and D5, as shown in Figure 1-15, pressing the Enter key after each entry. 
Tutorial 1 Getting Started with Excel | Reference Window W _ ,3, __ A 7 7 New column inserted in the worksheet Figure 1-15 —| "l 1.» I’ “.3 ; . , 4: fvn‘~. 'i. _ it ><'i wen v. q¢tm-n Furmulfl on Renew fen I! ~ " ‘ "" __. .. i: § ~ AL A‘ A’ '- *’ = t " ’ i ‘ ' ! “I Q. . ; . n‘ . c : ‘ - 3., 177:. -, ‘ _; “.-‘V. - V L“ ' _ 1- existing I » <4 » ' ' ‘ » ~ columns _ L351 ‘» J, ’ c . “1 ’. , ": ofdafa I shift riht 2 Date; :"':5‘L", ".7 3' v Hr—‘ : ,f'3': i:i 'i, ‘L; :3 I‘. I 3 l; Ga' : l C LTQ-5. 3 Rhoda I _‘ Amanda neglected to include a customer. Because the customer was RipCity Digital’s ’”St Customer, he should be inserted at the top of the list. To add this new order, you 3’ Plloml ”€€d to insert a new row in the worksheet below the column labels. veen iert a T0 insert a new row: ie same . _ , _ . are ’ 1- Click the row 2 row heading. The entire second row is selected. } 2- ln the Cells group on the Home tab, click the Insert button. A new row 2 is inserted, and the remaining rows shift down. l 3- Enter the new customer order shown in Figure ‘l-I6 into row 2. 19. 19. EX 20 Excel Tutorial 1 Getting Started with Excel save the cu Figure 1-16 New row inserted in the worksheet 79 i1_ Click the Sai {I5 -1 "‘ not yet been 'i — low! 9:9: Law roin-um om izaew new g A. A. 5 E a_), __ J. _ . . -in-. r1 Navigate tO' vim : B 1 ii- , ~ 2~. A- against s--v. - -.2.-J; j : _;i replacethe‘ illlatoaid - Fa-'1! Annuhnient - ilunicn T . e4ii trouble? If you can pro: new L ‘ _ _ _ I obtain them ” ‘ make a back °"3t°m°' A _ — original files or er about the Di ’ 3_ Select Book ’ 4_ Verify that E ’ 5_ Click the Sa' ' with its desc You've enterec rows and columr groups of cells. Y revenue Amanda 1. What are the 2. List two way l, )L‘lt, ’lllI§_{ and (fleariiig a Row or (§oluiiiii Adding new data to a workbook is common, as is removing old or erroneous data. Amanda just learned that her second customer, Gregory Dawes, canceled his order. 
She I wants you to remove this order from the worksheet. _ You can remove data in two ways: clearing and deleting. Clearing data from a work- sheet removes the data but leaves the blank cells. Deleting data from the worksheet removes both the data and the cells. When you delete a column, the columns to the right shift left to fill the vacated space. Similarly, the rows below a deleted row shift up to fill 3, what is the the vacated space. Deleting a column or row has the opposite effect from inserting a col- Worksheetg UITIIT OI’ l'0‘V. 4. keybo You'll first clear Gregory Dawes's data from the worksheet and then delete the row that 5 What is text contained the data. Usually, you would do this in one step by simply deleting the row, but 6' HOW do OL this highlights the difference between clearing and deleting. 7' Ce“ A4 Ci)“ 8. Explain the i To clear and delete row 3: } 1. Click the row 3 row heading. The entire row 3 with Gregory Dawes's order is . ' 0 selected. D 2. Right—click the row 3 row heading, and then click Clear Contents on the shortcut “—V-i ‘ menu. Excel clears the values in the third row, but leaves the blank row in that space. Working } 3. Verify that the third row is still selected. A group of cells nonadjacent. Ar the customer on liecause it forms more distinct ad numbers in cells distinct blocks c YOU want. } 4. In the Cells group on the Home tab, click the Delete button. The third row is deleted, and the rows below it shift up. Only four customers remain in the worksheet. Before proceeding, you’ll save your workbook with the name ”RipCity Digital Orders" in the default Excel workbook format. 20. 20. ir. She ~ork- he right to fill g a col- that / , but iortcut Drders" i D Tutorial 1 Getting Started with Excel To save the current workbook: " You can reopen the Save As dialog box to save a work- book with a new filename, to a different location, or in j another file format; click I 1_ Click the Save button 3 on the Quick Access Toolbar. 
Because this workbook has not yet been saved, the Save As dialog box opens. 2, Navigate to the Tutorial.01Tutorial folder included with your Data Files. You'll replace the default filename “Bookl" with a more descriptive one. Trouble? If you don't have the starting Data Files, you need to get them before _ you can proceed. Your instructor will either give you the Data Files or ask you to the 0”“ BN0” a"d“‘9" obtain them from a specified location (such as a network drive). In either case, °l'°l‘ 5a"°A5' make a backup copy of the Data Files before you start so that you will have the | ‘ original files available in case you need to start over. If you have any questions ' about the Data Files, see your instructor or technical support person for assistance. 3, Select Book1 in the File name box, and then type RipCity Digital Orders. Verify that Excel Workbook appears in the Save as type box. 4 5. Click the Save button. The Save As dialog box closes and the workbook file is saved with its descriptive filename. You've entered and saved the customer order data. In the process, you worked with rows and columns. in the next session, you'll learn how to work with individual cells and groups of cells. You will also add calculations to the worksheet to determine how much j revenue Amanda will generate from these orders. y Session 1.1 Quick Check | Review 1. What are the two types of sheets used in a workbook? l 2. List two ways of identifying the active cell in the worksheet. l 3. What is the cell reference for the cell located in the third column and fifth row of a l worksheet? j l l 4. What keyboard shortcut moves the active cell to cell Al? 5. What is text data? ji 6. How do you enter two lines of text within a cell? 7. Cell A4 contains ”May 3, 2010"; why doesn't Excel consider this entry a text string? 8. Explain the difference between clearing a row and deleting a row. Session 1 .2 Working with Cells and Cell Ranges , A group of cells is called a cell range or range. 
Ranges can be either adjacent or "0nadjacent. An adjacent range is a single rectangular block of cells. For example, all lite customer order data you've entered in cell A1 through cell G5 is an adjacent range because it forms one rectangular block of cells. A nonadjacent range consists of two or more distinct adjacent ranges. All the last names in cell Al through cell A5 and all the n_Umbers in cells Ft through G5 together are a nonadjacent range because they are two distinct blocks of cells. A nonadjacent range can include as many adjacent ranges as YOU want. 21. 21. Just as a cell reference indicates the location of an individual worksheet cell, a range j‘ l reference indicates the location and size of a cell range. For adjacent ranges, the range / /7 reference specifies the locations of the upper-left and lower-right cells in the l’eCtall’j‘ljj‘, . -~/ ‘ block separated by a colon. For example, the range reference A1265 refers to all thr; U; 1 from cell A1 through cell G5. The range reference for nonadjacent ranges separates rm. \$1,, /nu adjacent range reference by a semicolon. For example, A1 : A5;Fl : G5 is the range refer. used it ‘ ence for cells A1 through A5 and cells Fl through G5. .1r€' hi}? . i l 3| Selecting a Cell Range 1 H You select adjacent and nonadjacent ranges of cells with your mouse, just as you selected individual cells. Selecting a cell range enables you to work with all of the cent in the range as a group. This means you can do things like move the cells, delete them or clear all their contents at the same time. ltatmaimta 'AVlluo'lnw ; -1‘-ll‘-Ilirlil; -'. 3.“-I l ff’-li1>;4', -L‘ To select an adjacent range: } 5- C” 0 Click the cell in the upper-left corner of the adjacent range, drag the pointer to the cell in the lower-right corner of the adjacent range, and then release the mouse button. 
Nut or - Click the cell in the upper~left corner of the adjacent range, press the Shift key as you click the cell in the lower—right corner of the adjacent range, and then release the Shift key To gel. j To select a nonadjacent range of cells: j 1_ Se. , - Select a cell or an adjacent range, press the Ctrl key as you select each additional call or ' adjacent range, and then release the Ctrl key. l 2' H: l To select all the cells in a worksheet: ) 3. R9 0 Click the Select All button located at the intersection of the row and column headi: gs (or Al press the Ctrl+A keys). You’ll use the mouse pointer to select the atljacent range M : (}S, which inclutlcs all the '_ " content you entered in the worksheet so far. _ l To select the adjacent range A1:G5: . . r kt 1. If you took a break at the end of the previous session, make sure the RipCity Digital Orders workbook is open and the Sheet2 worksheet is active. 5* 2. Click cell A1 to select the cell in the upper-left corner of the range A1:G5. Fr 3. Drag the pointer to cell G5, which is the cell in the lower—right corner of the range A1:GS. r I9 4. Release the mouse button. As shown in Figure 1-17, all cells in the adjacent range A1:G5 are selected. The selected cells are highlighted with color and surrounded by 3 black border. The first cell you selected, cell A1, is still the active cell in the worksheet» )4. Cl 22. 22. Tutorial 1 Getting Started with Excel EX 23 a range 7 “ 2 range { Adjacent range A1:G5 selected Figure 1-17 H 3”8Ular (‘ _ the cells I. ‘ ' - . , u - res each ,5: mn and row headings M actwe cell In , _,_ , ref-E ' ’ in the selected range i the selected 5, ' F bighllghted range is white / ‘n3 5 ; K V selected cells l are highlighted andsurrounded l j byathick black 1e cells border - them, i l L p 5. Click any cell in the worksheet to deselect the range. , Next, you'll select the nonadjacent range Al : A5;Fl : G5. To select the nonadjacent range A1:A5;F1:G5: } 1. Select the adjacent range A1:A5. ) 2. 
Hold down the Ctrl key, and then select the adjacent range F1:G5. } 3. Release the Ctrl key. As shown in Figure 1-18, all the cells in the nonadjacent range l Al: A5;Fl: G5 are selected. i Nonadjacent range A1:A5;F1:G5 selected Figure 1-18 1 ll W‘ ‘l " = active cell in the nonadjacent ‘ | :9 "'”" ° ““' '°'””“‘ “‘ ‘''''“ "‘“‘ range is white and surrounded L" ’ V J . ‘ by a black border : y N _ I g. . i I | 5~ . I adjacent range F1:G5 e range ‘ i A ' is selected W lge " 2- 2: ed by a 6 rksheet. l ‘- Click any cell in the worksheet to deselect the range. 23. 23. Tutorial 1 Getting Started with Excel | ix‘()lll]“; _)‘l: l|1(l(, ,L()[)h: lil]§_: (l(. ()ll Range ll ll Amanda wants you to insert titles that describe the customer order data you’ve entered Including the company name, a descriptive title, and the date is part of good Worksheet design, enabling others to quickly see the who, what, and when of the data. The curmm . worksheet has no space to add this information. You could insert several blank rows at l l the top of the worksheet for this information. Another option is to select and then mow, the customer data lower in the worksheet, freeing up the rows at the top for the I1-. ' ‘W than Reference Window | Moving or Copying a Cell or Range 0 Select the cell or range you want to move or copy. 0 Move the mouse pointer over the border of the selection until the pointer changes 5 . 0 To move the range, click the border and drag the selection to a new location (or to c . the range, hold down the Ctrl key and drag the selection to a new location). ' . or - Select the cell or range you want to move or copy. 0 In the Clipboard group on the Home tab, click the Cut button or the Copy button (or right-click the selection, and then click Cut or Copy on the shortcut menu). 
| ; 0 Select the cell or upper-left cell of the range where you want to move or copy the content, I 0 In the Clipboard group, click the Paste button (or right-click the selection, and then cl I Paste on the shortcut menu). 7 One way to move a cell or range is to select it, position the mouse pointer over the bottom border of the selection, and then drag the selection to a new location. This tech- You can drag and drop to a i I r H , bl nique is called drag and drop because you are dragging the range and dropping it in a , Sggetllg s: Ll: C:1.: nyt; ":; ‘ee' new location. You can also use the drag—and-drop technique to copy cells by pressing the 1 edge ofthe worksheet in Ctrl key as you drag the selected range to its new location. A copy of the original range which you want toscroll. is laced in the new location without removin the ori iinal ran e from the worksheet. | i _ _ P 8 E, 8 | .1 tlée ne: ;‘loca‘tiocrt1_is You'll use the drag—and—drop method to move data. * W51 9. rep 9599 I00. )4 M l To drag and drop the customer orders: l } 1. Select the range A1:G5. } 2. Move the mouse pointer over the bottom border of the selected range so that the a pointer changes to ‘lg . } 3. Press and hold the mouse button to change the pointerto lug , and then drag the selection down four rows. Do not release the mouse button. A ScreenTip appears. indicating the new range reference of the selected cells. See Figure 1-19. 24. 24. -ntered, lFl(Sll€e[ ' Current ows at n move l"lCW lexi’ er the iis tech- it in a ‘SSll'lgll'1E‘ l range sheet hat the ag the apears, Tutorial 1 Getting Started with Excel a Selected range being moved Figure 1-19 ‘ . ‘ A I) ‘ ‘V ‘ Ia/ l a men Flocllvwt rqmum om Renew . »e. . 9 — ' V . . A. ‘. Q 3 ' fin J‘, . . ta \$1) 4 . . - . . . , . 7,. 7} 3. 1. I It A ll ‘ _ ‘ 2.’ A F 3 E ‘E 3 "3 , l° "3 '52 = }i: .'«'~. )ra"' l: i.: .:- , » I , .-am - monmml - num: -m 1 mm 5; ‘outline indicates 7 ‘ ‘the new location of 3 . 
, i the selected range cell reference of the new location is displayed in a ScreenTip 10 .1 4. When the ScreenTip displays the range A5269, release the mouse button. The selected cells move to their new location. ) 5. Enter the title information shown in Figure l—20 in the range Al: A3, pressing the Enter key after each entry. i “" " ' a num ngetquua Formula um lr to mm W ' : _ _ , , 5 _ V _ . . _ . ‘ V _ r . . ".mrv: ‘ 2. - A . ~ , v_7u A . §fi‘i§>. ,. . . in . WW, ,3, 1 M k . . t . . ~‘ * 4" . : on ‘. m1i! ioru. ¢ mu! :i: _ zms ‘inns I B I 0 ‘l T A § § g ‘R K J ’ / ‘ ' °‘ :3 >3-iv-gt’-ny i°'me- : r,«'»: ~ -_"-"‘5=‘ .4 Villa" ; em« m '- ran: - ungn-wt A N-mar « SM: -n zm Em-rig __. .. in ~ , -, 1 3:31 ~ ' i i ‘I 1 Rip: ‘ . - v z I (as Some people find drag and drop a difficult and awkward way to move a selection, Particularly if the worksheet is large and complex. In those situations, it’s often more effi- cient to cut and paste the cell contents. Cutting places the cell contents into computer memory or on the Clipboard. The contents can then be pasted from the Clipboard into a new location in the worksheet. You’| l cut and paste now. To cut and paste cell contents: ’ 1- With the range A5:G9 selected, in the Clipboard group on the Home tab, click the Cut button The selected range is surrounded by a blinking border, which indi- Cates that its contents are stored on the Clipboard. ’ 2- Click cell A11. This cell is the upper—left corner of the range where you want to paste the data. Worksheet titles entered Figure 1-20 l . . y ‘ fP~ 25. 25. §* 3. In the Clipboard group, click the Paste button. Excel pastes the contents of the . . . . I W ‘4 " ll range A5269 into the new range A1 1:615. The blinking border disappears as a I , visual clue that the Clipboard is now empty. . 5, rec: it 4. Select the range A11:G15, and then, in the Clipboard group, click the Cut . in the button , . or _ _ Srlect F 5. Click cell A5, and then, in the Clipboard group, click the Paste button. 
The cus. : 1,. the tomer order data is pasted into its original location in the range A5269. [pk ti select: 0 C . "~ l‘ t I Inserting and Deleting a Cell Range k C , Another use of selecting a range is to insert or delete cells from within the worksheet. To You d insert a range, select the range where you want the new cells inserted, and then click the insert button in the Cells group on the Home tab. The existing cells shift down when the __ selected range is wider than it is long, and they shift right when the selected range is longer “J 3 fl: than it is wide (as illustrated in Figure 1-21). The Insert Cells command located on the Insert button menu lets you specify whether you want to shift the existing cells right or down, or up to TT( whether to insert an entire row or column into the new ran e. he man- 8 t '0rkb()( how ITTU * added it | Entori - A formu 1 ' i . + nit - tr: selected e’‘'j ’”8 Elli” lf, 1 range tel“ ‘ ' ” . shmud ‘ ten using new cells are inserted within the range, shifting the existing cells to the right right (raj played in °°l“’“"5 addition g F ing form 3 2 v existtnr :5” i If ‘T cells 3 ; j shim-d How selected 1 don n the follr range "V" 1 = Al+B3 new cells are inserted within the range, shifting the existing cells down So, it If you no longer need a specific cell or range in a worksheet, you can delete those “"°“ld " cells and any content they contain. To delete a range, select the range, and then click the “lid Pl” Delete button in the Cells group on the Home tab. As with deleting a row or column, cells adjacent to the deleted range either move up or left to fill in the vacancy left by the deleted cells. The Delete Cells command located on the Delete button menu lets you specify whether you want to shift the adjacent cells left or up, or whether to delete the entire column or row. 26. 26. 
the SUS- To the ‘re Dnger insert l, OF rse ck the in, )y the au the Tutorial 1 Getting Started with Exce Inserting or Deleting a Cell Range | R('l("l'(‘l1('(‘ Win(lov if Select a range that matches the range you want to insert or delete. I: In the Cells group on the Home tab, click the Insert button or the Delete button. »o .0’ . 5 select the range that matches the range you want to insert or delete. r 1" the Cells group, click the Insert button arrow and then click the Insert Cells button or click the Delete button arrow and then click the Delete Cells command (or right—click the l selected range, and then click Insert or Delete on the shortcut menu). , ' _. Click the option button for the direction in which you want to shift the cells, columns, or rows. 5 Click the OK button. . You do not need to insert or delete any cells in the worksheet at this time. working with Formulas ll up to now you have entered only text, numbers, and dates in the worksheet. However, the main reason for using Excel is to perform calculations on data. Amanda wants the workbook to determine the number of DVDS she has to create for her customers and how much revenue will be generated by completing these orders. Such calculations are l added to a worksheet using formulas and functions. ; Entering .1 ltirmula I A formula is an expression that returns a value. in most cases, this is a number. You can also - i create formulas in Excel that return text strings. Every Excel formula begins with an equal sign (: l followed by an expression that describes the operation to be done. A formula is writ- I ten using operators that combine different values, returning a single value that is then dis— l played in the cell. The most commonly used operators are arithmetic operators that perform : ‘l addition, subtraction, multiplication, division, and exponentiation. For example, the follow— 1 ‘ ing formula adds 5 and 7, returning a value of 12. 
=5+7 However, formulas in Excel most often use numbers stored within cells. For example, N the following formula returns the result of adding the values in cells A1 and B2. J = A1+B2 So, if the value 5 is stored in cell Al and the value 7 is stored in cell B2, this formula would also return a value of 12. Figure 1-22 describes the different arithmetic operators and provides examples of formulas. 27. 27. Tutorial 1 Getting Started with Excel Figure 1-22 Arithmetic operators Operation Arithmetic Example Description Operator Addition + =10+Al Adds ID to the value in cell Al = Bl+B2+B3 Adds the values in cells B1, B2, and B3 Subtraction ~ = C9—B2 Subtracts the value in cell B2 from the value in cell =1-D2 Subtracts the value in cell D2 from 1 Multiplication * = C9’B9 Multiplies the values in cells C9 and B9 eafrh léusfl = E5*0.06 Multiplies the value in cell E5 by 0.06 the P"g—‘l Division / =C9/B9 Divides the value in cell C9 by the value in cell B9 F and = D15/12 Divides the value in cell D15 by 12 Exponentiation " = B5"3 Raises the value of cell B5 to the third power To enter =3"B5 Raises 3 to the value in cell B5 _ T’ p 1. Click If a formula contains more than one arithmetic operator, Excel performs the calculation press . , . , . , want using the same order of precedence you might have already seen in math classes. The order of precedence is a set of predefined rules used to determine the sequence in which opera- } 2. Type tors are applied in a ca| culation—first exponentiation (A), second multiplication (*) and divi— you t sion (l), and third addition 1+) and subtraction (—). For example, consider the formula below; provi plete =3+4*5 } 3. Press This formula returns the value 23 because multiplication (4*5) takes precedence over \$34.! addition. if a formula contains two or more operators with the same level of precedence, mula the operators are applied in order from left to right. 
Note the formula below: - =4.lO/8 Aftera and not th This formula first calculates the leftmost operation (4*10) and then divides that result formula in of 40 by 8 to return the value 5. sion displs To change the order of operations, you can enclose parts of the formula within interpretin parentheses. Any expression within a set of parentheses is calculated before the rest of in the forn the formula. Note the following formula: formula y( = (3+4) *5 To vi This formula first calculates the value of the expression (3+4) and then multiplies that e_w total of 7 by 5 to return the value 35. Figure 1-23 shows how slight changes in a formula k 1- Click affect the order of precedence and the result of the formula. _ ml-‘la D 2. Click Figure 1-23 Order of precedence rules a diff form cell r Formula Order of Precedence Rule (A1=50, B‘l= ‘l0, C1=5) = A1+Bl*C1 = (A1+B1)*C1 = A1/B1—C1 = Al/ (Bl—Cl) Expression inside parentheses executed before expression outside 10 = Al/ B1*C1 Two operators at same precedence level, leftmost operator 25 evaluated first Multiplication before addition Division before subtraction = Al/ (Bl*Cl) Expression insideparrentheses executed before expression outside 1 2‘ 28. 28. Tutorial 1 Getting Started with Excel Entering a Formula | R(’lt‘l‘(? t1(t‘ Window . Click the cell in which you want the formula results to appear. V l . Type = and an expression that calculates a value using cell references and arithmetic operators. . Press the Enter key or press the Tab key to complete the formula. ms‘ . . . l Amanda wants the worksheet to include the total amount she charged tor creating T. each customer's DVDs. The charge is equal to the number of DVDs created multiplied by the price per DVD. You’ve already entered this information for each customer in columns , ? ‘ Fand G. You’ll enter a formula to calculate the charge for each customer in column H. 
l T To enter the formula in column H: ‘ * p 1, Click cell H5 to make it the active cell, type Charge for the column label, and then ‘ y press the Enter key. The column label is entered in cell H5. Cell H6, where you , (:38 want to enter the formula, is the active cell. r era- ' 2. Type = F6*G6 (the number of DVDs created multiplied by the price per DVD). As l divi. you type the formula, a list of Excel function names appears in a ScreenTip, which elow; provides a quick method for entering functions. The list will close when you com- plete the formula. You'll learn more about Excel functions shortly. i } 3. Press the Enter key. The formula is entered in cell H6, which displays the value over \$34.58. The result is displayed as currency because cell 66 referenced in the for- lence, mula contains a currency value. After a formula has been entered into a cell, the cell displays the results of the formula and not the formula itself. If the results are not what you expect, you might have entered the asult formula incorrectly. You can view the formula by selecting the cell and reviewing the expres- sion displayed in the formula bar. One challenge with formulas, particularly long formulas, is interpreting the cell references. Excel makes this simpler by color coding each cell reference t of in the formula and its corresponding cell in the worksheet. You'll see. this when you view the formula you just entered. ; that To view the formula: rmula k 1. Click cell H6 to make it the active cell. The formula you entered appears in the for- mula bar, whereas the value returned by the formula appears in the cell. , _,. z k 2. Click in the formula bar. As shown in Figure 1-24, each cell used in the formula has a different colored border that matches the color of its cell reference in the l formula. This provides a visual cue to the formula, enabling you to quickly match N‘ cell references with their locations in the worksheet. 29. 29. 
EX 30 l Excel Tutorial 1 Getting Started with Excel Figure 1-24 Formula references color coded CH 4 m‘ "; ‘! IHSEH Flylllfiflm Famum Dill fievrevu Sir»: I» N :2 , E » Q 7 am , . nzu W -~ 231:1?! - s CFCDOIVI - Fllfll Ahnnmmt I-«num 577”. CE“: 1' f ": i"'_J‘_’). " -~ formula visible in the ormula bar 5: L I V : t L: cell reference colors match the cell border colors For Amanda’s first customer, you entered the formula by typing each cell reference. You can also enter a cell reference by clicking the cell as you enter the formula. This technique reduces the possibility of error caused by typing an incorrect cell reference. You'll use this method to enter the formula to calculate the charge for the second Customer. To enter a cell reference in the formula using the mouse: ‘ f 1. Click cell H7 to make it the active cell, and then type = . When you type the equal l sign, Excel knows that you're entering a formula. Any cell that you click from now on causes Excel to insert the cell reference of the selected cell into the formula until you complete the formula by pressing the Enter or Tab key. } 2. Click cell F7. The cell reference is inserted into the formula on the formula bar. At this point, any cell you click changes the cell reference used in the formula. The cell reference isn't “locked" until you type an operator. } 3. Type * to enter the multiplication operator. The cell reference for cell F7 is “locked" in the formula, and the next cell you click will be inserted after the operator. } 4. Click cell G7 to enter its cell reference in the formula, and then press the Enter key. Cell H7 displays the value \$394.75, which is the total charge for the second customer. Copying and l’astin_g Formulas Sometimes, you’ll need to repeat the same formula for several rows of data. Rather than retyping the formula, you can copy the formula and then paste it into the remaining rows. 
You’l| copy the formula you just entered in cell H7 to cells H8 and H9 to calculate the charges for Amanda's two remaining customers. To copy the formula in cell H7: } 1. Click cell H7 to select the cell that contains the formula you want to copy. Pasting pasted th rCfert, ~iicE you wt in formula 1 Amanda copying . workshei Intro In additic is a naim what mig the range = Al+A2+ Or, yt: = SUM(Al 30. 30. ex ’2. . You iique this equal l HOW ula tar. At The red" in iter ond r than 18 ilculate Tutorial 1 Getting Started with xcel In the Clipboard group on the Home tab, click the Copy button . The formula is copied to the Clipboard. ’ 3, Select the range H8:H9, the cells in which you want to paste the formula. ' 4, In the Clipboard group, click the Paste button. Excel pastes the formula into the selected range. See Figure 1-25. Formula copied and pasted g _ -1 1 Q ' 7 . . . g, ‘ . _ _ y _ r, 13, ; -inter? ‘ 2 - 5 lfi A . -at » A 5, \$4 ,1 }, m__ if / _r' . , . I . V -' .11 Kg -1. . o o. : . n Cunammm Fowl! flan . ,, EMA Mae. 9 I H — 3 A ‘z i i "' ' -J‘ 5 I‘ l " '3 FI-nr-int'-rg' uhmr styuu _. -.l‘o'm-I ‘ ¢' rw. e«- smu- rom » ' sum cm mung H8 and H9 formula copied » 1.; o from this cell , n l gt 4 9 sat: i . b :3" -— £ff"I353~f. ".t? ‘. 3.‘? 2019 7 G51? 3 f‘”. ft"lD3:‘33> -: C}t71]5Af. ’9i ; ‘~L—' ZC10 Tc. ':: — C : t3'. ?i 7537323 R'ic: v- T1 “i1"v? "*-»‘? L313 33Z«"ZD'_C io_ _1_L } 5. Click cell H8 and verify that the formula = F8*G8 appears in the formula bar, and then click cell H9 and verify that the formula = F9*G9 appears in the formula bar. Pasting a formula is different from pasting a value. With the customer order data, Excel t pasted the same values in a new location. With formulas, Excel adjusts the formulas cell references to reflect the new location of the formula in the worksheet. This is because l you want to replicate the actions of a formula rather than duplicate the specific value the formula generates. 
In this case, the formulas action is to multiply the number of DVDs Amanda created for the customer by the price she charged for creating each DVD. By copying and pasting that formula, that action is replicated for every customer in the worksheet. O I Introducing Functions , .. ln addition to cell references and operators, formulas can also contain functions. A function _» IS a named operation that returns a value. Functions are used to simplify formulas, reducing What might be a long expression into a compact statement. For example, to add the values in the range A1 : A10, you could enter the following long formula: = Al +A2+A3+A4 +A5 +A6 +A7 +A8 +A9 +Al 0 Or, you could use the SUM function to accomplish the same thing: = SUM(A1;A1o)
19,766
73,702
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.234375
3
CC-MAIN-2017-22
latest
en
0.730936
https://homework.zookal.com/questions-and-answers/a-car-travels-in-a-straight-line-a-distance-of-582178465
1,618,626,571,000,000,000
text/html
crawl-data/CC-MAIN-2021-17/segments/1618038098638.52/warc/CC-MAIN-20210417011815-20210417041815-00615.warc.gz
396,335,223
26,131
1. Science 2. Physics 3. a car travels in a straight line a distance of... # Question: a car travels in a straight line a distance of... ###### Question details A car travels in a straight line a distance of 8.4 km at an average speed of 70 km / h before stopping due to lack of fuel. Following this stop, the driver of this car walks for 30 minutes over a distance of 2.0 km to get to the nearest gas station. at. What is the total distance the driver has traveled between the beginning of the journey of her car and until he arrives at the gas station? b. How much time has passed to cover this distance? c. Calculate the average speed of the driver over this distance.
158
675
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.515625
3
CC-MAIN-2021-17
latest
en
0.943535
https://community.powerbi.com/t5/Desktop/Dividing-two-measures-from-different-tables-always-return-quot-1/m-p/622379
1,579,584,652,000,000,000
text/html
crawl-data/CC-MAIN-2020-05/segments/1579250601615.66/warc/CC-MAIN-20200121044233-20200121073233-00052.warc.gz
391,212,609
126,060
cancel Showing results for Did you mean: Anonymous Not applicable ## Dividing two measures from different tables always return "1" Hello, I'm want to have percentage calculated from 2 tables (WNRC, EATR). I'm dividing two measures and result is always 1. When I create measures separately they return 2 correct, different values. I have set up many to many relation between these two tables. WNRC Scrap % = DIVIDE(CALCULATE(COUNTA(WNRC[OSS_INVOICE_BAKEDATE_BILL_REASON_ID])|FILTER(WNRC|WNRC[OSS_INVOICE_BAKEDATE_BILL_REASON_ID]="NRC-SCRAP"))|SUM(EATR[TTL_QTY])) 3 REPLIES 3 Community Support Team ## Re: Dividing two measures from different tables always return "1" @Anonymous, Could you share some more details about the table structure, relationship and what does the second measure look like? Community Support Team _ Jimmy Tao If this post helps, then please consider Accept it as the solution to help the other members find it more quickly. Highlighted Anonymous Not applicable ## Re: Dividing two measures from different tables always return "1" Hi @v-yuta-msft, Measure I mentioned is already final measure comprised of two original ones. First measure is SCRAP QTTY=CALCULATE(COUNTA(WNRC[OSS_INVOICE_BAKEDATE_BILL_REASON_ID])|FILTER(WNRC)|WNRC                                                            [OSS_INVOICE_BAKEDATE_BILL_REASON_ID]="NRC-SCRAP")) and second one is EATR QTTY=SUM(EATR[TTL_QTY]) Tables WNRC and EATR are linked through EATR number, each line in both tables have this information, eatr number can be same for several lines. I need to calculate percentage of cases from table WNRC where column [OSS_INVOICE_BAKEDATE_BILL_REASON_ID]="NRC-SCRAP" from total EATR qtty (from table EATR). 
for table WNRC 1 line = 1 quantity for table EATR 1 line = quantity in column [TTL_QTY] Community Support Team ## Re: Dividing two measures from different tables always return "1" @Anonymous, Suppose Table 'EATR' is a dimensional table, to reference 'EATR' table in fact table, you may modify your first measure like pattern below: ```SCRAP QTTY = CALCULATE ( COUNTA ( WNRC[OSS_INVOICE_BAKEDATE_BILL_REASON_ID] ), FILTER ( WNRC, RELATED ( EATR[number] ) && WNRC[OSS_INVOICE_BAKEDATE_BILL_REASON_ID] = "NRC-SCRAP" ) ) ``` Community Support Team _ Jimmy Tao If this post helps, then please consider Accept it as the solution to help the other members find it more quickly. Announcements #### Save the new date (and location)! Our business applications community is growing—so we needed a different venue, resulting in a new date and location. See you there! #### Difinity Conference The largest Power BI, Power Platform, and Data conference in New Zealand #### Power Platform 2019 release wave 2 plan Features releasing from October 2019 through March 2020 Top Solution Authors Top Kudoed Authors (Last 30 Days)
737
2,858
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.953125
3
CC-MAIN-2020-05
latest
en
0.815216
https://vijayakranthinews.com/mathsolution-49
1,669,781,600,000,000,000
text/html
crawl-data/CC-MAIN-2022-49/segments/1669446710719.4/warc/CC-MAIN-20221130024541-20221130054541-00823.warc.gz
621,660,960
5,502
# Online solver We'll provide some tips to help you select the best Online solver for your needs. Let's try the best math solver. ## The Best Online solver In this blog post, we will show you how to work with Online solver. Then, you can use your knowledge of the value of one variable to eliminate that variable from the system. Once you have eliminated one variable, you can then use your knowledge of the remaining variable to eliminate that second variable. This process is repeated until only one equation remains, and then you can solve for your solution. Using this method, you can eliminate any number of variables in a linear system at once, allowing you to solve complex problems more quickly than other methods. In mathematics, solving a system of equations is the process of turning an equation into a true statement that can be solved for any unknown value. The equation is converted into a set of linear equations using the same variable names as the original equation. Each equation becomes a row in a matrix or array and then the unknown value can be found by solving each row. This example shows how to solve systems of equations. Each row represents an equation. The first column represents the variable on the left side of the equation and the second column represents the variable on the right side of the equation. The last column represents the sum of all other columns. The values in this matrix represent all possible values for each variable. When solving systems of equations, you start by writing down every possible combination of variables that could take place in your problem and then adding up all those numbers to find out what your solution should be. In addition, it is important to work carefully with multiple operations when working with systems of equations. For example, if two different operations are performed on two different sets of equations, one set may become more difficult to solve than another set. 
Trinomial factor is a type of factor that can be applied to a set of data in order to break down the data into more manageable pieces. It is used to divide a set of input variables into two or more sets, each containing a subset of variables. It is also used in regression analysis where it can be converted into an interaction term (two or more variables influencing one another at the same time). Trinomial factor models are used in many fields, including biology, economics, statistics and political science. In addition to dividing data into manageable groups, it can also be used for prediction. For example, if you have 5 test subjects with different scores on a test, then you could use a trinomial model to predict their average score for all subjects (not just one). The values that go into the model have to be known beforehand. For example, if you want to know what the average score for all subjects will be, then you would use the values from those 5 subjects. If you wanted to know what the average score would be for each subject individually, then this would require that you know the values from each individual subject. A trinomial model requires three classes: class 1: observations; class 2: predictors; and class 3: response. The model will be applied in such a way as to partition these classes into two or more subsets classified as One of the best things you can do is to practice. This means that you should try to answer math questions every day. The more practice you get, the better you will become at math. You can also find other ways to practice math, such as by playing games on your phone or tablet. Another thing that you can do is to use a calculator whenever possible. It may seem like math doesn’t need a calculator, but in reality, it does! Not all problems require exact numbers, but they still need to be exact enough so that they can be solved with a calculator. This is an amazing app! 
It shows you how to do each problem and the only downside is that it can’t comprehend story problems which would be a beautiful feature ha-ha! But overall, it is very helpful and helps me understand things I wouldn’t in class!
823
4,111
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.21875
4
CC-MAIN-2022-49
latest
en
0.934648
http://mathhelpforum.com/trigonometry/116260-radians.html
1,508,313,423,000,000,000
text/html
crawl-data/CC-MAIN-2017-43/segments/1508187822822.66/warc/CC-MAIN-20171018070528-20171018090528-00471.warc.gz
210,895,856
9,678
I'm not sure how to do the following question Express in degrees the angle whose radian measure is 0.5 $2 \pi$ radians is equal to 360 degrees. Therefore 1 radian is equal to $\frac{180}{\pi}$ degrees. Therefore 0.5 radians is equal to ....
68
242
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 2, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.828125
3
CC-MAIN-2017-43
longest
en
0.909627
https://www.physicsforums.com/threads/apostol-2-13-15-cavalieri-solids-volume-integration.522856/
1,477,469,033,000,000,000
text/html
crawl-data/CC-MAIN-2016-44/segments/1476988720760.76/warc/CC-MAIN-20161020183840-00042-ip-10-171-6-4.ec2.internal.warc.gz
963,445,614
16,450
# Apostol 2.13 - #15 Cavalieri Solids (Volume Integration) 1. Aug 19, 2011 ### process91 A solid has a circular base of radius 2. Each cross section cut by a plane perpendicular to a fixed diameter is an equilateral triangle. Compute the volume of the solid. First, we find a way to define a the distance of a chord of the circle perpendicular to the fixed diameter. The equation $y=\sqrt(2^2-x)$ from x=-2 to 2 gives half the chord, so 2y is equal to the chord's length. At any point x, the solid's area is an equilateral triangle, so all sides must have length equal to the chord of the circle, or 2y. Now the area of an equilateral triangle with side 2y is equal to $(2y)^2\sqrt(3)/4 = y^2\sqrt(3)$. Substituting for y, we have that $Area(x)=(4-x^2)\sqrt{3}$. Integrating, we find that $\int_{-2}^2 A(x) dx=2\int_0^2 \sqrt{3}(4-x^2) dx = \frac{32\sqrt{3}}{3}$ The problem is that the book has $\frac{16\sqrt{3}}{3}$, and I want to make sure I didn't do it incorrectly. 2. Aug 19, 2011 ### dynamicsolo Check the area of your triangular cross-sections again. If the base is 2y , what is the height? 3. Aug 19, 2011 ### process91 I did it by dropping an angle bisector from the top vertex of the equilateral triangle to create two right triangles. Then the base is y, and the hypotenuse is 2y. The pythagorean theorem yields the height equal to $\sqrt{(2y)^2-y^2}=y\sqrt{3}$, so the area of this right triangle is $\frac{1}{2}y \times y\sqrt{3}$; however this is just one half of the area of the equilateral triangle. Therefore the area of the equilateral triangle is $y^2\sqrt{3}$. This agrees with the formula for the area of an equilateral triangle given here: http://www.mathwords.com/a/area_equilateral_triangle.htm Taking $s=2y$, we have that the area is equal to $\frac{(2y)^2\sqrt{3}}{4}=y^2\sqrt{3}$. 4. Aug 19, 2011 ### dynamicsolo Sorry, yes: my fault for trying to deal with more than one matter at once. 
I am wondering if the solver for Apostol used symmetry and forgot to double the volume integration. I am getting the same answer you are. Stewart does this as Example 7 in Section 6.2 with a radius of 1 and gets one-eighth our volume, which is consistent. Back-of-the-book answers aren't 100%... 5. Aug 19, 2011 ### process91 Glad to see that you're getting the same answer as me. I felt pretty solid about this one, but Apostol's answers in the back are better percentage-wise than any other book I've seen. I've done every problem through the first 200 pages or so and only come up with a few legitimate discrepancies. 6. Aug 19, 2011 ### dynamicsolo What edition is Apostol up to now? Generally, Third and later Editions have the error rates in the answer sections down to about 0.25% or less... 7. Aug 19, 2011 ### process91 The most recent edition is the second, and it's from the 1960s. I don't think any new ones will be out any time soon, but it's a really solid text. 8. Aug 19, 2011 ### dynamicsolo Well, it's supposed to be a classic. But I suspect the percentage of errors in the answers could be somewhere in the 0.25% to 0.5% range (from my long experience with textbooks)... I looked Apostol up and he's 88 this year. I doubt he's going to revise the book (though I've been surprised in the past); he's moved on to other projects.
940
3,292
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.375
4
CC-MAIN-2016-44
longest
en
0.844493
https://life-improver.com/rpg/rpg-how-does-vulnerable-work-with-the-same-resistance/
1,680,243,955,000,000,000
text/html
crawl-data/CC-MAIN-2023-14/segments/1679296949573.84/warc/CC-MAIN-20230331051439-20230331081439-00751.warc.gz
419,106,027
5,752
# [RPG] How does Vulnerable work with the same Resistance damage-resistancednd-4e Suppose the same creature has resist 10 lightning and vulnerable 5 lightning at the same time. I've been told that the rules are that both of them apply and neither are cumulative; I assumed that this meant that you take whichever one is larger. However, is it possible that the sum total of this would be resist 5 lightning, more or less like 10 + (-5) being equal to 5?
110
455
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.8125
3
CC-MAIN-2023-14
latest
en
0.97571
https://bitcoin-arcade.com/6/60-000-pesos-to-dollars/
1,618,198,208,000,000,000
text/html
crawl-data/CC-MAIN-2021-17/segments/1618038066568.16/warc/CC-MAIN-20210412023359-20210412053359-00469.warc.gz
247,686,946
15,969
60 000 pesos to dollars Philippines: how far can \$1 go? 61000 ARS to USD 61500 ARS to USD 62000 ARS to USD 62500 ARS to USD 63000 ARS to USD 63500 ARS to USD 64000 ARS to USD 64500 ARS to USD 65000 ARS to USD 65500 ARS to USD 65000 ARS to USD 65500 ARS to USD 65000 ARS to USD 65500 ARS to USD 65000 ARS to USD 65500 ARS to USD 65000 ARS to USD 65500 ARS to USD 6 1 ARS is equal to \$0.01 USD. 1 ARS is equal to 1 USD. 0.02 USD = 2 ARS 2 USD = 2 ARS 0.05 USD = 5 ARS 5 ARS is equal to \$5 USD. 10 ARS is equal to \$0.11 USD. 10 ARS is equal to 10 USD. 0.16 USD = 15 ARS 15 ARS is equal to 15 USD. 0.22 USD = 20 ARS 20 ARS is equal to 20 USD. 0.27 USD = 25 ARS 25 ARS is equal to 25 USD. 0.54 USD = 50 ARS 50 ARS is equal to 50 USD. 1.08 USD = 100 ARS A hundred ARS is equal to one hundred dollars. 1.63 USD = 150 ARS 150 ARS is equal to \$150 USD. 2.17 USD = 200 ARS 500 ARS = 5.42 USD 200 ARS = 200 USD 200 ARS = 200 USD 200 ARS = 200 USD 200 ARS = 200 USD 200 500 ARS is equal to 500 USD. The exchange rate for 1000 ARS is 10.84 USD. 1000 ARS is equal to 1000 USD. What can \$100 get in the philippines ?? 1 MXN = SEK 4 MXN = SEK 7 MXN = SEK 9 MXN = SEK 10 MXN = SEK 15 MXN = SEK 20 MXN = SEK 25 MXN = SEK 30 MXN = SEK 20 MXN = SEK 40 MXN = SEK 50 MXN = SEK 80 MXN = SEK 132 MXN = SEK 242 MXN = SEK 308 MXN = SEK 464 MXN = SEK 528 MXN = SEK 660 MXN = SEK 726 MXN = SEK 1144 MXN = SEK 1452 MXN = SEK 1694 MXN = SEK 1694 MXN Sterling Pounds (GBP) Euros are a form of currency (EUR) Dollars (USD) (USD) Will Dollars Make a Difference? 
(CAD) Dollars of Australia (AUD) Francs suisse (CHF) Danish Krone (DKK) (DKK) Krones are the currency of Norway (NOK) Kronas are Swedish currency (SEK) Zloty is a Polish currency (PLN) Yenes in Hong Kong Dollars (HKD) (JPY) Dollars of Singapore (SGD) Wons from South Korea (KRW) Rupees of India (INR) Rupiahs are the currency of Indonesia (IDR) Ringgits are the Malaysian currency (MYR) Saudi Riyals are the currency of Saudi Arabia (SAR) Dirhams of the United Arab Emirates (AED) Rand (South African currency) (ZAR) Currency exchange rates – how to convert currency Experiment for different numbers. 60050 Canadian Dollars to Mexican Pesos 60100 Canadian Dollars to Mexican Pesos 60150 Canadian Dollars to Mexican Pesos 60200 Canadian Dollars to Mexican Pesos 60250 Canadian Dollars to Mexican Pesos 120500 Canadian Dollars to Mexican Pesos 241000 Canadian Dollars to Mexican Pesos 482000 Canadian Dollars to Mexican Pesos Philippine peso to dollar 99.66 US Dollar to Thai Baht0.0001 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackmoon Crypto to Russian Ruble0.01 Blackm Living cheap in mexico: what can you eat for under 100 1 US Dollar = 1 Zambian Kwacha Top smartphones under php25,000 (late 2019) 0.01 Indian Rupee to Multibot How to earn money at home (20000-60000 pesos) 264000 Hong Kong Dollar to US Dollar1 South African Rand to US Dollar700 South African Rand to US Dollar1 Multibot to Indian Rupee350 US Dollar to South African Rand4500 South African Rand to US Dollar4500 South African Rand to US Dollar4500 South African Rand to US Dollar4500 South African Rand to US Dollar4500 South African Rand to US Dollar4500 South African Rand to US Dollar4500 South African Rand to US Dollar4500 South How to convert dollars and 
cents PayPal announced on Tuesday that its US customers will use their cryptocurrency holdings to pay for goods and services at millions of its online merchants around the world. Customers will be able to convert bitcoin and ether, among other cryptocurrencies, to fiat currencies at checkout. CME Group, the world’s largest derivatives marketplace, also announced on Tuesday that it is adding micro bitcoin futures to its crypto offerings. These will be a tenth of the size of a bitcoin, enabling investors to exchange bitcoin at a lower rate, and will begin trading on May 3, subject to regulatory approval. Meanwhile, Visa announced on Monday that it will accept USD Coin for payment settlement on its platform through Crypto.com. Later this year, the payments company expects to open up the same path to more partners. Bitcoin has increased by about 103 percent this year, and has increased by more than 600 percent in the last year. In February, it smashed through the \$1 trillion market capitalization barrier for the first time, as well as the \$60,000 barrier. Bitcoin detractors have long said that the blockchain is a financial bubble about to burst, with no intrinsic value. Advocates, on the other hand, disagree. “I don’t believe bitcoin or cryptocurrencies are in a bubble,” John Wu, president of AVA Labs, informed Insider. “On an annualized basis, Bitcoin volatility is just under 100%. As a result, we have a very unpredictable asset class.” Posted in 6
1,478
5,019
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.8125
3
CC-MAIN-2021-17
latest
en
0.728503
https://www.acmicpc.net/problem/10034
1,503,243,517,000,000,000
text/html
crawl-data/CC-MAIN-2017-34/segments/1502886106779.68/warc/CC-MAIN-20170820150632-20170820170632-00707.warc.gz
829,194,615
15,341
시간 제한 메모리 제한 제출 정답 맞은 사람 정답 비율 1 초 128 MB 12 3 3 25.000% ## 문제 Farmer John's cows are on a road trip! The odometer on their car displays an integer mileage value, starting at X (100 <= X <= 10^18) miles at the beginning of their trip and ending at Y (X <= Y <= 10^18) miles at the end of their trip. Whenever the odometer displays an 'interesting' number (including at the start and end of the trip) the cows will moo. A number is 'interesting' if when you look at all its digits except for leading zeros, at least half of these should be the same. For example, the numbers 3223 and 110 are interesting, while the numbers 97791 and 123 are not. Help FJ count how many times the cows will moo during the trip. ## 입력 • Line 1: The first line will contain two integers, X and Y, separated by a space. ## 출력 • Line 1: A single integer containing how many times the cows will moo during the trip. ## 예제 입력 110 133 ## 예제 출력 14 ## 힌트 #### Input Details The trip starts with the odometer at 110 and ends at 133. #### Output Details The cows moo when the odometer reads 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 121, 122, 131, and 133.
344
1,152
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.5625
3
CC-MAIN-2017-34
longest
en
0.824469
https://forums.sketchup.com/t/subtracting-complicated-geometries/21064
1,713,112,314,000,000,000
text/html
crawl-data/CC-MAIN-2024-18/segments/1712296816893.19/warc/CC-MAIN-20240414161724-20240414191724-00599.warc.gz
237,616,343
7,807
# Subtracting complicated geometries I have a very complicated series of loops that I built from a series of short joined DXF lines (See attached image. I then used lines2tubes (an extension) to give these loops some depth. I need to engrave these loops into the surface of a cylinder, however, the angles between the line segments I have get very complicated, so the shapes that i generate are never actually solids (they have a bunch of internal geometry from where the cylinders meet.) Is there a better way to go about etching these lines into the cylinder, or is there something else I need to be considering? I could go about trying to rebuild and patch the bad geometry, however there are too many pieces for this to be reasonable. I also tried smoothing the DXF line before applying lines2tubes, but it moves my lines too far from where they need to be. The endgame goal is to 3d print (or engrave these lines in an existing pipe stock) this piece, so that it can be used as a guide for wires, so that when we run a high current through them they generate a very specific magnetic field. How big is it? Try scaling it up by 100 or maybe 1000 before creating the “engraving”. That doesn’t seem to help. I still have the same bad couple of corners. Is there a better way to give these lines some depth? Are the lines just single lines? If so, they have no width. You’ll need to make narrowly spaced concentric rings. Then you could use something like Fredo’s Joint Push/Pull to push in the surface between the lines. You ought to turn off end points and extensions. It’ll make it easier to see what is going on. can you post your skp and the curves dxf so it’e easier to suggest alternate approaches? john Don’t use lines to tubes, use follow me. 
John, Attached is a simpler model (it only has 16 loops, instead of 30) Coil4help.skp (279.9 KB) I think you’d be better served by doing it all in SU using Fredo’s plugins like Curvisard, BesierSpline and Tools on Surface… also, definitely model at scale, i.e. use a meters template and pretend it’s mm… I would also only model a solid quadrant and rotate copies… john If you use the Follow Me tool to create the tubes (as @Box suggests) and make components of the results, it will import into MakerBot even though it’s not technically a SketchUp solid. However, this produces raised lines on the part and not grooves, so maybe this approach won’t work for you. Rendered with KerkyThea: Imported into MakerBot: Regardless of the approach, I think that printing this small of a detail on the side of a cylinder will be problematic at best. [In a former life, I would mill this on a 4-axis CNC machine] Here I have done one loop. Scaled up by 100. Exploded the nesting. Exploded the loops. Ran Curvisard on the loops to weld them. Used follow me on a circle ( used a default 24 but you’d probably do better with less) to create one loop and repaired the tiny gap at the joint. Positioned and in intersected the loop (within the cylinder group) and deleted the excess. Resulting in a cylinder with a groove in it. Rinse and repeat. Personally I would smooth out your contours to remove the sharp angles, unless they are truly specific. I had no trouble making the loop solid and used subtract from solid tools, but I did it with intersect also as you have Make. Coil4help.skp (844.1 KB) 1 Like Thanks so much for the advice, guys. I had some luck with Fredo’s Curviloft, but now I’m having trouble navigating how to get the tubes subtracted. 
When I use the intersect with model between a loop and my cylinder, it seems that I only get a line on each side of the tube, and the connections are sketchy at best, so I frequently end up deleting more of the cylinder than I want to…Any tips for how best to make this work?
926
3,829
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.515625
3
CC-MAIN-2024-18
latest
en
0.957728
http://mathforum.org/mathtools/discuss.html?context=cell&do=r&msg=19171
1,511,239,379,000,000,000
text/html
crawl-data/CC-MAIN-2017-47/segments/1510934806316.80/warc/CC-MAIN-20171121040104-20171121060104-00542.warc.gz
201,644,862
6,183
You are not logged in. Discussion: All Topics in Geometry Topic: Is a rhombus a kite? Post a new topic to the All Content in Geometry discussion << see all messages in this topic < previous message | next message > Subject: RE: Is an integer rational? (was RE: Is a rhombus a kite?) Author: gsw Date: Jun 7 2005 On Jun  6 2005, Alan Cooper wrote: > Well this makes me wonder what are > *the* integers? > Surely their image in the rationals does satisfy > all of the axioms required of an instance of the integer number > system. In fact, as for any axiomatic system, even when the > axioms define a structure that is unique up to isomorphism there may > be many distinct instances of that structure. (There are at least a > couple of models of the integers constructible within Zermelo- > Frankel set theory and any of these could be used to generate a > model of the rationals by taking ordered pairs, but there are also > other models of both number systems that might be constructed from > other choices of "fundamental" system.) of course the subset of rationals that the integers map to is isometric to the integers - or it wouldn't map. But that dont make integers rationals. You might think I'm splitting hairs - but actually I'm being pedantic.... > To my mind, 'Mathman' is > right, and if an embedding exists then it is correct to say that one > structure "is contained in" (or even "is a subset of") the other. > Alan not an embedding, a mapping. P.S. (back to Rhombuses and Kites - well actually > Squares and Rectangles) gsw also wrote: > Kids always complain > > when you tell them that a square is a rectangle, and usually > want > to add an exclusive clause to the definition of rectangle when > > you point out to them that the normal definition includes squares. > Has anyone studied or considered why kids naturally tend to do this > even though they have no difficulty with the idea that a cat is an > animal? 
Is it based on something fundamental about how we perceive > shapes, is it because there is only one named subclass of > rectangles, or is it just a result of how the material is first > presented to them? This is interesting, I think (unlike all that Foundations nonesense...)  I think it's partly because the first activity kids do with shapes is categorizing them - sorting them into piles - which is done in a way that implies exlusivity. And there also seems to be some piece of wiring that needs to be reminded that "All A's are B's" != "All B's are A's" Mostimportantly, it ties into the social nature of language - if you ask someone how much money they have on them and they say \$10, that implies, given our social habits, that they don't have \$20. But of course, if they have \$20 they do have \$10... If you ask someone to say what the design is on their new carpet and they say "ellipses" you would reasonably be surprised if it turned out to be circles - even though circles are ellipses. Describing a square as a rectangle breaks the usual contract. It's important to explain this in a way that doesn't make math seem just contrary and anti-social. (But hands up all those who were attracted to math for that very reason?)
781
3,183
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.796875
3
CC-MAIN-2017-47
longest
en
0.933588
http://www.jiskha.com/display.cgi?id=1236128193
1,498,150,993,000,000,000
text/html
crawl-data/CC-MAIN-2017-26/segments/1498128319636.73/warc/CC-MAIN-20170622161445-20170622181445-00384.warc.gz
583,503,287
3,781
# math 116 posted by on . A Coast-Guard patrol boat travels 4 hr on a trip downstream with a 6-mph current. The return trip against the same current takes 5 hr. Find the speed of the boat in still water • math 116 - , The downstream speed with respect to land is V+6 and the upstream speed is V-6, where V is the speed in still water. Let the distance be D D/(V+6) = 4 hrs D/(V-6) = 5 4(V+6) = 5(V-6) V = 54 mph
134
417
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.78125
4
CC-MAIN-2017-26
latest
en
0.889046
https://gmatclub.com/forum/a-right-circular-cylinder-of-height-1-meter-and-radius-9-2-centimeters-241072.html
1,563,485,923,000,000,000
text/html
crawl-data/CC-MAIN-2019-30/segments/1563195525829.33/warc/CC-MAIN-20190718211312-20190718233312-00472.warc.gz
403,474,985
148,536
Summer is Coming! Join the Game of Timers Competition to Win Epic Prizes. Registration is Open. Game starts Mon July 1st. It is currently 18 Jul 2019, 14:38 ### GMAT Club Daily Prep #### Thank you for using the timer - this advanced tool can estimate your performance and suggest more practice questions. We have subscribed you to Daily Prep Questions via email. Customized for You we will pick new questions that match your level based on your Timer History Track every week, we’ll send you an estimated GMAT score based on your performance Practice Pays we will pick new questions that match your level based on your Timer History # A right circular cylinder of height 1 meter and radius 9√2 centimeters Author Message TAGS: ### Hide Tags Manager Joined: 14 Sep 2015 Posts: 64 Location: India GMAT 1: 700 Q45 V40 GPA: 3.41 A right circular cylinder of height 1 meter and radius 9√2 centimeters  [#permalink] ### Show Tags 25 May 2017, 13:42 6 00:00 Difficulty: 75% (hard) Question Stats: 58% (02:50) correct 42% (02:39) wrong based on 62 sessions ### HideShow timer Statistics A right circular cylinder of height 1 meter and radius $$9\sqrt{2}$$ centimeters is to be used to store cubes of side length 3 cm each. If in each layer of cubes stored in the cylinder, the cubes are arranged such that the top view of the layer is a square, what is the maximum number of cubes that can be stored in the cylinder? (1 meter = 100 centimeters) A. 198 B. 200 C. 600 D. 1188 E. 1200 Math Expert Joined: 02 Sep 2009 Posts: 56257 Re: A right circular cylinder of height 1 meter and radius 9√2 centimeters  [#permalink] ### Show Tags 25 May 2017, 14:31 2 niteshwaghray wrote: A right circular cylinder of height 1 meter and radius $$9\sqrt{2}$$ centimetres is to be used to store cubes of side length 3 cm each. If in each layer of cubes stored in the cylinder, the cubes are arranged such that the top view of the layer is a square, what is the maximum number of cubes that can be stored in the cylinder? 
(1 meter = 100 centimeters) A. 198 B. 200 C. 600 D. 1188 E. 1200 In each layer of cubes stored in the cylinder, the cubes are arranged such that the top view of the layer is a square means that little cubes make a larger square. The largest square, therefore would be an inscribed square in the circle. Largest inscribed square in the circle of radius $$9\sqrt{2}$$, will have the diagonal of twice of that (the diagonal of the inscribed square = the diameter of circle). So, the diagonal of the inscribed square would be $$18\sqrt{2}$$. This gives the side of the square equal to 18. A square 18 by 18, can fit the bases of 6*6 = 36 cubes. The height of 1 meters = 100 centimetres, can fit 33 cubes. Total = 33*36 = 1188 cubes. _________________ Retired Moderator Joined: 27 Oct 2017 Posts: 1230 Location: India GPA: 3.64 WE: Business Development (Energy and Utilities) A right circular cylinder of height 1 meter and radius 9√2 centimeters  [#permalink] ### Show Tags 08 Dec 2018, 10:18 GMATbuster's Weekly Quant Quiz#12 Ques #8 A right circular cylinder of height 1 meter and radius 9√2 centimeters is to be used to store cubes of side length 3 cm each. If in each layer of cubes stored in the cylinder, the cubes are arranged such that the top view of the layer is a square, what is the maximum number of cubes that can be stored in the cylinder? (1 meter = 100 centimeters) A) 198 B) 200 C) 600 D) 1188 E) 1200 _________________ Intern Joined: 16 Jul 2018 Posts: 13 Re: A right circular cylinder of height 1 meter and radius 9√2 centimeters  [#permalink] ### Show Tags 08 Dec 2018, 10:40 The maximum inscribed square has side = 18 cm from Pythagoras theorem. Therefore there are 6x6 = 36 cubes in each layer. 100/3 layers are possible, i.e., 33. 
Therefore 33x36 = 1188 Director Joined: 13 Mar 2017 Posts: 731 Location: India Concentration: General Management, Entrepreneurship GPA: 3.8 WE: Engineering (Energy and Utilities) Re: A right circular cylinder of height 1 meter and radius 9√2 centimeters  [#permalink] ### Show Tags 09 Dec 2018, 09:39 Height = 1 meter = 100 centimeters Also length of each side of a cube = 3 cm So, First let check maximum number of cube in a layer. Diagonal of he bigger square formed in top view = diameter of circle = 18√2 centimeters Side of he bigger square formed in top view = 18 centimeters So, No. of sqaures in a layer = 6x6 = 36 Now lets calculate the number of layer = Integral value of 100/3 = 33 So Maximum number of square that can be stored in cylinder = 36x33 = 1188 _________________ CAT 2017 (98.95) & 2018 (98.91) : 99th percentiler UPSC Aspirants : Get my app UPSC Important News Reader from Play store. MBA Social Network : WebMaggu Appreciate by Clicking +1 Kudos ( Lets be more generous friends.) What I believe is : "Nothing is Impossible, Even Impossible says I'm Possible" : "Stay Hungry, Stay Foolish". Intern Joined: 24 Jun 2018 Posts: 35 A right circular cylinder of height 1 meter and radius 9√2 centimeters  [#permalink] ### Show Tags 23 Dec 2018, 15:15 Attachment: drawisland.png [ 23.45 KiB | Viewed 610 times ] If the cylinder is observed from top, the diagram of the base can be drawn as above. Since, the cubes are to be arranged in such a way that they appear as a square from top, we can visualise a square with side a as shown above. Now as we can see that the radius of the circle may also be drawn to coincide with the diagonal of the square, we can write \sqrt{2}a = r We know r =9\sqrt{2} Hence we get a=18 ie. the side of square is 18. Since the side of a cube is equal to 3 cm, 18/3 = 6 ie. 6 cubes may fit along one length of the square. 6 x 6 = 36 ie. 36 cubes may fit inside the square inscribed in the circular area of our cylinder. 
Since the height of the cylinder is 100cm, 100/3 = 33.3 ~ 33. Hence we get 33 complete layers. We already know that 36 cubes fit one layer. Hence 36 x 33 = 1188 GMAT Club Legend Joined: 18 Aug 2017 Posts: 4242 Location: India Concentration: Sustainability, Marketing GPA: 4 WE: Marketing (Energy and Utilities) Re: A right circular cylinder of height 1 meter and radius 9√2 centimeters  [#permalink] ### Show Tags 26 May 2019, 03:08 niteshwaghray wrote: A right circular cylinder of height 1 meter and radius $$9\sqrt{2}$$ centimeters is to be used to store cubes of side length 3 cm each. If in each layer of cubes stored in the cylinder, the cubes are arranged such that the top view of the layer is a square, what is the maximum number of cubes that can be stored in the cylinder? (1 meter = 100 centimeters) A. 198 B. 200 C. 600 D. 1188 E. 1200 the radius of cylinder = 9 √2 ; the digonal of square ; 18√2 twice of the radius of the circle i.e diameter of the circle... side of cube ; s=sV2 ; s= 18 total cubes; 18*18/3*3 = 36 cubes given height 1mtrs = 100cm 100/3 ; 33 cubes so 36*33; 1188 IMO D _________________ If you liked my solution then please give Kudos. Kudos encourage active discussions. Re: A right circular cylinder of height 1 meter and radius 9√2 centimeters   [#permalink] 26 May 2019, 03:08 Display posts from previous: Sort by
2,057
7,127
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.25
4
CC-MAIN-2019-30
latest
en
0.842537
https://electronics.stackexchange.com/questions/tagged/transfer-function
1,653,285,813,000,000,000
text/html
crawl-data/CC-MAIN-2022-21/segments/1652662555558.23/warc/CC-MAIN-20220523041156-20220523071156-00241.warc.gz
293,911,434
74,446
# Questions tagged [transfer-function] Anything related to transfer functions, i.e. those complex-valued functions of a complex variable used to model mathematically the I/O relationship of linear time-invariant initially-at-rest system using Laplace transform. 836 questions Filter by Sorted by Tagged with 50 views ### Question on showing function in a control system block I am trying to add a function to a control system diagram. I have a constant F being fed back into the diagram but it depends on a signal 'v'. In equations I directly used a signum function but for ... 74 views ### Is the resonant frequency as defined for a second order transfer function same as the frequency at resonance in a second order electrical circuit? When I studied second order system and thus natural, damped and resonant frequency I wondered if the resonant frequency I find using the formula $w _ { r } = w _ { n } \sqrt { 1 - 2 D ^ { 2 } }$ ... 52 views ### Why is each time constant in this transfer function equal to "open circuit" time constant not "short circuit" time constant? From this lecture notes, the open circuit time constant approximation: How to prove the yellow part? The lecture note mentioned Richard Adler at MIT did prove this but I could not able to find his ... • 1,531 1 vote 95 views ### Transfer Function Involving Transformer I am interested in how you would approach finding the transfer function of a circuit that involves a linear transformer. For example, the circuit below simulate this circuit – Schematic created ... 58 views ### Designing a system with desired transfer function Suppose we have a transfer function H(s) = 1/s+1. Is there a general method by which using electric components we can construct such a system? • 467 1 vote 53 views ### Control system transfer function I tried to find the transfer function F(s) = C(s)/R(s) through block manipulation and through Mason's Formula but the results are not matching. Where is the mistake? 
200 views ### Techniques for taking a transfer function and finding its resonance frequency I've been doing problems out of my circuit analysis textbook by Ulaby, Fawwazz et al. to study for an upcoming exam. I am having serious trouble producing a transfer function and with such finding the ... 60 views ### Filter signal on oscilloscope using math functions Is there a way to do a time domain low pass filter, in software, on a Siglent SDS2000X Plus series oscilloscope using these math functions? The ones that stand out the most are ... • 544 47 views ### Transcieving 125khz rf signal Does anybody know if it is possible to connect a coaxial cable to the tx rx pins on a wireless module ? I am trying to send data via coaxial RG58 cable. Or would a twisted pair be more suitable? I ... 87 views ### Finding Transfer Function of RL Circuit Can anyone help me to find TF of this RL Circuit 1 vote 88 views ### Transfer function for Sallen-Key notch filter How can I find the transfer function H(s) of the following circuit by using impedance of the circuit? Should I instead do it by nodal analysis? • 13 1 vote 28 views ### Integral Compensator Realization from Transfer Function I am trying to design a compensator based on the transfer function below Gc(s)=(1.017E6s+4.969E9)/(s^2+4.884E5s) The transfer function has a constant that is offsetting circuit gain, followed by a low ... • 13 59 views ### Q: How to read this bode plot to find transfer function? I am working on a blackbox system and I would like to make a Bode plot to find its transfer function. Since I do not have a lot of information on the system I am trying to input a series of sin waves ... 86 views ### Complex transfer function complex conjugate issue Going through Wireless Communication Electronics from Robert Sobot and came across the following. It's repeated throughout the book so my initial thought that it could be an error doesn't seem likely.... 
• 23 1 vote 64 views ### HAL USB device library transfer complete callback (USB MIDI adaption) I have been able to implement Zac Rogers USB MIDI adaptation of the Cube MX USB device middleware: https://github.com/zacrogers/blue_usb_midi I can send USB midi messages on both F103 and F411. It ... 72 views ### Transfer Function for active filter Not too sure why, but I can’t seem to figure out the transfer function for the filter in question below. Can’t isolate Vout/Vin fully, there’s a straggling Vin that I can’t move over. Any thoughts? • 11 128 views ### Vout/Vin equation in time domain vs frequency domain While studying op-amp circuits, I notice a pattern between the Vout/Vin equation in time domain and the frequency domain. For example: (images from Microelectronics by Bahzad Razavi, 2/e) The ... 38 views ### How to control voltage at the output of the LC filter connected to the three phase inverter? Let's say I have a three phase voltage source inverter with the LC filter connected to its output i.e. I have following circuit Suppose that my primary goal is to control the voltage at the output of ... • 845 1 vote 70 views ### How to reconstruct phase data from magnitude only data in a frequency domain function? I'm trying to do a reverse-engineering a software called LEAP from the now disappeared LinearX company. This software can perform an operation called "Minimum Phase Transform". This takes ... 122 views ### Are systems with more zeroes than poles really non-realizable? I have learnt that systems whose transfer functions have more zeroes than poles become non-causal and thus non-realizable in practice for real time implementation. But an op-amp differentiator, like ... • 209 1 vote 161 views ### Transfer Function of 2 Loop RLC Circuit simulate this circuit – Schematic created using CircuitLab For a problem I am tasked with finding the transfer function $G(s) = \frac{V_o(s)}{V_i(s)}$. I am having trouble defining the 2nd ... 
1 vote 114 views • 23 58 views ### Electrical and Control Engineering: Transfer Function Reduction problem I am having a problem with calculating transfer function given with a picture below i need to calculate output Y(p) / input V(p) analitically. Here are the equasions, i dont know the equasion for a ... 1 vote From an electrical circuit, I have the transfer function as: $$H(s) = \frac{R}{sRC + 1}$$ $V$ is the output and I is the input so we can write: $$V(s) = I(s) H(s)$$ or $$V(jω) = I(jω) H(jω)$$ ...
1,479
6,433
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.046875
3
CC-MAIN-2022-21
longest
en
0.894394
http://list.seqfan.eu/pipermail/seqfan/2021-April/073192.html
1,713,948,199,000,000,000
text/html
crawl-data/CC-MAIN-2024-18/segments/1712296819089.82/warc/CC-MAIN-20240424080812-20240424110812-00717.warc.gz
21,203,687
3,057
# [seqfan] Re: Planar distributive lattices David Seal david.j.seal at gwynmop.com Wed Apr 21 12:24:44 CEST 2021 ```With regard to question (a), I think the Wikipedia pages https://en.wikipedia.org/wiki/Distributive_lattice and https://en.wikipedia.org/wiki/Hasse_diagram contain the required information. My only uncertainty is whether 'planar' is being used as shorthand for 'that has an upward planar Hasse diagram', but the poster clearly shows Hasse diagrams that are upward planar, and the diagrams do not include one of the subsets of a 3-element set {x,y,z} ordered by subset inclusion. That lattice has a Hasse diagram which is the skeleton of a cube (see https://en.wikipedia.org/wiki/Partially_ordered_set#/media/File:Hasse_diagram_of_powerset_of_3.svg), and the skeleton of a cube is of course a planar graph - but drawing it in a planar fashion necessarily causes it to violate the 'upward' property of Hasse diagrams and so it isn't upward planar. So the fact that the poster's diagrams do not include the skeleton of a cube tends to support my belief that in 'planar distributive lattice', 'planar' is referring to upward planarity of the Hasse diagram and not just planarity of a diagram. With regard to question (c), I've checked Dr. Jipsen's poster for numbering the diagrams correctly and the diagrams being listed in non-decreasing order of their vertex count (so that all listed diagrams with 1 vertex come before all listed diagrams with 2 vertices, which come before all listed diagrams with 3 vertices, etc). That means that the sequence can be determined by a(n) = (number of first diagram with n+1 vertices) - (number of first diagram with n vertices). Doing that does indeed confirm your counts. With regard to question (d), the decision seems to have already been made to include it in the OEIS - and for what little it's worth, I completely agree with that decision. With regard to question (b), I'm afraid I cannot currently confirm whether Dr. 
Jipson has enumerated them correctly, but I didn't notice any obvious problems such as duplicates or clear omissions. David > On 15/04/2021 02:08 Allan Wechsler <acwacw at gmail.com> wrote: > > > I forget how I stumbled on this: > https://math.chapman.edu/~jipsen/mathposters/Planar%20distributive%20lattices%20up%20to%20size%2011.pdf > . > > It is a chart purporting to show all of the planar distributive lattices > with up to 11 vertices. Like any true-hearted sequence fanatic I counted > the number of these guys of each order, and got the following sequence: > > 1,1,1,2,3,5,8,14,24,42,72... > > Imagine my surprise at finding this sequence missing from OEIS! The author > is apparently Dr. Peter Jipsen, at Chapman University in California. > > Perhaps someone here can figure out (a) what a planar distributive lattice > is, (b) whether Dr. Jipsen enumerated them correctly, (c) whether I counted > them off Jipsen's poster correctly, and (d) whether to add the sequence. > > Thank you! > > -- > Seqfan Mailing list - http://list.seqfan.eu/ ```
769
3,035
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.984375
3
CC-MAIN-2024-18
latest
en
0.930089
https://gmatclub.com/forum/a-drawer-contains-8-socks-and-2-socks-are-selected-at-86757.html?sort_by_oldest=true
1,511,469,787,000,000,000
text/html
crawl-data/CC-MAIN-2017-47/segments/1510934806939.98/warc/CC-MAIN-20171123195711-20171123215711-00304.warc.gz
605,179,960
53,553
It is currently 23 Nov 2017, 13:43 ### GMAT Club Daily Prep #### Thank you for using the timer - this advanced tool can estimate your performance and suggest more practice questions. We have subscribed you to Daily Prep Questions via email. Customized for You we will pick new questions that match your level based on your Timer History Track every week, we’ll send you an estimated GMAT score based on your performance Practice Pays we will pick new questions that match your level based on your Timer History # Events & Promotions ###### Events & Promotions in June Open Detailed Calendar # A drawer contains 8 socks, and 2 socks are selected at Author Message TAGS: ### Hide Tags Director Joined: 01 Apr 2008 Posts: 872 Kudos [?]: 861 [5], given: 18 Name: Ronak Amin Schools: IIM Lucknow (IPMX) - Class of 2014 A drawer contains 8 socks, and 2 socks are selected at [#permalink] ### Show Tags 12 Nov 2009, 09:35 5 KUDOS 25 This post was BOOKMARKED 00:00 Difficulty: 85% (hard) Question Stats: 42% (01:14) correct 58% (00:59) wrong based on 667 sessions ### HideShow timer Statistics A drawer contains 8 socks, and 2 socks are selected at random without replacement. What is the probability that both socks are black? (1) The probability is less than 0.2 that the first sock is black. (2) The probability is more than 0.8 that the first sock is white. [Reveal] Spoiler: OA Kudos [?]: 861 [5], given: 18 Math Expert Joined: 02 Sep 2009 Posts: 42338 Kudos [?]: 133142 [12], given: 12415 ### Show Tags 12 Nov 2009, 09:54 12 KUDOS Expert's post 10 This post was BOOKMARKED Economist wrote: A drawer contains 8 socks, and 2 socks are selected at random without replacement. What is the probability that both socks are black? (1) The probability is less than 0.2 that the first sock is black. (2) The probability is more than 0.8 that the first sock is white. I suppose 8 socks means, that there are 8 and not 8 pairs. Note that we don't know if there is any # of black socks is the drawer. 
Let B be the # of black socks. And W the # of white socks. (1) $$\frac{B}{8}<0.2$$ --> $$B<1.6$$, so there can be 1 or 0 black socks in the drawer. In any case as the # is less then 2 the probability of picking 2 black socks is 0. Sufficient. (2) $$\frac{W}{8}>0.8$$ --> $$W>6.4$$, so there are 7 or 8 white socks in the drawer. As the maximum possible # of black socks is 1, thus the probability of 2 blacks is 0. Sufficient. _________________ Kudos [?]: 133142 [12], given: 12415 Senior Manager Joined: 20 Jul 2010 Posts: 256 Kudos [?]: 103 [0], given: 9 ### Show Tags 14 Sep 2010, 16:03 I had initially thought answer as D but later changed to A(my mistake). My thought process was that with second choice I don't know if remaining sock is black or not. I failed to calculate that the answer of two draws would be zero in this case also _________________ If you like my post, consider giving me some KUDOS !!!!! Like you I need them Kudos [?]: 103 [0], given: 9 Veritas Prep GMAT Instructor Joined: 16 Oct 2010 Posts: 7747 Kudos [?]: 17869 [0], given: 235 Location: Pune, India Re: Black and White Socks [#permalink] ### Show Tags 27 Nov 2010, 21:49 udaymathapati wrote: A drawer contains 8 socks, and 2 socks are selected at random without replacement. What is the probability that both socks are black? (1) The probability is less than 0.2 that the first sock is black. (2) The probability is more than 0.8 that the first sock is white. How about I tell you on what lines to think and perhaps you can arrive at the answer? Let's say there are x black socks in the drawer. When I pick the first one, the probability of picking a black sock is x/8 which is less than 1/5. Any ideas? 
_________________ Karishma Veritas Prep | GMAT Instructor My Blog Get started with Veritas Prep GMAT On Demand for \$199 Veritas Prep Reviews Kudos [?]: 17869 [0], given: 235 Math Expert Joined: 02 Sep 2009 Posts: 42338 Kudos [?]: 133142 [0], given: 12415 Re: A drawer contains 8 socks, and 2 socks are selected at [#permalink] ### Show Tags 07 Jun 2013, 06:12 Bumping for review and further discussion*. Get a kudos point for an alternative solution! *New project from GMAT Club!!! Check HERE Theory on probability problems: math-probability-87244.html All DS probability problems to practice: search.php?search_id=tag&tag_id=33 All PS probability problems to practice: search.php?search_id=tag&tag_id=54 Tough probability questions: hardest-area-questions-probability-and-combinations-101361.html _________________ Kudos [?]: 133142 [0], given: 12415 Intern Joined: 24 Jun 2014 Posts: 1 Kudos [?]: [0], given: 0 Re: A drawer contains 8 socks, and 2 socks are selected at [#permalink] ### Show Tags 27 Jul 2014, 06:19 Hi Bunuel, Why is that in this problem the socks are assumed to be either white or black? No where in the question is it mentioned that the socks are either white or black. So, shouldn't the answer be E? Kudos [?]: [0], given: 0 Math Expert Joined: 02 Sep 2009 Posts: 42338 Kudos [?]: 133142 [1], given: 12415 Re: A drawer contains 8 socks, and 2 socks are selected at [#permalink] ### Show Tags 27 Jul 2014, 15:51 1 KUDOS Expert's post 1 This post was BOOKMARKED dilipnair27 wrote: Hi Bunuel, Why is that in this problem the socks are assumed to be either white or black? No where in the question is it mentioned that the socks are either white or black. So, shouldn't the answer be E? We are not assuming that. From (1) we have that there could be 1 or 0 black socks in the drawer (for example, there could be 1 black and 7 red socks, or all red socks). No, matter which it is, the probability of picking 2 black socks is 0. 
From (2) we have that there could be 7 or 8 white socks in the drawer (for example, there could be 7 white and 1 green, or 7 white and 1 black, or 8 white socks). So, there could be at most 1 black sock (0 or 1). So, in any case the probability of picking 2 black socks is 0. The bottom line is that, from each statement we have that the maximum number of black socks is 1, which makes the probability of picking 2 black socks equal 0. Does this make sense? _________________ Kudos [?]: 133142 [1], given: 12415 Senior Manager Joined: 18 Jun 2016 Posts: 267 Kudos [?]: 187 [0], given: 103 Location: India GMAT 1: 720 Q50 V38 GMAT 2: 750 Q49 V42 GPA: 4 WE: General Management (Other) Re: A drawer contains 8 socks, and 2 socks are selected at [#permalink] ### Show Tags 13 Sep 2016, 05:11 Economist wrote: A drawer contains 8 socks, and 2 socks are selected at random without replacement. What is the probability that both socks are black? (1) The probability is less than 0.2 that the first sock is black. (2) The probability is more than 0.8 that the first sock is white. Number of Socks HAS to be an integer. Therefore, (1) & (2) are saying the same thing because $$\frac{(Favorable Cases)}{(Total Number of Cases)} = Probability$$ => Favorable Cases = Probability * Total Number of Cases => Number of Ways to pick a Black Sock in 1st Attempt = Prob. * 8 (1) The probability is less than 0.2 that the first sock is black. => Prob of 1at Black sock < 0.2 (2) The probability is more than 0.8 that the first sock is white. => Prob of 1st white > 0.8 => Prob of 1st sock of any color other than white < 0.2 Therefore, Prob of 1st Black Sock < 0.2 Hence, the answer cannot be anything but D or E. D if the statement is sufficient otherwise E. Now, Number of Ways to pick a Black Sock in 1st Attempt < 0.2 * 8 < 1.6 Hence, we have 0 or 1 Black sock. 
_________________ I'd appreciate learning about the grammatical errors in my posts Please hit Kudos If my Solution helps My Debrief for 750 - https://gmatclub.com/forum/from-720-to-750-one-of-the-most-difficult-pleatues-to-overcome-246420.html My CR notes - https://gmatclub.com/forum/patterns-in-cr-questions-243450.html Rest of the Notes coming soon. Kudos [?]: 187 [0], given: 103 Non-Human User Joined: 09 Sep 2013 Posts: 15508 Kudos [?]: 283 [0], given: 0 Re: A drawer contains 8 socks, and 2 socks are selected at [#permalink] ### Show Tags 31 Oct 2017, 17:03 Hello from the GMAT Club BumpBot! Thanks to another GMAT Club member, I have just discovered this valuable topic, yet it had no discussion for over a year. I am now bumping it up - doing my job. I think you may find it valuable (esp those replies with Kudos). Want to see all other topics I dig out? Follow me (click follow button on profile). You will receive a summary of all topics I bump in your profile area as well as via email. _________________ Kudos [?]: 283 [0], given: 0 Re: A drawer contains 8 socks, and 2 socks are selected at   [#permalink] 31 Oct 2017, 17:03 Display posts from previous: Sort by
2,555
8,758
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.1875
4
CC-MAIN-2017-47
latest
en
0.920421
https://www.slideserve.com/zanta/review-of-vector-analysis
1,540,183,949,000,000,000
text/html
crawl-data/CC-MAIN-2018-43/segments/1539583514497.14/warc/CC-MAIN-20181022025852-20181022051352-00259.warc.gz
1,076,265,483
18,119
Review of Vector Analysis 1 / 56 # Review of Vector Analysis - PowerPoint PPT Presentation Review of Vector Analysis. Review of Vector Analysis Vector analysis is a mathematical tool with which electromagnetic (EM) concepts are most conveniently expressed and best comprehended. A quantity is called a scalar if it has only magnitude (e.g., I am the owner, or an agent authorized to act on behalf of the owner, of the copyrighted work described. ## PowerPoint Slideshow about 'Review of Vector Analysis' - zanta Download Policy: Content on the Website is provided to you AS IS for your information and personal use and may not be sold / licensed / shared on other websites without getting consent from its author.While downloading, if for some reason you are not able to download a presentation, the publisher may have deleted the file from their server. - - - - - - - - - - - - - - - - - - - - - - - - - - E N D - - - - - - - - - - - - - - - - - - - - - - - - - - Presentation Transcript Review of Vector Analysis Vector analysis is a mathematical tool with which electromagnetic (EM) concepts are most conveniently expressed and best comprehended. A quantity is called a scalar if it has only magnitude (e.g., mass, temperature, electric potential, population). A quantity is called a vector if it has both magnitude and direction (e.g., velocity, force, electric field intensity). The magnitude of a vector is a scalar written as A or Review of Vector Analysis A unit vector along is defined as a vector whose magnitude is unity (that is,1) and its direction is along Review of Vector Analysis Thus which completely specifies in terms of A and its direction A vector in Cartesian (or rectangular) coordinates may be represented as or where AX, Ay, and AZ are called the components of in the x, y, and z directions, respectively; , , and are unit vectors in the x, y and z directions, respectively. 
Review of Vector Analysis Review of Vector Analysis Suppose a certain vector is given by The magnitude or absolute value of the vector is (from the Pythagorean theorem) A point P in Cartesian coordinates may be represented by specifying (x, y, z). The radius vector (or position vector) of point P is defined as the directed distance from the origin O to P; that is, The unit vector in the direction of ris Review of Vector Analysis Review of Vector Analysis Vector Algebra Two vectors and can be added together to give another vector ; that is , Thus, if and Parallelogram Head to rule tail rule Vector subtraction is similarly carried out as Review of Vector Analysis The three basic laws of algebra obeyed by any given vector A, B, and C, are summarized as follows: Commutative Associative Distributive where k and l are scalars Review of Vector Analysis When two vectors and are multiplied, the result is either a scalar or a vector depending on how they are multiplied. There are two types of vector multiplication: 1. Scalar (or dot) product: 2.Vector (or cross) product: The dot product of the two vectors and is defined geometrically as the product of the magnitude of and the projection of onto (or vice versa): where is the smaller angle between and Review of Vector Analysis If and then which is obtained by multiplying and component by component Review of Vector Analysis The cross product of two vectors and is defined as where is a unit vector normal to the plane containing and . The direction of is determined using the right- hand rule or the right-handed screw rule. Review of Vector Analysis Direction of and using (a) right-hand rule, (b) right-handed screw rule Review of Vector Analysis Note that the cross product has the following basic properties: (i) It is not commutative: It is anticommutative: (ii) It is not associative: (iii) It is distributive: (iv) Review of Vector Analysis Also note that which are obtained in cyclic permutation and illustrated below. 
Review of Vector Analysis Cross product using cyclic permutation: (a) moving clockwise leads to positive results; (b) moving counterclockwise leads to negative results Scalar and Vector Fields A field can be defined as a function that specifies a particular quantity everywhere in a region (e.g., temperature distribution in a building), or as a spatial distribution of a quantity, which may or may not be a function of time. Scalar quantity scalar function of position scalar field Vector quantity vector function of position vector field Review of Vector Analysis Line Integrals A line integral of a vector field can be calculated whenever a path has been specified through the field. The line integral of the field along the path P is defined as Review of Vector Analysis Example. The vector is given by where Vo is a constant. Find the line integral where the path P is the closed path below. It is convenient to break the path P up into the four parts P1, P2, P3 , and P4. Review of Vector Analysis For segment P1, Thus For segment P2, and Review of Vector Analysis For segment P3, Review of Vector Analysis Example. Let the vector field be given by . Find the line integral of over the semicircular path shown below Review of Vector Analysis Consider the contribution of the path segment located at the angle Surface Integrals Surface integration amounts to adding up normal components of a vector field over a given surface S. We break the surface S into small surface elements and assign to each element a vector is equal to the area of the surface element is the unit vector normal (perpendicular) to the surface element Review of Vector Analysis The flux of a vector field A through surface S (If S is a closed surface, is by convention directed outward) Then we take the dot product of the vector field at the position of the surface element with vector . The result is a differential scalar. The sum of these scalars over all the surface elements is the surface integral. 
is the component of in the direction of (normal to the surface). Therefore, the surface integral can be viewed as the flow (or flux) of the vector field through the surface S (the net outward flux in the case of a closed surface). Review of Vector Analysis Example. Let be the radius vector The surface S is defined by The normal to the surface is directed in the +z direction Find Review of Vector Analysis Review of Vector Analysis Surface S V is not perpendicular to S, except at one point on the Z axis Introduction to Differential Operators An operator acts on a vector field at a point to produce some function of the vector field. It is like a function of a function. If O is an operator acting on a function f(x) of the single variable X , the result is written O[f(x)]; and means that first f acts on X and then O acts on f. Example. f(x) = x2 and the operator O is (d/dx+2) O[f(x)]=d/dx(x2 ) + 2(x2 ) = 2x +2(x2 ) = 2x(1+x) Review of Vector Analysis either a scalar or a vector. Example. (the length operator), Evaluate at the point x=1, y=2, z=-2 Thus, O is a scalar operator acting on a vector field. Example. , , x=1, y=2, z=-2 Thus, O is a vector operator acting on a vector field. Review of Vector Analysis components: where , , and are three scalar features functions of position. Operators can then be specified in terms of , , and . The divergence operator is defined as Review of Vector Analysis Example . Evaluate at the point x=1, y=-1, z=2. Review of Vector Analysis Clearly the divergence operator is a scalar operator. 2. - divergence, acts on a vector to produce a scalar 3. - curl, acts on a vector to produce a vector 4. -Laplacian, acts on a scalar to produce a scalar Each of these will be defined in detail in the subsequent sections. Review of Vector Analysis Coordinate Systems In order to define the position of a point in space, an appropriate coordinate system is needed. 
A considerable amount of work and time may be saved by choosing a coordinate system that best fits a given problem. A hard problem in one coordinate system may turn out to be easy in another system. We will consider the Cartesian, the circular cylindrical, and the spherical coordinate systems. All three are orthogonal (the coordinates are mutually perpendicular). Review of Vector Analysis Cartesian coordinates (x,y,z) The ranges of the coordinate variables are A vector in Cartesian coordinates can be written as Review of Vector Analysis The intersection of three orthogonal infinite places (x=const, y= const, and z = const) defines point P. Constant x, y and z surfaces Review of Vector Analysis Differential elements in the right handed Cartesian coordinate system Cylindrical Coordinates . - the radial distance from the z – axis - the azimuthal angle, measured from the x- axis in the xy – plane - the same as in the Cartesian system. A vector in cylindrical coordinates can be written as Cylindrical coordinates amount to a combination of rectangular coordinates and polar coordinates. Review of Vector Analysis Positions in the x-y plane are determined by the values of Review of Vector Analysis Relationship between (x,y,z) and Review of Vector Analysis Point P and unit vectors in the cylindrical coordinate system Review of Vector Analysis semi-infinite plane with its edge along the z - axis Constant surfaces Review of Vector Analysis Metric coefficient Differential elements in cylindrical coordinates Review of Vector Analysis Cylindrical surface ( =const) Planar surface ( = const) Planar surface ( z =const) Spherical coordinates . 
- the distance from the origin to the point P - the angle between the z-axis and the radius vector of P - the same as the azimuthal angle in cylindrical coordinates Review of Vector Analysis Review of Vector Analysis Point P and unit vectors in spherical coordinates A vector A in spherical coordinates may be written as Review of Vector Analysis Relationships between space variables Review of Vector Analysis Constant surfaces Review of Vector Analysis Differential elements in the spherical coordinate system Review of Vector Analysis POINTS TO REMEMBER 1. 2. 3.
2,270
10,248
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.828125
4
CC-MAIN-2018-43
latest
en
0.922805
https://www.convertunits.com/from/lieue+%5BFrance,+metric%5D/to/foot+%5BFrance%5D
1,586,137,663,000,000,000
text/html
crawl-data/CC-MAIN-2020-16/segments/1585371612531.68/warc/CC-MAIN-20200406004220-20200406034720-00296.warc.gz
857,839,032
7,544
## ››Convert lieue [France, metric] to foot [France] lieue [France, metric] foot [France] How many lieue [France, metric] in 1 foot [France]? The answer is 8.121015E-5. We assume you are converting between lieue [France, metric] and foot [France]. You can view more details on each measurement unit: lieue [France, metric] or foot [France] The SI base unit for length is the metre. 1 metre is equal to 0.00025 lieue [France, metric], or 3.0784329298739 foot [France]. Note that rounding errors may occur, so always check the results. Use this page to learn how to convert between lieue [France, metric] and feet [France]. Type in your own numbers in the form to convert the units! ## ››Quick conversion chart of lieue [France, metric] to foot [France] 1 lieue [France, metric] to foot [France] = 12313.73172 foot [France] 2 lieue [France, metric] to foot [France] = 24627.46344 foot [France] 3 lieue [France, metric] to foot [France] = 36941.19516 foot [France] 4 lieue [France, metric] to foot [France] = 49254.92688 foot [France] 5 lieue [France, metric] to foot [France] = 61568.6586 foot [France] 6 lieue [France, metric] to foot [France] = 73882.39032 foot [France] 7 lieue [France, metric] to foot [France] = 86196.12204 foot [France] 8 lieue [France, metric] to foot [France] = 98509.85376 foot [France] 9 lieue [France, metric] to foot [France] = 110823.58548 foot [France] 10 lieue [France, metric] to foot [France] = 123137.31719 foot [France] ## ››Want other units? You can do the reverse unit conversion from foot [France] to lieue [France, metric], or enter any two units below: ## Enter two units to convert From: To: ## ››Metric conversions and more ConvertUnits.com provides an online conversion calculator for all types of measurement units. You can find metric conversion tables for SI units, as well as English units, currency, and other data. Type in unit symbols, abbreviations, or full names for units of length, area, mass, pressure, and other types. 
Examples include mm, inch, 100 kg, US fluid ounce, 6'3", 10 stone 4, cubic cm, metres squared, grams, moles, feet per second, and many more!
602
2,135
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.984375
3
CC-MAIN-2020-16
latest
en
0.780404
https://convert-dates.com/days-from/2592/2024/02/13
1,709,134,003,000,000,000
text/html
crawl-data/CC-MAIN-2024-10/segments/1707947474737.17/warc/CC-MAIN-20240228143955-20240228173955-00603.warc.gz
182,254,354
4,329
## 2592 Days From February 13, 2024 Want to figure out the date that is exactly two thousand five hundred ninety two days from Feb 13, 2024 without counting? Your starting date is February 13, 2024 so that means that 2592 days later would be March 20, 2031. You can check this by using the date difference calculator to measure the number of days from Feb 13, 2024 to Mar 20, 2031. March 2031 • Sunday • Monday • Tuesday • Wednesday • Thursday • Friday • Saturday 1. 1 1. 2 2. 3 3. 4 4. 5 5. 6 6. 7 7. 8 1. 9 2. 10 3. 11 4. 12 5. 13 6. 14 7. 15 1. 16 2. 17 3. 18 4. 19 5. 20 6. 21 7. 22 1. 23 2. 24 3. 25 4. 26 5. 27 6. 28 7. 29 1. 30 2. 31 March 20, 2031 is a Thursday. It is the 79th day of the year, and in the 12nd week of the year (assuming each week starts on a Sunday), or the 1st quarter of the year. There are 31 days in this month. 2031 is not a leap year, so there are 365 days in this year. The short form for this date used in the United States is 03/20/2031, and almost everywhere else in the world it's 20/03/2031. ### What if you only counted weekdays? In some cases, you might want to skip weekends and count only the weekdays. This could be useful if you know you have a deadline based on a certain number of business days. If you are trying to see what day falls on the exact date difference of 2592 weekdays from Feb 13, 2024, you can count up each day skipping Saturdays and Sundays. Start your calculation with Feb 13, 2024, which falls on a Tuesday. Counting forward, the next day would be a Wednesday. To get exactly two thousand five hundred ninety two weekdays from Feb 13, 2024, you actually need to count 3628 total days (including weekend days). That means that 2592 weekdays from Feb 13, 2024 would be January 19, 2034. If you're counting business days, don't forget to adjust this date for any holidays. January 2034 • Sunday • Monday • Tuesday • Wednesday • Thursday • Friday • Saturday 1. 1 2. 2 3. 3 4. 4 5. 5 6. 6 7. 7 1. 8 2. 9 3. 10 4. 11 5. 12 6. 13 7. 14 1. 
15 2. 16 3. 17 4. 18 5. 19 6. 20 7. 21 1. 22 2. 23 3. 24 4. 25 5. 26 6. 27 7. 28 1. 29 2. 30 3. 31 January 19, 2034 is a Thursday. It is the 19th day of the year, and in the 19th week of the year (assuming each week starts on a Sunday), or the 1st quarter of the year. There are 31 days in this month. 2034 is not a leap year, so there are 365 days in this year. The short form for this date used in the United States is 01/19/2034, and almost everywhere else in the world it's 19/01/2034. ### Enter the number of days and the exact date Type in the number of days and the exact date to calculate from. If you want to find a previous date, you can enter a negative number to figure out the number of days before the specified date.
941
2,742
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.734375
4
CC-MAIN-2024-10
latest
en
0.928807
https://www.iitianacademy.com/iit-jee-main-maths-trigonometrical-identities-and-equations-study-materials/
1,720,990,824,000,000,000
text/html
crawl-data/CC-MAIN-2024-30/segments/1720763514638.53/warc/CC-MAIN-20240714185510-20240714215510-00681.warc.gz
732,725,285
35,982
# IIT JEE Main Maths -Trigonometrical identities and equations- Study Materials #### Examples and Exercise IIT JEE (Main) Mathematics ,”Trigonometrical identities and equations.” Notes ,Test Papers, Sample Papers, Past Years Papers , NCERT , S. L. Loney and Hall & Knight Solutions and Help from Ex- IITian Trigonometrical identities and equations. ### IITian Academy Notes for IIT JEE (Main) Mathematics – Trigonometrical identities and equations. The success mantra of the JEE is practice and hard work. Gone are the days when students used to spend hours in attempting one question. Now it is an era of multiple choice questions. The JEE Mathematics questions test a student’s acquired knowledge as well as his aptitude. We have excellent notes prepared by Ex-IITian to best match the requirement of the exam. Focus is given on problem solving skills and small tips and tricks to do it faster and easier. We , Ex-IITian at https://www.iitianacademy.com. will make sure you understand the concept well. ### IIT JEE (Main) Mathematics, Trigonometrical identities and equations. Solved Examples and Practice Papers. Get excellent practice papers and Solved examples to grasp the concept and check for speed and make you ready for big day. These Question Papers are prepared by Ex-IITIan for IIT JEE (Main) Mathematics , Trigonometrical identities and equations.. ### S. L. Loney IIT JEE (Main) Mathematics This book is the one of the most beautifully written book by the author. Trigonometry is considered to be one of the easiest topics in mathematics by the aspirants of IIT JEE, AIEEE and other state level engineering examination preparation. It would not be untrue to say that most of the sources have taken inspiration from this book as it is the most reliable source. The best part of this book is its coverage in Heights and Distances and Inverse Trigonometric Functions. 
The book gives a very good learning experience and the exercises which follow are not only comprehensive but they have both basic and standard questions.. I will help you online for any doubt / clarification. ### Hall & Knight IIT JEE (Main) Mathematics Algebra by Hall and Knight is one of the best books for JEE preparation. Students preparing for IIT JEE and other engineering entrance exams as well as students appearing for board exams should read this everyday, especially to master Algebra and Trigonometrical identities and equations.. Hall and Knight have explained the concepts logically in their book. ### IIT JEE (Main) Mathematics Assignments Chapter wise assignments are being given by teachers to students to make them understand the chapter concepts. Its extremely critical for all CBSE students to practice all assignments which will help them in gaining better marks in examinations. All assignments available for free download on the website are developed by the best teachers having many years of teaching experience in CBSE schools all over the country. Students, teachers and parents are advised to contact me online incase of any doubt / clarification. ### Past Many Years (40 Years) Questions IIT JEE (Main) Mathematics Solutions Trigonometrical identities and equations. Past 40 Years Question Papers Solutions for IIT JEE (Main) Mathematics Trigonometrical identities and equations. are provided here with simple step-by-step explanations. These solutions for Trigonometrical identities and equations. are extremely popular among IIT JEE (Main) students for Chemistry . Trigonometrical identities and equations. Solutions come handy for quickly completing your homework and preparing for exams. All questions and answers from the Past Many Years Question Papers Book of IIT JEE (Main) Mathematics Chapter Trigonometrical identities and equations. are provided here for . I will help you online for any doubt / clarification. Scroll to Top
794
3,860
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.828125
3
CC-MAIN-2024-30
latest
en
0.92168
http://gmatclub.com/forum/the-recent-proliferation-of-newpaper-articles-in-major-9104.html?oldest=1
1,484,903,821,000,000,000
text/html
crawl-data/CC-MAIN-2017-04/segments/1484560280801.0/warc/CC-MAIN-20170116095120-00497-ip-10-171-10-70.ec2.internal.warc.gz
117,952,123
56,203
The recent proliferation of newpaper articles in major : GMAT Critical Reasoning (CR) Check GMAT Club Decision Tracker for the Latest School Decision Releases http://gmatclub.com/AppTrack It is currently 20 Jan 2017, 01:17 ### GMAT Club Daily Prep #### Thank you for using the timer - this advanced tool can estimate your performance and suggest more practice questions. We have subscribed you to Daily Prep Questions via email. Customized for You we will pick new questions that match your level based on your Timer History Track Your Progress every week, we’ll send you an estimated GMAT score based on your performance Practice Pays we will pick new questions that match your level based on your Timer History # Events & Promotions ###### Events & Promotions in June Open Detailed Calendar # The recent proliferation of newpaper articles in major new topic post reply Question banks Downloads My Bookmarks Reviews Important topics Author Message TAGS: ### Hide Tags Senior Manager Joined: 25 Jul 2004 Posts: 273 Followers: 1 Kudos [?]: 10 [0], given: 0 The recent proliferation of newpaper articles in major [#permalink] ### Show Tags 24 Aug 2004, 09:48 00:00 Difficulty: (N/A) Question Stats: 100% (00:00) correct 0% (00:00) wrong based on 1 sessions ### HideShow timer Statistics The recent proliferation of newpaper articles in major publications that have been exposed as fabrications serves to bolster the contention that publishers are more interested in selling copy than in printing the truth. Even minor publications have staffs to check such obvious fraud. The above argument assumes that (A) newspaper stories of dubious authenticity are a new phenomenon. (B) minor publications do a better job of fact-checking that do major publications. (C) everything a newspaper prints must be factually verifiable. (D) only recently have newspapers admintted to publishing erroneous stories. (E) publishers are ultimately responsible for what is printed in their newspapers. 
If you have any questions you can ask an expert New! SVP Joined: 16 Oct 2003 Posts: 1810 Followers: 4 Kudos [?]: 136 [0], given: 0 ### Show Tags 24 Aug 2004, 11:57 C for me too. All the newspapers verify fabricated stories. Manager Joined: 11 Jul 2004 Posts: 119 Followers: 1 Kudos [?]: 22 [0], given: 0 ### Show Tags 24 Aug 2004, 13:25 This is close between C and E but I am going to say E for two reasons 1. The author seems to think the fraud is obvious and cotends that the publishers just care about seeling copies.. i.e. publishers igonore the fraud so he is indeirectly assuming publisher is resposible for whatever is printed 2. choice C says "everything" must be verifiable. the use of everything here is too strong sigep whats the OA? GMAT Club Legend Joined: 15 Dec 2003 Posts: 4302 Followers: 40 Kudos [?]: 429 [0], given: 0 ### Show Tags 24 Aug 2004, 13:28 damit wrote: This is close between C and E but I am going to say E for two reasons 1. The author seems to think the fraud is obvious and cotends that the publishers just care about seeling copies.. i.e. publishers igonore the fraud so he is indeirectly assuming publisher is resposible for whatever is printed 2. choice C says "everything" must be verifiable. the use of everything here is too strong sigep whats the OA? Agree with damit. E should be it. _________________ Best Regards, Paul Senior Manager Joined: 25 Jul 2004 Posts: 273 Followers: 1 Kudos [?]: 10 [0], given: 0 ### Show Tags 25 Aug 2004, 09:15 E is the OA. if we negate E, we see that publishes are not responsible for the advertisements published in their magazines. Hence the whole thing falls apart. SVP Joined: 16 Oct 2003 Posts: 1810 Followers: 4 Kudos [?]: 136 [0], given: 0 ### Show Tags 25 Aug 2004, 09:30 My assumption logic is disasterous. 
25 Aug 2004, 09:30 Similar topics Replies Last post Similar Topics: 1 Columnist: In a recent article an economist argues 3 03 Jun 2016, 23:16 6 A recent series of newspaper articles revealed that 3 16 Dec 2010, 10:23 71 Recently, the research and development departments at major 33 17 Oct 2010, 16:41 The authors of a recent article examined warnings of an 9 12 Nov 2007, 12:38 The recent proliferation of newspaper articles in major 4 23 Jun 2007, 16:23 Display posts from previous: Sort by # The recent proliferation of newpaper articles in major new topic post reply Question banks Downloads My Bookmarks Reviews Important topics Powered by phpBB © phpBB Group and phpBB SEO Kindly note that the GMAT® test is a registered trademark of the Graduate Management Admission Council®, and this site has neither been reviewed nor endorsed by GMAC®.
1,207
4,678
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.578125
3
CC-MAIN-2017-04
latest
en
0.894335
https://www.studypool.com/discuss/468532/plez-halp-with-these-dst?free
1,508,812,658,000,000,000
text/html
crawl-data/CC-MAIN-2017-43/segments/1508187827853.86/warc/CC-MAIN-20171024014937-20171024034937-00103.warc.gz
984,482,431
14,444
##### PLEZ HALP WITH THESE! DST label Algebra account_circle Unassigned schedule 1 Day account_balance_wallet \$5 I NEED HELP! 1: For a ship moving against the current, it takes 9 hours to cover a distance of 113.4 miles. How much does it take this ship to return if the rate of the current is 1.9 mph? 2: Two trains move between two towns. It takes the first train 2 hr 48 min and it takes the second 4 hr 40 min. The rate of the first train is 26 mph more than that of the second train. What is the distance between these towns? Apr 10th, 2015 1.  Let  x = speed of the ship without current Then, moving against the current x - 1.9 = 113.4/9 x - 1.9 = 12.6 x = 14.5 mph moving with the current the ship's speed is 14.5 + 1.9 = 16.4mph therefore the time it takes to return is 113.4/16.4 = 6.9 hours 2.  Let x = rate of the first train Let y = rate of the second train then, x = y + 26 distance = (rate)(time) For the first train distance = 2.8x For the second train distance= 4.67y but both distances are the same, so 2.8x = 4.67y Now substitute x for y+26 2.8(y+26) = 4.67y 2.8y + 72.8 = 4.67y 1.87y = 72.8 y = 38.93 mph so, the distance = (38.93)(4.67) = 181.8 miles Apr 10th, 2015 Hey, umm, sorry, but the second one wasnt right. Could you do it again? Thanks, and sorry for all the hassle Apr 10th, 2015 ... Apr 10th, 2015 ... Apr 10th, 2015 Oct 24th, 2017 check_circle check_circle check_circle Secure Information Content will be erased after question is completed. check_circle
514
1,522
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.90625
4
CC-MAIN-2017-43
latest
en
0.907756
https://archive.softwareheritage.org/browse/content/sha1_git:e338ee9d65a91064e9eb88a08900a1ae64513860/?path=45f4baede128851311da8149bf897d2033287cc3/expectations.py
1,719,070,202,000,000,000
text/html
crawl-data/CC-MAIN-2024-26/segments/1718198862404.32/warc/CC-MAIN-20240622144011-20240622174011-00502.warc.gz
88,378,267
16,340
expectations.py ``````# Copyright 2018 the GPflow authors. # # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # # Unless required by applicable law or agreed to in writing, software # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import functools import warnings import itertools as it import numpy as np import tensorflow as tf from . import kernels, mean_functions, settings from .features import Kuf, InducingFeature, InducingPoints from .decors import params_as_tensors_for from .probability_distributions import Gaussian, DiagonalGaussian, MarkovGaussian from .dispatch import dispatch logger = settings.logger() # Sections: # - Analytic Expectations # - RBF Kernel # - Linear Kernel # - exKxz transpose and mean function handling # - Mean Functions # - Sum Kernel # - RBF-Linear Cross Kernel Expectations # - Product Kernel # - Conversion to Gaussian from Diagonal or Markov """ Compute the expectation <obj1(x) obj2(x)>_p(x) Uses Gauss-Hermite quadrature for approximate integration. 
:type p: (mu, cov) tuple or a `ProbabilityDistribution` object :type obj1: kernel, mean function, (kernel, features), or None :type obj2: kernel, mean function, (kernel, features), or None :param int num_gauss_hermite_points: passed to `_quadrature_expectation` to set the number of Gauss-Hermite points used :return: a 1-D, 2-D, or 3-D tensor containing the expectation """ if isinstance(p, tuple): assert len(p) == 2 if p[1].shape.ndims == 2: p = DiagonalGaussian(*p) elif p[1].shape.ndims == 3: p = Gaussian(*p) elif p[1].shape.ndims == 4: p = MarkovGaussian(*p) if isinstance(obj1, tuple): obj1, feat1 = obj1 else: feat1 = None if isinstance(obj2, tuple): obj2, feat2 = obj2 else: feat2 = None return _quadrature_expectation(p, obj1, feat1, obj2, feat2, num_gauss_hermite_points) def get_eval_func(obj, feature, slice=np.s_[...]): """ Return the function of interest (kernel or mean) for the expectation depending on the type of :obj: and whether any features are given """ if feature is not None: # kernel + feature combination if not isinstance(feature, InducingFeature) or not isinstance(obj, kernels.Kernel): raise TypeError("If `feature` is supplied, `obj` must be a kernel.") return lambda x: tf.transpose(Kuf(feature, obj, x))[slice] elif isinstance(obj, mean_functions.MeanFunction): return lambda x: obj(x)[slice] elif isinstance(obj, kernels.Kernel): return lambda x: obj.Kdiag(x) else: raise NotImplementedError() @dispatch((Gaussian, DiagonalGaussian), object, (InducingFeature, type(None)), object, (InducingFeature, type(None)), (int, type(None))) def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points): """ General handling of quadrature expectations for Gaussians and DiagonalGaussians Fallback method for missing analytic expectations """ num_gauss_hermite_points = 100 if num_gauss_hermite_points is None else num_gauss_hermite_points if obj2 is None: eval_func = lambda x: get_eval_func(obj1, feature1)(x) elif obj1 is None: raise 
NotImplementedError("First object cannot be None.") else: eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(x) * get_eval_func(obj2, feature2, np.s_[:, None, :])(x)) if isinstance(p, DiagonalGaussian): if isinstance(obj1, kernels.Kernel) and isinstance(obj2, kernels.Kernel) \ and obj1.on_separate_dims(obj2): # no joint expectations required num_gauss_hermite_points=num_gauss_hermite_points) num_gauss_hermite_points=num_gauss_hermite_points) return eKxz1[:, :, None] * eKxz2[:, None, :] else: cov = tf.matrix_diag(p.cov) else: cov = p.cov @dispatch(MarkovGaussian, object, (InducingFeature, type(None)), object, (InducingFeature, type(None)), (int, type(None))) def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points): """ Handling of quadrature expectations for Markov Gaussians (useful for time series) Fallback method for missing analytic expectations wrt Markov Gaussians Nota Bene: obj1 is always associated with x_n, whereas obj2 always with x_{n+1} if one requires e.g. 
<x_{n+1} K_{x_n, Z}>_p(x_{n:n+1}), compute the transpose and then transpose the result of the expectation """ num_gauss_hermite_points = 40 if num_gauss_hermite_points is None else num_gauss_hermite_points if obj2 is None: eval_func = lambda x: get_eval_func(obj1, feature1)(x) mu, cov = p.mu[:-1], p.cov[0, :-1] # cross covariances are not needed elif obj1 is None: eval_func = lambda x: get_eval_func(obj2, feature2)(x) mu, cov = p.mu[1:], p.cov[0, 1:] # cross covariances are not needed else: eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(tf.split(x, 2, 1)[0]) * get_eval_func(obj2, feature2, np.s_[:, None, :])(tf.split(x, 2, 1)[1])) mu = tf.concat((p.mu[:-1, :], p.mu[1:, :]), 1) # Nx2D cov_top = tf.concat((p.cov[0, :-1, :, :], p.cov[1, :-1, :, :]), 2) # NxDx2D cov_bottom = tf.concat((tf.matrix_transpose(p.cov[1, :-1, :, :]), p.cov[0, 1:, :, :]), 2) cov = tf.concat((cov_top, cov_bottom), 1) # Nx2Dx2D # =========================== ANALYTIC EXPECTATIONS =========================== def expectation(p, obj1, obj2=None, nghp=None): """ Compute the expectation <obj1(x) obj2(x)>_p(x) Uses multiple-dispatch to select an analytical implementation, if one is available. If not, it falls back to quadrature. 
:type p: (mu, cov) tuple or a `ProbabilityDistribution` object :type obj1: kernel, mean function, (kernel, features), or None :type obj2: kernel, mean function, (kernel, features), or None :param int nghp: passed to `_quadrature_expectation` to set the number of Gauss-Hermite points used: `num_gauss_hermite_points` :return: a 1-D, 2-D, or 3-D tensor containing the expectation Allowed combinations - Psi statistics: >>> eKdiag = expectation(p, kern) (N) # Psi0 >>> eKxz = expectation(p, (kern, feat)) (NxM) # Psi1 >>> exKxz = expectation(p, identity_mean, (kern, feat)) (NxDxM) >>> eKzxKxz = expectation(p, (kern, feat), (kern, feat)) (NxMxM) # Psi2 - kernels and mean functions: >>> eKzxMx = expectation(p, (kern, feat), mean) (NxMxQ) >>> eMxKxz = expectation(p, mean, (kern, feat)) (NxQxM) - only mean functions: >>> eMx = expectation(p, mean) (NxQ) >>> eM1x_M2x = expectation(p, mean1, mean2) (NxQ1xQ2) .. note:: mean(x) is 1xQ (row vector) - different kernels. This occurs, for instance, when we are calculating Psi2 for Sum kernels: >>> eK1zxK2xz = expectation(p, (kern1, feat), (kern2, feat)) (NxMxM) """ if isinstance(p, tuple): assert len(p) == 2 if p[1].shape.ndims == 2: p = DiagonalGaussian(*p) elif p[1].shape.ndims == 3: p = Gaussian(*p) elif p[1].shape.ndims == 4: p = MarkovGaussian(*p) if isinstance(obj1, tuple): obj1, feat1 = obj1 else: feat1 = None if isinstance(obj2, tuple): obj2, feat2 = obj2 else: feat2 = None try: return _expectation(p, obj1, feat1, obj2, feat2, nghp=nghp) except NotImplementedError as e: # pragma: no cover warn_msg = "Quadrature is used to calculate the expectation. 
" + str(e) logger.warning(warn_msg) return _quadrature_expectation(p, obj1, feat1, obj2, feat2, nghp) # ================================ RBF Kernel ================================= @dispatch(Gaussian, kernels.RBF, type(None), type(None), type(None)) def _expectation(p, kern, none1, none2, none3, nghp=None): """ Compute the expectation: <diag(K_{X, X})>_p(X) - K_{.,.} :: RBF kernel :return: N """ return kern.Kdiag(p.mu) @dispatch(Gaussian, kernels.RBF, InducingPoints, type(None), type(None)) def _expectation(p, kern, feat, none1, none2, nghp=None): """ Compute the expectation: <K_{X, Z}>_p(X) - K_{.,.} :: RBF kernel :return: NxM """ with params_as_tensors_for(kern, feat): # use only active dimensions Xcov = kern._slice_cov(p.cov) Z, Xmu = kern._slice(feat.Z, p.mu) D = tf.shape(Xmu)[1] if kern.ARD: lengthscales = kern.lengthscales else: lengthscales = tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov) # NxDxD all_diffs = tf.transpose(Z) - tf.expand_dims(Xmu, 2) # NxDxM exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True) # NxDxM exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1) # NxM exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM sqrt_det_L = tf.reduce_prod(lengthscales) sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1)) determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N return kern.variance * (determinants[:, None] * exponent_mahalanobis) @dispatch(Gaussian, mean_functions.Identity, type(None), kernels.RBF, InducingPoints) def _expectation(p, mean, none, kern, feat, nghp=None): """ Compute the expectation: expectation[n] = <x_n K_{x_n, Z}>_p(x_n) - K_{.,.} :: RBF kernel :return: NxDxM """ Xmu, Xcov = p.mu, p.cov with tf.control_dependencies([tf.assert_equal( tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.int_type), message="Currently cannot handle 
slicing in exKxz.")]): Xmu = tf.identity(Xmu) with params_as_tensors_for(kern, feat): D = tf.shape(Xmu)[1] lengthscales = kern.lengthscales if kern.ARD \ else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov) # NxDxD all_diffs = tf.transpose(feat.Z) - tf.expand_dims(Xmu, 2) # NxDxM sqrt_det_L = tf.reduce_prod(lengthscales) sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1)) determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N exponent_mahalanobis = tf.cholesky_solve(chol_L_plus_Xcov, all_diffs) # NxDxM non_exponent_term = tf.matmul(Xcov, exponent_mahalanobis, transpose_a=True) non_exponent_term = tf.expand_dims(Xmu, 2) + non_exponent_term # NxDxM exponent_mahalanobis = tf.reduce_sum(all_diffs * exponent_mahalanobis, 1) # NxM exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM return kern.variance * (determinants[:, None] * exponent_mahalanobis)[:, None, :] * non_exponent_term @dispatch(MarkovGaussian, mean_functions.Identity, type(None), kernels.RBF, InducingPoints) def _expectation(p, mean, none, kern, feat, nghp=None): """ Compute the expectation: expectation[n] = <x_{n+1} K_{x_n, Z}>_p(x_{n:n+1}) - K_{.,.} :: RBF kernel - p :: MarkovGaussian distribution (p.cov 2x(N+1)xDxD) :return: NxDxM """ Xmu, Xcov = p.mu, p.cov with tf.control_dependencies([tf.assert_equal( tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.int_type), message="Currently cannot handle slicing in exKxz.")]): Xmu = tf.identity(Xmu) with params_as_tensors_for(kern, feat): D = tf.shape(Xmu)[1] lengthscales = kern.lengthscales if kern.ARD \ else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov[0, :-1]) # NxDxD all_diffs = tf.transpose(feat.Z) - tf.expand_dims(Xmu[:-1], 2) # NxDxM sqrt_det_L = tf.reduce_prod(lengthscales) sqrt_det_L_plus_Xcov = 
tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1)) determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N exponent_mahalanobis = tf.cholesky_solve(chol_L_plus_Xcov, all_diffs) # NxDxM non_exponent_term = tf.matmul(Xcov[1, :-1], exponent_mahalanobis, transpose_a=True) non_exponent_term = tf.expand_dims(Xmu[1:], 2) + non_exponent_term # NxDxM exponent_mahalanobis = tf.reduce_sum(all_diffs * exponent_mahalanobis, 1) # NxM exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM return kern.variance * (determinants[:, None] * exponent_mahalanobis)[:, None, :] * non_exponent_term @dispatch((Gaussian, DiagonalGaussian), kernels.RBF, InducingPoints, kernels.RBF, InducingPoints) def _expectation(p, kern1, feat1, kern2, feat2, nghp=None): """ Compute the expectation: expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n) - Ka_{.,.}, Kb_{.,.} :: RBF kernels Ka and Kb as well as Z1 and Z2 can differ from each other. :return: N x dim(Z1) x dim(Z2) """ if kern1.on_separate_dims(kern2) and isinstance(p, DiagonalGaussian): # no joint expectations required eKxz1 = expectation(p, (kern1, feat1)) eKxz2 = expectation(p, (kern2, feat2)) return eKxz1[:, :, None] * eKxz2[:, None, :] Ka, Kb = kern1, kern2 with params_as_tensors_for(Ka, feat1, Kb, feat2): # use only active dimensions Xcov = Ka._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov) Z1, Xmu = Ka._slice(feat1.Z, p.mu) N = tf.shape(Xmu)[0] D = tf.shape(Xmu)[1] def get_squared_length_scales(kern): squared_lengthscales = kern.lengthscales ** 2. if kern.ARD \ else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales ** 2. 
return squared_lengthscales if Ka == Kb: La = get_squared_length_scales(Ka) Lb = La half_mean_L = La * 0.5 # average length scale else: La, Lb = map(get_squared_length_scales, (Ka, Kb)) half_mean_L = La * Lb / (La + Lb) # average length scale sqrt_det_L = tf.reduce_prod(half_mean_L) ** 0.5 C = tf.cholesky(tf.matrix_diag(half_mean_L) + Xcov) # [N, D, D] dets = sqrt_det_L / tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(C)), axis=1)) # N # for mahalanobis computation we need Zᵀ (CCᵀ)⁻¹ Z as well as C⁻¹ Z # with Z = Z₁, Z₂ for two rbf kernels def get_cholesky_solve_terms(Z, C=C): C_inv_z = tf.matrix_triangular_solve( C, tf.tile(tf.expand_dims(tf.transpose(Z), 0), [N, 1, 1]), lower=True) # [N, D, M] z_CC_inv_z = tf.reduce_sum(tf.square(C_inv_z), 1) # [N, M] return C_inv_z, z_CC_inv_z C_inv_mu = tf.matrix_triangular_solve(C, tf.expand_dims(Xmu, 2), lower=True) # [N, D, 1] mu_CC_inv_mu = tf.expand_dims(tf.reduce_sum(tf.square(C_inv_mu), 1), 2) # [N, 1, 1] C_inv_z1, z1_CC_inv_z1 = get_cholesky_solve_terms(Z1 / La * half_mean_L) z1_CC_inv_mu = 2 * tf.matmul(C_inv_z1, C_inv_mu, transpose_a=True)[:, :, 0] # [N, M1] if feat1 == feat2 and Ka == Kb: # in this case Z2==Z1 so we can reuse the Z1 terms C_inv_z2, z2_CC_inv_z2 = C_inv_z1, z1_CC_inv_z1 z2_CC_inv_mu = z1_CC_inv_mu # [N, M] Z2 = Z1 else: # compute terms related to Z2 Z2, _ = Kb._slice(feat2.Z, p.mu) C_inv_z2, z2_CC_inv_z2 = get_cholesky_solve_terms(Z2 / Lb * half_mean_L) z2_CC_inv_mu = 2 * tf.matmul(C_inv_z2, C_inv_mu, transpose_a=True)[:, :, 0] # [N, M2] z1_CC_inv_z2 = tf.matmul(C_inv_z1, C_inv_z2, transpose_a=True) # [N, M1, M2] # along M1 z2_CC_inv_mu = tf.expand_dims(z2_CC_inv_mu, 1) # [N, 1, M2] z2_CC_inv_z2 = tf.expand_dims(z2_CC_inv_z2, 1) # along M2 z1_CC_inv_mu = tf.expand_dims(z1_CC_inv_mu, 2) # [N, M1, 1] z1_CC_inv_z1 = tf.expand_dims(z1_CC_inv_z1, 2) # expanded version of ((Z1 + Z2)-mu) (CCT)-1 ((Z1 + Z2)-mu) mahalanobis = mu_CC_inv_mu + z2_CC_inv_z2 + \ z1_CC_inv_z1 + 2 * z1_CC_inv_z2 - \ z1_CC_inv_mu - 
z2_CC_inv_mu # [N, M1, M2] exp_mahalanobis = tf.exp(-0.5 * mahalanobis) # [N, M1, M2] if Z1 == Z2: # CAVEAT : Compute sqrt(self.K(Z)) explicitly # to prevent automatic gradient from # being NaN sometimes, see pull request #615 sqrt_exp_dist = tf.exp(-0.25 * Ka.scaled_square_dist(Z1, None)) else: # Compute exp( -.5 (Z-Z')^top (L_1+L_2)^{-1} (Z-Z') ) lengthscales_rms = tf.sqrt(La + Lb) Z1 = Z1 / lengthscales_rms Z1sqr = tf.reduce_sum(tf.square(Z1), axis=1) Z2 = Z2 / lengthscales_rms Z2sqr = tf.reduce_sum(tf.square(Z2), axis=1) dist = -2 * tf.matmul(Z1, Z2, transpose_b=True) \ + tf.reshape(Z1sqr, (-1, 1)) + tf.reshape(Z2sqr, (1, -1)) sqrt_exp_dist = tf.exp(-0.5 * dist) # M1 x M2 return Ka.variance * Kb.variance * sqrt_exp_dist * \ tf.reshape(dets, [N, 1, 1]) * exp_mahalanobis # =============================== Linear Kernel =============================== @dispatch(Gaussian, kernels.Linear, type(None), type(None), type(None)) def _expectation(p, kern, none1, none2, none3, nghp=None): """ Compute the expectation: <diag(K_{X, X})>_p(X) - K_{.,.} :: Linear kernel :return: N """ with params_as_tensors_for(kern): # use only active dimensions Xmu, _ = kern._slice(p.mu, None) Xcov = kern._slice_cov(p.cov) return tf.reduce_sum(kern.variance * (tf.matrix_diag_part(Xcov) + tf.square(Xmu)), 1) @dispatch(Gaussian, kernels.Linear, InducingPoints, type(None), type(None)) def _expectation(p, kern, feat, none1, none2, nghp=None): """ Compute the expectation: <K_{X, Z}>_p(X) - K_{.,.} :: Linear kernel :return: NxM """ with params_as_tensors_for(kern, feat): # use only active dimensions Z, Xmu = kern._slice(feat.Z, p.mu) return tf.matmul(Xmu, Z * kern.variance, transpose_b=True) @dispatch(Gaussian, kernels.Linear, InducingPoints, mean_functions.Identity, type(None)) def _expectation(p, kern, feat, mean, none, nghp=None): """ Compute the expectation: expectation[n] = <K_{Z, x_n} x_n^T>_p(x_n) - K_{.,.} :: Linear kernel :return: NxMxD """ Xmu, Xcov = p.mu, p.cov with 
tf.control_dependencies([tf.assert_equal( tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.int_type), message="Currently cannot handle slicing in exKxz.")]): Xmu = tf.identity(Xmu) with params_as_tensors_for(kern, feat): N = tf.shape(Xmu)[0] var_Z = kern.variance * feat.Z # MxD tiled_Z = tf.tile(tf.expand_dims(var_Z, 0), (N, 1, 1)) # NxMxD return tf.matmul(tiled_Z, Xcov + (Xmu[..., None] * Xmu[:, None, :])) @dispatch(MarkovGaussian, kernels.Linear, InducingPoints, mean_functions.Identity, type(None)) def _expectation(p, kern, feat, mean, none, nghp=None): """ Compute the expectation: expectation[n] = <K_{Z, x_n} x_{n+1}^T>_p(x_{n:n+1}) - K_{.,.} :: Linear kernel - p :: MarkovGaussian distribution (p.cov 2x(N+1)xDxD) :return: NxMxD """ Xmu, Xcov = p.mu, p.cov with tf.control_dependencies([tf.assert_equal( tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.int_type), message="Currently cannot handle slicing in exKxz.")]): Xmu = tf.identity(Xmu) with params_as_tensors_for(kern, feat): N = tf.shape(Xmu)[0] - 1 var_Z = kern.variance * feat.Z # MxD tiled_Z = tf.tile(tf.expand_dims(var_Z, 0), (N, 1, 1)) # NxMxD eXX = Xcov[1, :-1] + (Xmu[:-1][..., None] * Xmu[1:][:, None, :]) # NxDxD return tf.matmul(tiled_Z, eXX) @dispatch((Gaussian, DiagonalGaussian), kernels.Linear, InducingPoints, kernels.Linear, InducingPoints) def _expectation(p, kern1, feat1, kern2, feat2, nghp=None): """ Compute the expectation: expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n) - Ka_{.,.}, Kb_{.,.} :: Linear kernels Ka and Kb as well as Z1 and Z2 can differ from each other, but this is supported only if the Gaussian p is Diagonal (p.cov NxD) and Ka, Kb have disjoint active_dims in which case the joint expectations simplify into a product of expectations :return: NxMxM """ if kern1.on_separate_dims(kern2) and isinstance(p, DiagonalGaussian): # no joint expectations required eKxz1 = expectation(p, (kern1, feat1)) eKxz2 = expectation(p, (kern2, feat2)) return eKxz1[:, :, None] * eKxz2[:, 
None, :] if kern1 != kern2 or feat1 != feat2: raise NotImplementedError("The expectation over two kernels has only an " "analytical implementation if both kernels are equal.") kern = kern1 feat = feat1 with params_as_tensors_for(kern, feat): # use only active dimensions Xcov = kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov) Z, Xmu = kern._slice(feat.Z, p.mu) N = tf.shape(Xmu)[0] var_Z = kern.variance * Z tiled_Z = tf.tile(tf.expand_dims(var_Z, 0), (N, 1, 1)) # NxMxD XX = Xcov + tf.expand_dims(Xmu, 1) * tf.expand_dims(Xmu, 2) # NxDxD return tf.matmul(tf.matmul(tiled_Z, XX), tiled_Z, transpose_b=True) # ================ exKxz transpose and mean function handling ================= @dispatch((Gaussian, MarkovGaussian), mean_functions.Identity, type(None), kernels.Linear, InducingPoints) def _expectation(p, mean, none, kern, feat, nghp=None): """ Compute the expectation: expectation[n] = <x_n K_{x_n, Z}>_p(x_n) - K_{.,} :: Linear kernel or the equivalent for MarkovGaussian :return: NxDxM """ return tf.matrix_transpose(expectation(p, (kern, feat), mean)) @dispatch((Gaussian, MarkovGaussian), kernels.Kernel, InducingFeature, mean_functions.MeanFunction, type(None)) def _expectation(p, kern, feat, mean, none, nghp=None): """ Compute the expectation: expectation[n] = <K_{Z, x_n} m(x_n)>_p(x_n) or the equivalent for MarkovGaussian :return: NxMxQ """ return tf.matrix_transpose(expectation(p, mean, (kern, feat), nghp=nghp)) @dispatch(Gaussian, mean_functions.Constant, type(None), kernels.Kernel, InducingPoints) def _expectation(p, constant_mean, none, kern, feat, nghp=None): """ Compute the expectation: expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n) - m(x_i) = c :: Constant function - K_{.,.} :: Kernel function :return: NxQxM """ with params_as_tensors_for(constant_mean): c = constant_mean(p.mu) # NxQ eKxz = expectation(p, (kern, feat), nghp=nghp) # NxM return c[..., None] * eKxz[:, None, :] @dispatch(Gaussian, mean_functions.Linear, 
type(None), kernels.Kernel, InducingPoints) def _expectation(p, linear_mean, none, kern, feat, nghp=None): """ Compute the expectation: expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n) - m(x_i) = A x_i + b :: Linear mean function - K_{.,.} :: Kernel function :return: NxQxM """ with params_as_tensors_for(linear_mean): N = p.mu.shape[0].value D = p.mu.shape[1].value exKxz = expectation(p, mean_functions.Identity(D), (kern, feat), nghp=nghp) eKxz = expectation(p, (kern, feat), nghp=nghp) eAxKxz = tf.matmul(tf.tile(linear_mean.A[None, :, :], (N, 1, 1)), exKxz, transpose_a=True) ebKxz = linear_mean.b[None, :, None] * eKxz[:, None, :] return eAxKxz + ebKxz @dispatch(Gaussian, mean_functions.Identity, type(None), kernels.Kernel, InducingPoints) def _expectation(p, identity_mean, none, kern, feat, nghp=None): """ This prevents infinite recursion for kernels that don't have specific implementations of _expectation(p, identity_mean, None, kern, feat). Recursion can arise because Identity is a subclass of Linear mean function so _expectation(p, linear_mean, none, kern, feat) would call itself. More specific signatures (e.g. (p, identity_mean, None, RBF, feat)) will be found and used whenever available """ raise NotImplementedError # ============================== Mean functions =============================== @dispatch(Gaussian, (mean_functions.Linear, mean_functions.Constant), type(None), type(None), type(None)) def _expectation(p, mean, none1, none2, none3, nghp=None): """ Compute the expectation: <m(X)>_p(X) - m(x) :: Linear, Identity or Constant mean function :return: NxQ """ return mean(p.mu) @dispatch(Gaussian, mean_functions.Constant, type(None), mean_functions.Constant, type(None)) def _expectation(p, mean1, none1, mean2, none2, nghp=None): """ Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.), m2(.) 
:: Constant mean functions :return: NxQ1xQ2 """ return mean1(p.mu)[:, :, None] * mean2(p.mu)[:, None, :] @dispatch(Gaussian, mean_functions.Constant, type(None), mean_functions.MeanFunction, type(None)) def _expectation(p, mean1, none1, mean2, none2, nghp=None): """ Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.) :: Constant mean function - m2(.) :: General mean function :return: NxQ1xQ2 """ e_mean2 = expectation(p, mean2) return mean1(p.mu)[:, :, None] * e_mean2[:, None, :] @dispatch(Gaussian, mean_functions.MeanFunction, type(None), mean_functions.Constant, type(None)) def _expectation(p, mean1, none1, mean2, none2, nghp=None): """ Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.) :: General mean function - m2(.) :: Constant mean function :return: NxQ1xQ2 """ e_mean1 = expectation(p, mean1) return e_mean1[:, :, None] * mean2(p.mu)[:, None, :] @dispatch(Gaussian, mean_functions.Identity, type(None), mean_functions.Identity, type(None)) def _expectation(p, mean1, none1, mean2, none2, nghp=None): """ Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.), m2(.) :: Identity mean functions :return: NxDxD """ return p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) @dispatch(Gaussian, mean_functions.Identity, type(None), mean_functions.Linear, type(None)) def _expectation(p, mean1, none1, mean2, none2, nghp=None): """ Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.) :: Identity mean function - m2(.) 
:: Linear mean function :return: NxDxQ """ with params_as_tensors_for(mean2): N = tf.shape(p.mu)[0] e_xxt = p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) # NxDxD e_xxt_A = tf.matmul(e_xxt, tf.tile(mean2.A[None, ...], (N, 1, 1))) # NxDxQ e_x_bt = p.mu[:, :, None] * mean2.b[None, None, :] # NxDxQ return e_xxt_A + e_x_bt @dispatch(Gaussian, mean_functions.Linear, type(None), mean_functions.Identity, type(None)) def _expectation(p, mean1, none1, mean2, none2, nghp=None): """ Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.) :: Linear mean function - m2(.) :: Identity mean function :return: NxQxD """ with params_as_tensors_for(mean1): N = tf.shape(p.mu)[0] e_xxt = p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) # NxDxD e_A_xxt = tf.matmul(tf.tile(mean1.A[None, ...], (N, 1, 1)), e_xxt, transpose_a=True) # NxQxD e_b_xt = mean1.b[None, :, None] * p.mu[:, None, :] # NxQxD return e_A_xxt + e_b_xt @dispatch(Gaussian, mean_functions.Linear, type(None), mean_functions.Linear, type(None)) def _expectation(p, mean1, none1, mean2, none2, nghp=None): """ Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.), m2(.) 
:: Linear mean functions :return: NxQ1xQ2 """ with params_as_tensors_for(mean1, mean2): e_xxt = p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) # NxDxD e_A1t_xxt_A2 = tf.einsum("iq,nij,jz->nqz", mean1.A, e_xxt, mean2.A) # NxQ1xQ2 e_A1t_x_b2t = tf.einsum("iq,ni,z->nqz", mean1.A, p.mu, mean2.b) # NxQ1xQ2 e_b1_xt_A2 = tf.einsum("q,ni,iz->nqz", mean1.b, p.mu, mean2.A) # NxQ1xQ2 e_b1_b2t = mean1.b[:, None] * mean2.b[None, :] # Q1xQ2 return e_A1t_xxt_A2 + e_A1t_x_b2t + e_b1_xt_A2 + e_b1_b2t # ================================ Sum kernels ================================ @dispatch(Gaussian, kernels.Sum, type(None), type(None), type(None)) def _expectation(p, kern, none1, none2, none3, nghp=None): r""" Compute the expectation: <\Sum_i diag(Ki_{X, X})>_p(X) - \Sum_i Ki_{.,.} :: Sum kernel :return: N """ expectation(p, k, nghp=nghp) for k in kern.kernels]) @dispatch(Gaussian, kernels.Sum, InducingPoints, type(None), type(None)) def _expectation(p, kern, feat, none2, none3, nghp=None): r""" Compute the expectation: <\Sum_i Ki_{X, Z}>_p(X) - \Sum_i Ki_{.,.} :: Sum kernel :return: NxM """ expectation(p, (k, feat), nghp=nghp) for k in kern.kernels]) @dispatch(Gaussian, (mean_functions.Linear, mean_functions.Identity, mean_functions.Constant), type(None), kernels.Sum, InducingPoints) def _expectation(p, mean, none, kern, feat, nghp=None): r""" Compute the expectation: expectation[n] = <m(x_n)^T (\Sum_i Ki_{x_n, Z})>_p(x_n) - \Sum_i Ki_{.,.} :: Sum kernel :return: NxQxM """ expectation(p, mean, (k, feat), nghp=nghp) for k in kern.kernels]) @dispatch(MarkovGaussian, mean_functions.Identity, type(None), kernels.Sum, InducingPoints) def _expectation(p, mean, none, kern, feat, nghp=None): r""" Compute the expectation: expectation[n] = <x_{n+1} (\Sum_i Ki_{x_n, Z})>_p(x_{n:n+1}) - \Sum_i Ki_{.,.} :: Sum kernel :return: NxDxM """ expectation(p, mean, (k, feat), nghp=nghp) for k in kern.kernels]) @dispatch((Gaussian, DiagonalGaussian), kernels.Sum, InducingPoints, kernels.Sum, 
InducingPoints) def _expectation(p, kern1, feat1, kern2, feat2, nghp=None): r""" Compute the expectation: expectation[n] = <(\Sum_i K1_i_{Z1, x_n}) (\Sum_j K2_j_{x_n, Z2})>_p(x_n) - \Sum_i K1_i_{.,.}, \Sum_j K2_j_{.,.} :: Sum kernels :return: NxM1xM2 """ crossexps = [] if kern1 == kern2 and feat1 == feat2: # avoid duplicate computation by using transposes for i, k1 in enumerate(kern1.kernels): crossexps.append(expectation(p, (k1, feat1), (k1, feat1), nghp=nghp)) for k2 in kern1.kernels[:i]: eKK = expectation(p, (k1, feat1), (k2, feat2), nghp=nghp) eKK += tf.matrix_transpose(eKK) crossexps.append(eKK) else: for k1, k2 in it.product(kern1.kernels, kern2.kernels): crossexps.append(expectation(p, (k1, feat1), (k2, feat2), nghp=nghp)) # =================== Cross Kernel expectations (eK1zxK2xz) =================== @dispatch((Gaussian, DiagonalGaussian), kernels.RBF, InducingPoints, kernels.Linear, InducingPoints) def _expectation(p, rbf_kern, feat1, lin_kern, feat2, nghp=None): """ Compute the expectation: expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n) - K_lin_{.,.} :: RBF kernel - K_rbf_{.,.} :: Linear kernel Different Z1 and Z2 are handled if p is diagonal and K_lin and K_rbf have disjoint active_dims, in which case the joint expectations simplify into a product of expectations :return: NxM1xM2 """ if rbf_kern.on_separate_dims(lin_kern) and isinstance(p, DiagonalGaussian): # no joint expectations required eKxz1 = expectation(p, (rbf_kern, feat1)) eKxz2 = expectation(p, (lin_kern, feat2)) return eKxz1[:, :, None] * eKxz2[:, None, :] if feat1 != feat2: raise NotImplementedError("Features have to be the same for both kernels.") if rbf_kern.active_dims != lin_kern.active_dims: raise NotImplementedError("active_dims have to be the same for both kernels.") with params_as_tensors_for(rbf_kern, lin_kern, feat1, feat2): # use only active dimensions Xcov = rbf_kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov) Z, Xmu = 
rbf_kern._slice(feat1.Z, p.mu) N = tf.shape(Xmu)[0] D = tf.shape(Xmu)[1] lin_kern_variances = lin_kern.variance if lin_kern.ARD \ else tf.zeros((D,), dtype=settings.float_type) + lin_kern.variance rbf_kern_lengthscales = rbf_kern.lengthscales if rbf_kern.ARD \ else tf.zeros((D,), dtype=settings.float_type) + rbf_kern.lengthscales ## Begin RBF eKxz code: chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(rbf_kern_lengthscales ** 2) + Xcov) # NxDxD Z_transpose = tf.transpose(Z) all_diffs = Z_transpose - tf.expand_dims(Xmu, 2) # NxDxM exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True) # NxDxM exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1) # NxM exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM sqrt_det_L = tf.reduce_prod(rbf_kern_lengthscales) sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1)) determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N eKxz_rbf = rbf_kern.variance * (determinants[:, None] * exponent_mahalanobis) ## NxM <- End RBF eKxz code tiled_Z = tf.tile(tf.expand_dims(Z_transpose, 0), (N, 1, 1)) # NxDxM z_L_inv_Xcov = tf.matmul(tiled_Z, Xcov / rbf_kern_lengthscales[:, None] ** 2., transpose_a=True) # NxMxD cross_eKzxKxz = tf.cholesky_solve( chol_L_plus_Xcov, (lin_kern_variances * rbf_kern_lengthscales ** 2.)[..., None] * tiled_Z) # NxDxM cross_eKzxKxz = tf.matmul((z_L_inv_Xcov + Xmu[:, None, :]) * eKxz_rbf[..., None], cross_eKzxKxz) # NxMxM return cross_eKzxKxz @dispatch((Gaussian, DiagonalGaussian), kernels.Linear, InducingPoints, kernels.RBF, InducingPoints) def _expectation(p, lin_kern, feat1, rbf_kern, feat2, nghp=None): """ Compute the expectation: expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n) - K_lin_{.,.} :: Linear kernel - K_rbf_{.,.} :: RBF kernel Different Z1 and Z2 are handled if p is diagonal and K_lin and K_rbf have disjoint active_dims, in which case the joint expectations simplify into a product of expectations 
:return: NxM1xM2 """ return tf.matrix_transpose(expectation(p, (rbf_kern, feat2), (lin_kern, feat1))) # ============================== Product kernels ============================== # Note: product kernels are only supported if the kernels in kern.kernels act # on disjoint sets of active_dims and the Gaussian we are integrating over # is Diagonal @dispatch(DiagonalGaussian, kernels.Product, type(None), type(None), type(None)) def _expectation(p, kern, none1, none2, none3, nghp=None): r""" Compute the expectation: - \HadamardProd_i Ki_{.,.} :: Product kernel - p :: DiagonalGaussian distribution (p.cov NxD) :return: N """ if not kern.on_separate_dimensions: raise NotImplementedError( "Product currently needs to be defined on separate dimensions.") # pragma: no cover return functools.reduce(tf.multiply, [ expectation(p, k, nghp=nghp) for k in kern.kernels]) @dispatch(DiagonalGaussian, kernels.Product, InducingPoints, type(None), type(None)) def _expectation(p, kern, feat, none2, none3, nghp=None): r""" Compute the expectation: - \HadamardProd_i Ki_{.,.} :: Product kernel - p :: DiagonalGaussian distribution (p.cov NxD) :return: NxM """ if not kern.on_separate_dimensions: raise NotImplementedError( "Product currently needs to be defined on separate dimensions.") # pragma: no cover return functools.reduce(tf.multiply, [ expectation(p, (k, feat), nghp=nghp) for k in kern.kernels]) @dispatch(DiagonalGaussian, kernels.Product, InducingPoints, kernels.Product, InducingPoints) def _expectation(p, kern1, feat1, kern2, feat2, nghp=None): r""" Compute the expectation: expectation[n] = < prodK_{Z, x_n} prodK_{x_n, Z} >_p(x_n) = < (\HadamardProd_i Ki_{Z[:, active_dims_i], x[n, active_dims_i]}) <-- Mx1 1xM --> (\HadamardProd_j Kj_{x[n, active_dims_j], Z[:, active_dims_j]}) >_p(x_n) (MxM) - p :: DiagonalGaussian distribution (p.cov NxD) :return: NxMxM """ if feat1 != feat2: raise NotImplementedError("Different features are not supported.") if kern1 != kern2: raise 
NotImplementedError("Calculating the expectation over two " "different Product kernels is not supported.") kern = kern1 feat = feat1 if not kern.on_separate_dimensions: raise NotImplementedError( "Product currently needs to be defined on separate dimensions.") # pragma: no cover return functools.reduce(tf.multiply, [ expectation(p, (k, feat), (k, feat), nghp=nghp) for k in kern.kernels]) # ============== Conversion to Gaussian from Diagonal or Markov =============== # Catching missing DiagonalGaussian implementations by converting to full Gaussian: @dispatch(DiagonalGaussian, object, (InducingFeature, type(None)), object, (InducingFeature, type(None))) def _expectation(p, obj1, feat1, obj2, feat2, nghp=None): gaussian = Gaussian(p.mu, tf.matrix_diag(p.cov)) return expectation(gaussian, (obj1, feat1), (obj2, feat2), nghp=nghp) # Catching missing MarkovGaussian implementations by converting to Gaussian (when indifferent): @dispatch(MarkovGaussian, object, (InducingFeature, type(None)), object, (InducingFeature, type(None))) def _expectation(p, obj1, feat1, obj2, feat2, nghp=None): """ Nota Bene: if only one object is passed, obj1 is associated with x_n, whereas obj2 with x_{n+1} """ if obj2 is None: gaussian = Gaussian(p.mu[:-1], p.cov[0, :-1]) return expectation(gaussian, (obj1, feat1), nghp=nghp) elif obj1 is None: gaussian = Gaussian(p.mu[1:], p.cov[0, 1:]) return expectation(gaussian, (obj2, feat2), nghp=nghp) else: return expectation(p, (obj1, feat1), (obj2, feat2), nghp=nghp) ``````
11,169
35,019
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
2.796875
3
CC-MAIN-2024-26
latest
en
0.692738
https://study.com/academy/lesson/working-with-addition-input-output-tables.html
1,582,986,176,000,000,000
text/html
crawl-data/CC-MAIN-2020-10/segments/1581875149238.97/warc/CC-MAIN-20200229114448-20200229144448-00482.warc.gz
566,609,287
33,786
# Working with Addition Input-Output Tables Instructor: Jaimee Arnold Jaimee has worked in elementary school and has her Master's +30. In this lesson, you will learn what an input-output table is and how to use it. You will learn the vocabulary used with input/output tables and be able to complete an addition input/output table by the end of the lesson. ## What is an Input-Output Table? An input-output table is a table that shows the relationship between sets of numbers that always follow the same rule. Input-output tables can be very complex (with several math operations) or simple (with just one math operation). In some problems, the rule for the input-output table is given, and in other problems you will need to figure out the math rule for the input-output table. In this lesson, we will focus on working with addition to solve the input-output table. The rule for the input-output table must be the same for all the number sets that are included in the table. If the rule does not work for each set of numbers, it is not the correct rule for the table. ## Math Vocabulary Before you learn how to solve the input-output table, you will need to learn the vocabulary. Input-Output table: This is the actual table that has the sets of numbers on it. This table could also be referred to as a function table. Input is the number in the first column of the table. This is the number that you start with to follow the rule if the rule is given. You would also use this number to figure out what the rule is if it is not given. You always start with input. Output is the end number or the answer to the math equation. The rule is the math operation(s) that needs to be followed to get the correct sets of numbers for your input-output table. Look at the illustration below to see how the table is set up. ## Input-Output Table: Using the Rule In some input-output tables, the rule will be clearly stated. You can find the rule directly on top of the input-output table. 
When the rule is given, it is the same rule for every row of the table in that problem. So if the rule is ''add 3'', it is ''add 3'' for every single row in the table. Each row is a different set of numbers, but you do the same mathematical operation for each one. Take a look at the picture to see how the input output table is set up. To unlock this lesson you must be a Study.com Member. ### Register to view this lesson Are you a student or a teacher? #### See for yourself why 30 million people use Study.com ##### Become a Study.com member and start learning now. Back What teachers are saying about Study.com ### Earning College Credit Did you know… We have over 200 college courses that prepare you to earn credit by exam that is accepted by over 1,500 colleges and universities. You can test out of the first two years of college and save thousands off your degree. Anyone can earn credit-by-exam regardless of age or education level.
641
2,940
{"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
4.4375
4
CC-MAIN-2020-10
longest
en
0.934739
https://math.stackexchange.com/questions/2866390/fermat-little-theorem
1,558,638,735,000,000,000
text/html
crawl-data/CC-MAIN-2019-22/segments/1558232257361.12/warc/CC-MAIN-20190523184048-20190523210048-00321.warc.gz
549,089,914
31,217
# Fermat Little Theorem [closed] The problem I am trying to solve is how to use Fermat Little theorem to prove that the number 66013 is not prime. I found this problem on another website (Question Cove). The student who had to solve this problem didn't know how to proceed. I offered to help. I read the following lecture notes Princeton lecture notes on cryptography and thought I would follow the same procedure. I wrote $66013 = 257^2 + 36 = 256^2 + 513 + 36 = 2^{16} + 2^9 + 37$ but I am stuck after that. What is the correct approach? ## closed as off-topic by Adrian Keister, Mostafa Ayaz, Isaac Browne, José Carlos Santos, NamasteJul 31 '18 at 0:06 This question appears to be off-topic. The users who voted to close gave this specific reason: • "This question is missing context or other details: Please improve the question by providing additional context, which ideally includes your thoughts on the problem and any attempts you have made to solve it. This information helps others identify where you have difficulties and helps them write answers appropriate to your experience level." – Adrian Keister, José Carlos Santos, Namaste If this question can be reworded to fit the rules in the help center, please edit the question. Fermat's little theorem is an incredibly inefficient method for proving primality. In this case the standard test for divisibility by 3 (by adding the digits and checking whether the sum is a multiple of 3) shows that $66213$ is a multiple of $3$. • 66213 = 9* 7357 – Thierry Kauffmann Jul 29 '18 at 20:27 • Yes, indeed the quotient is easy to find. I was just pointing out that 66213 falls to the very simplest rule of thumb for checking primality. – Rob Arthan Jul 29 '18 at 20:31 Pick $a:=3$. We have $$a^{66212}=3^{66212}\not\equiv1\pmod{66213}$$ because $3\mid 66213$. Well, there, I used Fermat's Little Theorem! Edit: Originally, the OP's question was to disprove the primality of $66213$. 
For the corrected question (with number $66013$), choose $a:=15781$. Then, $$a^2=249039961\equiv 38925\pmod{66013}\,.$$ That is, $$a^4\equiv 38925^2=1515155625\equiv25249\pmod{66013}\,.$$ Finally, $$a^5=a\cdot a^4\equiv 15781\cdot 25249=398454469\equiv 1\pmod{66013}\,.$$ That is, $$a^{66013-1}=a^2\,\left(a^5\right)^{13202} \equiv a^2\cdot 1^{13202}\equiv a^2\equiv 38925\pmod{66013}\,.$$ Thus, $66013$ is not prime. • If $66213$ were prime, any $a$ between $1$ to $66212$ could be used. The point is, because $a=3$ couldn't be used, $66213$ is not prime. – Batominovski Jul 29 '18 at 20:20 • But you can't claim $3|66213$ unless you know that $66213$ is not prime in the first place. This argument is circular. – fleablood Jul 29 '18 at 20:21 • No, I didn't have to use circular logic to verify that $3\mid 66213$. I can just divide it. This problem is a joke. – Batominovski Jul 29 '18 at 20:23 • I agree with Batominovski, it is a form of torture to set students tasks like this. And to placate Anurag A and fleablood, we are entitled to assume for a contradiction that $a = 3$ and $66213$ are relatively prime (since if they aren't then the prime $3$ divides $66213$). – Rob Arthan Jul 29 '18 at 20:25 • @Batominovski May I ask how you picked a:=15781.? – Thierry Kauffmann Jul 30 '18 at 13:07
975
3,244
{"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0}
3.9375
4
CC-MAIN-2019-22
latest
en
0.939696