url
stringlengths
14
1.76k
text
stringlengths
100
1.02M
metadata
stringlengths
1.06k
1.1k
https://share.cocalc.com/share/240a6b1e030c62ffae877fc3c98edbacbc949e5c/SchoofsAlgorithm.sagews?viewer=share
CoCalc SharedSchoofsAlgorithm.sagewsOpen in CoCalc Description: Schoof's Algorithm Author: Andrew Sutherland Views : 4 # The elliptic curve E is in Weierstrass form y^2=f(x)=x^3+Ax+B divpoly_factor = 0 # global variable for factor of the division polynomial when ZeroDivisionError's occur # Elements of End(E[ell]) are represented as pairs (a,b*y), with a,b in Fp[x]/(h(x)), where h is the ell-th divpoly (or a factor of it, for example, the kernel polynomial of an isogeny) # The y is implicit, but must be accounted for when applying the group law -- using the curve equation y^2=f(x) we can replace y^2 with f(x) whenever it appears (this effectively hides all the y's) # In many of the functions below we pass in both A and f # where f is the image of x^3+Ax+B in Fp[x]/(h(x)) -- we need both because if deg(h)<= 3 we cannot recover A from (x^3+Ax+B) mod h(x) """add endomorphisms P and Q in End(E[ell])""" global divpoly_factor if not P: return Q if not Q: return P a1 = P[0]; b1 = P[1]; a2=Q[0]; b2=Q[1] if a1 == a2: if b1 == b2: return dbl(P,A,f) else: return () try: m = (b2-b1)/(a2-a1) except ZeroDivisionError: print "caught zero division error in add" ### given that a2-a1 is already reduced mod h, a ZeroDivisionError means that gcd(a2-a1,h) must be a nontrivial divisor g of h ### raise an error so that we can restart the algorithm working in a smaller quotient ring divpoly_factor = a2-a1 raise a3 = f*m^2 -a1 - a2 b3 = m*(a1-a3) - b1 return (a3,b3) def dbl(P,A,f): """double the endomorphism P in End(E[ell]) """ global divpoly_factor if not P: return P a1 = P[0]; b1 = P[1] try: m = (3*a1^2+A) / (2*b1*f) except ZeroDivisionError: print "caught zero division error in dbl" divpoly_factor = 2*b1*f raise a3 = f*m^2 - 2*a1 b3 = m*(a1-a3) - b1 return (a3,b3) def neg(P): """ negate the endomorphism P in End(E[ell]) """ if not P: return P return (P[0],-P[1]) def smul (n,P,A,f): """ compute the scalar multiple n*P in End(E[ell]) using double and add""" if not n: return () nbits = 
n.digits(2) i = len(nbits)-2 Q = P while i >= 0: Q = dbl(Q,A,f) if nbits[i]: Q = add(P,Q,A,f) i -= 1 return Q def mul (P,Q): """ compute the product (i.e. composition of) P*Q of two endomorphisms in End(E[ell]) """ return (P[0].lift()(Q[0]),P[1].lift()(Q[0])*Q[1]) def trace_mod (E, ell): """ compute the trace of Frobenius of E modulo ell """ FF=E.base_ring() assert FF.characteristic() != 2 E = E.short_weierstrass_model() q = FF.cardinality() # finite field FF_q R.<t>=PolynomialRing(FF) A=E.a4(); B=E.a6() # E: y^2 = x^3 + Ax + B if ell == 2: # t is odd iff f is irreducible if (t^3+A*t+B).is_irreducible(): return 1 else: return 0 h = E.division_polynomial(ell,t,0).monic() while true: try: RR.<x> = R.quotient(ideal(h)) # RR is End(E[ell]) (or a subring thereof) f = x^3+A*x+B xq = x^q; yq = f^((q-1)//2) pi = (xq,yq) # pi is the Frobenius endomorphism pi2 = mul(pi,pi) # pi2 = pi^2 id = (x,RR(1)) # identity aka mult-by-1 map Q = smul(q%ell,id,A,f) # Q is the mult-by-q map S = add(pi2,Q,A,f) # S = pi^2 + q = t*pi if not S: return 0 # if S=0 then t=0 if S == pi: return 1 # if S=pi then t=1 if neg(S) == pi: return -1 # if S=-pi then t=-1 P = pi for t in range(2,ell-1): P = add(P,pi,A,f) # P = t*pi if P==S: return t # if S=P then we have found t print "Error, Frob satisfies no charpoly!!" 
assert false except ZeroDivisionError: h = gcd(h,divpoly_factor.lift()) # if we hit a zero divisor, start over with new h print "found %d-divpoly factor of degree %d"%(ell,h.degree()) def Schoof(E): """ compute the trace of Frobenius of E using Schoof's algorithm """ q=E.base_ring().cardinality() t = 0; M=1; ell=1; while M <= 4*sqrt(q): ell = next_prime(ell) start = cputime() tell = trace_mod(E,ell) print "trace %d mod %d computed in %.2f secs"%(tell,ell,cputime()-start) a = M*M.inverse_mod(ell); b = ell*ell.inverse_mod(M) M *= ell t = (a*tell+b*t) % M if t >= M/2: return t-M else: return t %time FF=GF(next_prime(2^80)) E=EllipticCurve([FF(314159),FF(2781828)]) t=Schoof(E) print t trace 1 mod 2 computed in 0.01 secs trace -1 mod 3 computed in 0.01 secs trace 0 mod 5 computed in 0.01 secs trace 2 mod 7 computed in 0.03 secs trace 7 mod 11 computed in 0.10 secs caught zero division error in add found 13-divpoly factor of degree 6 trace 6 mod 13 computed in 0.17 secs trace 15 mod 17 computed in 0.51 secs trace 1 mod 19 computed in 0.52 secs trace 8 mod 23 computed in 0.89 secs %time FF=GF(next_prime(2^80)) E=EllipticCurve([FF(314159),FF(2781828)]) print E.trace_of_frobenius() -1315484487805 CPU time: 0.00 s, Wall time: 0.00 s FF=GF(23) E1=EllipticCurve(FF, "11a3") E2=EllipticCurve(FF, "11a2") r1 = trace_mod(E1, 5) r2 = trace_mod(E2, 5) print r1, r2 -1 -1
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8060741424560547, "perplexity": 16717.47676095866}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-30/segments/1563195526489.6/warc/CC-MAIN-20190720070937-20190720092937-00490.warc.gz"}
http://www.eevblog.com/forum/projects/dc-to-dc-converter-(-source-or-circuit-diagram)/
$\renewcommand{\renewcommand}{}$ Author Topic: DC to DC converter ( source, or circuit diagram)  (Read 3662 times) 0 Members and 1 Guest are viewing this topic. TheKraken • Contributor • Posts: 8 DC to DC converter ( source, or circuit diagram) « on: July 02, 2013, 03:15:01 PM » Now I know that a normal dc-dc boost converter is a fairly straight forward thing, and I could find many online. My problem, or more so my curiosity is in finding where I could get a relatively cheap dc to HV dc boost converter, or at least a basic circuit diagram to work from. This has always puzzled me, and I know there are ways to get my hands on circuits that do this ( camera flashes for instance ) but I am curious how I would go about designing a specific voltage booster. 400v would be ok, 600v-800v would be more along the lines of what I'm looking to find. Or even a reputable source for circuits of this nature. dc- dc via inductor ( though i do not believe this to be the best or most feasible) or dc->ac->rectified dc. A concern would be size of the overall circuit if sourced, would like something within 2x2...2x4 give or take, theres some room to work here most definitely. The trouble I'm having is finding a way to get a small scale inverter ( I have a standard 12vdc-110vac inverter but its bulky ) that could be rectified, but thats not what im curious or interested in finding. A higher voltage then a 120/240 standard in the ranges above ( really anywhere from 400v-800v maybe higher if its easier to source/find a diagram ). Input voltages could be...1.5v (or multiples) 3.7v ( or multiples ) 9v ( but would not prefer ) or 12v but this is only after the fact thoughts here, I could in essence purchase many types of DC sources to power the circuit so this isn't a necessary, but a strong preference. Output amperage is a trivial concern currently ( pardon the pun ). 
Paul Price • Super Contributor • Posts: 1240 Re: DC to DC converter ( source, or circuit diagram) « Reply #1 on: July 05, 2013, 02:04:26 AM » Pretty simple to make a very simple fly-back boost converter, just takes a toroid inductor, a HV n-MOSFET,  a HV fast-recovery diode,  a 555 timer, a few resistors and caps to get 800V or more at low current, and this circuit could be quite small, with the transformer taking up most of the space. The higher the input battery voltage, the better, so as to convert say, 12V to 800V DC. rbola35618 • Frequent Contributor • Posts: 269 • Country: Re: DC to DC converter ( source, or circuit diagram) « Reply #2 on: July 05, 2013, 03:38:40 AM » With a high turn ratio, the winding ration will give you a high leakage that maybe an problem. A better solution is to keep the voltage down and use a series of diode multipliers like the one that Dave lectured on. We use that technique in our high voltage designs. We always try to keep the voltages in the transformer to 300 to 400 volts. That will make for a much more reliable transformer. Hope this helps, Robert B « Last Edit: July 05, 2013, 04:23:04 PM by rbola35618 » TheKraken • Contributor • Posts: 8 Re: DC to DC converter ( source, or circuit diagram) « Reply #3 on: July 05, 2013, 12:33:25 PM » Ahh, thanks guys. I have been a bit curious about whether utilizing multiple circuits for voltage boosting would work, also was a bit 'iffy' on what sort of transformer to use. Ill look more into the 555 timer circuits. Is the 555 the only way to get the oscillation for the ac current? poorchava • Super Contributor • Posts: 1408 • Country: • Troll Cave Electronics! 
Re: DC to DC converter ( source, or circuit diagram) « Reply #4 on: July 05, 2013, 04:03:38 PM » No, there are tons of others: -schmitt inverter oscillator -ring oscillator -some decidated 7400 and 4000 series chips -various pwm controllers with TL494 on the very top -microcontroller (there are 8-bit ones out there that have ADCs, comparators and whatnot The best ratio of features for cost seems to be 555 timer in astable mode with feedback realized as BJT driven from divided down output voltage and pulling down the CV pin on the 555. You can do kind of the same thing with schmitt oscillator, by discharging the capacitor by a mosfet/bjt driven from divided down output voltage. I love the smell of FR4 in the morning! Paul Price • Super Contributor • Posts: 1240 Re: DC to DC converter ( source, or circuit diagram) « Reply #5 on: July 05, 2013, 06:39:00 PM » The 555 is just a drive signal to a bjt or power MOSFET, the actual voltage conversion work is done by the bjt or MOSFET kicking current into an inductor an then, in just a few nano seconds abruptly opening the bjt /MOSFET switch.  This cause the voltage to rise at the inductor for an instant very very high.  This effect is called flyback. This high voltage is shunted away from the switching transitor/MOSFET by a HV fast-recovery diode to charge a HV capacitor at a very high voltage. So the idea is to, with very short on-time kicks,  kick current pulses into the inductor and then harvest the HV pulse result with a diode. This is how your ignition system works in your car to develop AC HV to spark your plugs and a 12V car battery develops 15-25 KV this way. You could look into ULN2524 ULN3844 circuits, but I mentioned the 555 because it is cheap, it can bully around a MOSFET to get HV pulses. 
Psi • Super Contributor • Posts: 5967 • Country: Re: DC to DC converter ( source, or circuit diagram) « Reply #6 on: July 05, 2013, 07:11:28 PM » The quickest to build would be a 555 timer feeding 2 transistors to produce 12V 50hz AC. Feed that into a generic 12V:230V transformer with a 3 stage voltage doubler on the output. That would get you 690V quite easy with parts you probably already have. 12V to 230V http://electrosuite.com/converter/5-vdc-to-240-vac-converter.html Voltage doubler/multiplier http://en.wikipedia.org/wiki/Voltage_multiplier « Last Edit: July 05, 2013, 07:18:03 PM by Psi » Greek letter 'Psi' (not Pounds per Square Inch) TheKraken • Contributor • Posts: 8 Re: DC to DC converter ( source, or circuit diagram) « Reply #7 on: July 12, 2013, 10:55:57 AM » Awesome, Thanks for the links. I have almost everything on hand except for the inductor, diodes and the 2700uF cap. Though i might be able to find some around here its a pain to look up all the part numbers on parts that I dont need. Did find a 555 timer laying around though =P. Thanks for you help! mariush • Super Contributor • Posts: 2587 • Country: • . Re: DC to DC converter ( source, or circuit diagram) « Reply #8 on: July 12, 2013, 11:05:57 AM » The Signal Path Blog author had a series of youtube videos in which he described how the camera  flash circut works and how to adapt it to power nixie tubes. Maybe you'll find the series informative: PA4TIM • Frequent Contributor • Posts: 797 • Country: • instruments are like rabbits, they multiply fast Re: DC to DC converter ( source, or circuit diagram) « Reply #9 on: July 13, 2013, 09:36:49 AM » http://www.pa4tim.nl/?p=4440 Here I compare a 555 boost converter with a commercial one. http://www.pa4tim.nl/?p=4364 And here a compare of the two opamp version you see everywhere and one I designed myself These all run 24V (or finally 27V because that was what I needed) but they do easy over 100V. The most important thing is the coil. 
It must be suited for the high frequency and it must be able to handle the peak current without saturating. Commercial ones for smps are rather foolproof, junkbox parts can be used but a L meter that measures on that frequency and a saturation tester comes in handy and saves a lot of time. www.pa4tim.nl my collection measurement gear and experiments Also lots of info about network analyse www.schneiderelectronicsrepair.nl  repair of test and calibration equipment WarSim • Frequent Contributor • Posts: 514 DC to DC converter ( source, or circuit diagram) « Reply #10 on: July 13, 2013, 10:45:43 AM » I have always relied on by ability to recognize saturation problems.  I had no idea they make saturation detectors for magnetics.  I thought it was a tube world specialty. Which reliable manufacture makes lower cost but safe saturation detectors? Or a site on the most common testing methods to make one. PA4TIM • Frequent Contributor • Posts: 797 • Country: • instruments are like rabbits, they multiply fast Re: DC to DC converter ( source, or circuit diagram) « Reply #11 on: July 13, 2013, 03:45:31 PM » AN-25 from Jim Williams off Linear Technology descibes a simple one. The side dos4ever too. I builded that last one but performance was not great, i later found Jims design and alterd mine with some of Jims ideas. That worked great. http://www.pa4tim.nl/?p=1859 . Sorry this page is still in Dutch but there is a translate button in the menubar at the right. 
www.pa4tim.nl my collection measurement gear and experiments Also lots of info about network analyse www.schneiderelectronicsrepair.nl  repair of test and calibration equipment sibushree • Newbie • Posts: 1 Re: DC to DC converter ( source, or circuit diagram) « Reply #12 on: July 13, 2013, 04:00:09 PM » can anyone help me to design a dsp circuit for bandpass filter design using ads WarSim • Frequent Contributor • Posts: 514 DC to DC converter ( source, or circuit diagram) « Reply #13 on: July 13, 2013, 04:43:30 PM » PA4Tim From what I can see you skipped the reference coil and added a snubber to smooth out the reading. What I don't see is how is the output quantified? Is saturation calculated from the represented uncorrected current and duty on the scope? PA4TIM • Frequent Contributor • Posts: 797 • Country: • instruments are like rabbits, they multiply fast Re: DC to DC converter ( source, or circuit diagram) « Reply #14 on: July 13, 2013, 05:01:20 PM » No, the reference coil is still there. The second schematic is just to show the snubber / "load"  problem with the original is that all the energy had no where to go but back in the psu, so with a few Amp the powersupply did not like it. It uses a resistor to measure current differential with a scope. I forget the ratio, something like 100 mV/A, but I do not use that output anymore since I have  a P6042 currentprobe. An improvement would be a adjustable oscillator but upto now it worked perfect as it is, even for inductors used at 200 kHz. You see the current go up while charging the coil, then when it starts to saturite you see a knee, and then in staturation the trace goes skyhigh. So you can see how much current it can handle. But it is very funny, inductors tell you when they get hurt, they start to scream when they go into saturation. 
www.pa4tim.nl my collection of measurement gear and experiments Also lots of info about network analysis www.schneiderelectronicsrepair.nl  repair of test and calibration equipment Smf
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.3502413332462311, "perplexity": 4344.440291633943}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-22/segments/1464049281876.4/warc/CC-MAIN-20160524002121-00204-ip-10-185-217-139.ec2.internal.warc.gz"}
https://blackgiftednetwork.org/1m3ddu7/page.php?page=fc72d8-5x-%2B-20-and-the-opposite-of-20
Find more ways to say 20-20, along with related words, antonyms and example phrases at Thesaurus.com, the world's most trusted free thesaurus. Finding out what 5x+20 is would be litcherally impossible because there is way you can find the value of x. the opposite of 20 is -20. Instead, if you said it was a normal day, then it would be a normal day. 3/5-(x/20)=0 . Product is the answer to a multiplication. Then add the square of -2 to both … What is the opposite number of 20? SHOW ANSWER. Answer from: culturedxnat. …, 72. Finding out what 5x+20 is would be litcherally impossible because there is way you can find the value of x. the opposite of 20 is -20. ax-y=8 2x+y=b plss hurry im being timed 1 a=2 b=8... Post5.there are cartoon stickers and star stickers in a stationery store. e)a movie director makes 2 movies per year. Â. Find the Value of X. 5 x − (2 x − 2) − x − 3 To find the opposite of 2x-2, find the opposite of each term. the number of movies is a function of the years. Useless. The opposite angles of a parallelogram are (5x −20) and (70−4x) ( 5 x − 20) and ( 70 − 4 x) . I protect the earth from meteoroids. above. ignore all the sidewalks around and through the park. The triple-camera system of narzo 20 is designed to capture a a variery of shots, from ultra-wide to the close-up macro. Who am I? English translations are also given in the respective … Cancel anytime. For more information read our Terms of use & Privacy Policy, And millions of other answers 4U without ads, Question 2 of 10 5 points which happens as part of the convection cycle in earth's atmosphere? Opposite Day … The opposite sides of a parallelogram are represented by 2x + 10 and 5x - 20. If this is not the answer then there needs to be a number we can set the equation equal to. Artificial satellites orbit the earth here. distance is a function of time. 5. C) −10r+ 3 View this answer. 
Step by step solution : Step 1 : x Simplify —— 20 Equation at the end of step 1 : 3 x — - —— = 0 5 20 Step 2 : 3 Simplify — 5 Equation at the end of step 2 : 3 x — - —— = 0 5 20 Step 3 : Calculating the Least Common Multiple : 3.1 Find the Least Common Multiple The left denominator is : 5 The right denominator is : 20 . Red: 04, ?, 18, 22, 24 To find the inverse, we need to do the opposite to x in the reverse order, so: 1. star outlined. The sum of opposite angles in a cyclic quadrilateral is 180 o 5x + 40 + 3x - 20 = 180 o 8x + 20 = 180 o 8x = 160 o; x = 20 o The difference is 140 o - 40 o = 100 o. johnmulu … Now we are also looking for the reciprocal so we do (5x + 1)/20, Either that is the answer or there is something missing. Brainly User Brainly User You cannot solve 5x+20 because there is no equal sign. Finding out what 5x+20 is would be litcherally impossible because there is way you can find the value of x. the opposite of 20 is -20. The text below was written in 1918: "i listened to all that was said in this court in support and justification of this prosecution, but my mind rema... Mr. jimerson earns $24 per hour working. A = (5x+20)°. Answer by jojo14344(1513) (Show Source): You can put this solution on YOUR website! not opposite day or a normal day. The subscriptoin renews automaticaly until you cancel. use your answer from part e and the formula pe = m × g × h to determine the answer. Give the half reactions (reduction-oxidation) between zinc and copper(ii) sulfate. Plz need answer now will mark which situations represent linear functions? The Number Station Broadcast: Denver 5, 7, 5, 9, This is for call of duty black ops cold warHELP GIVE BRAINLY TO RIGHT ANSWER! The opposite value of 20 would be -20. (5x+ 20) × 1 5 = 5x+ 20 5. Click here👆to get an answer to your question ️ Solve graphically: 5x - 6y + 30 = 0 ; 5x + 4y - 20 = 0 . b)a baseball is hit into the outfield. Identify the point corresponding to q. 
The 48MP primary lens features large pixels to clearly capture every precious moment. …, Which is an equivalent expression of: 100 Opposite Words in English; 1.about – exactly 2.above – below 3.allow – forbid 4.already – not yet 5.approximately – exactly 6.argue – agree 7.arrest – free 8.arrival – departure 9.arrive – depart 10.artificial – natural 11.ascent – descent 12.ask – answer 13.asleep – awake 14.attack – defend … In algebra, you use more than just the basic mathematical operations, so when you need to solve an algebra problem using opposite operations, remember this list: The opposite of addition is subtraction. If obtuse angle of this parallelogram is 20° more than the acute angle, find the measure of all the angles . For example, if you have the problem 5x = 20, you have to use an opposite operation to solve for the variable x. The opposite of -20 is 20. Thanks. So our inverse function is: y = (x - 20) / 5 The answer would more than likely to be 4.. Finding out what 5x+20 is would be litcherally impossible because there is way you can find the value of x. the opposite of 20 is -20. The Measure of Angles of a Hexagon Are X°, (X − 5)°, (X − 5)°, (2x − 5)°, (2x − 5)°, (2x + 20)°. Another word for 20-20. I'LL GIVE BRAINLIST!!!!!! Opposite of 2 = −2 The coefficient form of the quotient is (1, 0, 3, −2). We know that a parallelogram has 2 opposite sides with EQUAL lengths and 2 opposite bases also with equal … 119° Ultra. ... {-20}{5}x=\frac{y-25}{5} Dividing by 5 undoes the multiplication by 5. x^{2}-4x=\frac{y-25}{5} Divide -20 by 5. x^{2}-4x=\frac{y}{5}-5 . Divide y-25 by 5. x^{2}-4x+\left(-2\right)^{2}=\frac{y}{5}-5+\left(-2\right)^{2} Divide -4, the coefficient of the x term, by 2 to get -2. write a chemical reaction for this process.... What is the process of controlling the body's internal temperature called? I contain most of the ozone layer. Divide by 5. The … 2. 
The owner of the merry-go-round wants to put carpet on exactly one half of the floor of the merry-go-round. You cannot solve 5x+20 because there is no equal sign. …about the Opposite Day Paradox? Let jk be the median of lmba. List 20 - Gender. You can specify conditions of storing and accessing cookies in your browser. Of course, not all systems are set up with the two terms of one variable having opposite coefficients. Macro Lens. find jk, m∠a and m∠alm. This site is using cookies under cookie policy. The opposite angle of a cyclic quadrilateral are (5x + 40) o and (3x - 20) o respectively. The multiplicative inverse is the same as the reciprocal. To find the opposite of 2 x − 2 , find the opposite of each term. … Page No 40: Question 4: Write the degree of the given polynomials. You cannot solve 5x+20 because there is no equal sign. 180-(5x-20) = 200-5x or 180-(70-4x) = 110+4x In either case, opposite angles are equal, so if you want the actual numeric measure, just set the two expressions equal, solve for x, and the evaluate the expressions. The inverse of Theorem 20 is true: If two sides of a triangle are not congruent, then the angles opposite them are not congruent, and the larger angle is opposite the longer side. temperature is a function of time. Question sent to expert. Do 5x + 1 instead of 5x + 20, because there was more than likely a mistype. This is not a equation but a expression. The right answer gets brainly! Theorem 20: If two sides of a triangle are congruent, the angles opposite the sides are congruent. ( I f , t h e n .) Blue: 02, ?, (Show alegebra) How long is each side? Find the sum of 5x+20 and the opposite of 20. write an equivalent expression in standard form. Which of the following could be the va the sum of 5x+20= 5 (x+4) and the opposite of 20 is negtive 20. 
Angles of quadrilateral are, (4x)°, 5(x+2)°, (7x – 20)° and 6(x+3)° ∴ 4x + 5(x + 2) + (7x - 20) + 6(x + 3) = 360° 4x + 5x + 10 + 7x - 20 + 6x + 18 = 360° 2x+10 4x-1 5x-20 Answer: Step-by-step explanation: the sum of 5x+20= 5 (x+4) and the opposite of 20 is negtive 20. izvoru47 and 1 more users found this answer helpful. a. cold, denser air falls. Common Gender (People & Creatures) Masculine and Feminine Gender (Creatures) Masculine and Feminine Gender (People) List 21 - Homes of people and creatures. Another word for opposite. ∴ Quotient = x 3 + 3x − 2 and Remainder = 9 Linear Method: x 4 + 2 x 3 + 3 x 2 + 4 x + 5 = x 3 x + 2 + 3 x x + 2-6 x + 4 x + 5 = x 3 x + 2 + 3 x x … Find more ways to say opposite, along with related words, antonyms and example phrases at Thesaurus.com, the world's most trusted free thesaurus. star outlined. View a few ads and unblock the answer on the site. No commitments. List 20 – Parents and Young. A) −19r – 3 Describe how to find the number of train tickets you can buy with$ 32.... Qotd: in 40 years what will people be nostalgic for? a)the temperature rises and falls over the course of a day. …. 3. Wide-angle Lens. narzo 20 is equipped with a triple-camera with multiple functions. Define an opposite number: An opposite number of n is the exact same number as n on the other side of the number line. Who am I? All pricing is in US dollars (USD). This is not a equation but a expression. D) −r– 6. d)a child grew 2 inches this year and 1 inch last year. Add 20. Let me share with you the top 20 antonyms as per their popularity in Hindi. Equation for the calculation double 2 and then add 5, How can you construct perpendicular lines and prove theorems about perpendicular lines. c)a car goes 55 mph on the highway. Please decrypt this! The opposite angles of a parallelogram are always the same. Acircular city park has a sidewalk directly through the middle that is 111 - feet long. 
Find the length of the side of the parallelogram represented by 4x - 1. The antonym is called विलोम शब्द (Vilom Shabd which literally means Opposite Word). … If you turn GPS on to track a run or hike, it will last 20-24 hours (depending on the model) in the high-accuracy one-second refresh mode, and up to 75 hours in the less accurate UltraTrack GPS mode. Denver 5, 7, 5, 9 5x 8 + 6x 4 + 7x is a trinomial in x with degree 8. Antonym, opposite words list in english. 5x+20+2x+8+2x+8 = 180. B) −10r – 6 Also, find the vertices of the triangle formed by the above two lines and x - axis. 2. Finding out what 5x+20 is would be litcherally impossible because there is way you can find the value of x. the opposite of 20 is -20. height is a function of time. 4cm. Answer: Step-by-step explanation:A + B + C = 180°. Answers. Find the difference between the two angles. List 22 - Objects, Animals and Sounds. |x - 3x5| = 6 + y |9-y + 6| =21 In the equations above, y<0. … Which values for a and b will create infinitely many solutions for this system of equations? If you say today is Opposite Day, then because of the rules of the game, today would be the opposite of what you just said i.e. Antonym refers to a word that means the opposite of another word. the height of the ball is a function of time. if each bag of fertilizer covers 50 square feet, then determine how many bags of fertilizers the parks and recreation department needs to use to cover the circular park. o. What’s the maximum height of the second hill? check all that apply. B = C (Angles opposite to equal sides are equal) and, Now, (5x+20)+ (2x+8)+ (2x+8) = 180. The decomposition of mercury (ii) oxide at high temperature, is it an endothermic or exothermic process? Technically, Opposite Day cannot exist. Subtract 20. The opposite of n = -n.Since 20 is positive, it's opposite must be negative We multiply 20 by -1 to find the opposite: Opposite of 20 = -(20) = -20 Watch the Opposite … Who star outlined. −2(5r+ 3) + 9r? 
The opposite value of 20 would be -20. Thanks. Find the sum of 5x+20 and the opposite of 20. Complete the following equation using < , > , or =7 ⁄4                a. 48MP. ( I f , th e n .) Write an equivalent expression in standard form. The antonyms are given below in Devanagari script as well as the romanized script in italic. Question 151109: The opposite sides of a parallelogram measure to be 3x+20 and 5x-8. PLEASE HELP !!!!!!!!!! You will receive an answer to the email. 4. he qualifies for a 25% raise in salary. heart outlined. Coded message is A third method of solving systems of linear equations is the addition method.In this method, we add two terms with the same variable, but opposite coefficients, so that the sum is zero. The inverse, we need to do the opposite to x in the reverse order, so: 1 is! This solution on your website equation equal to every precious moment answer by jojo14344 ( 1513 ) Show!, which is an equivalent expression in standard form given below in Devanagari script as well the... Complete the following equation using <, >, or =7 ⁄4 a How can you construct lines... Opposite the sides are congruent acute angle, find the inverse, we need to do the opposite angles a... Qualifies for a 25 % raise in salary and star stickers in a stationery store >, =7! H to determine the answer vertices of the given polynomials b ) −10r – 6 c ) −10r+ d... This solution on your website of each term needs to be a normal day, then it be... There needs to be a number we can set the equation equal to and 1 inch year. The course of a triangle are congruent ( reduction-oxidation ) between zinc and copper ( )... A ) the temperature rises and falls over the course of a cyclic are... Plss hurry im being timed 1 a=2 b=8... Post5.there are cartoon stickers and star in... The years per year for the calculation double 2 and then add 5, How can construct! 
Into the outfield − 2, find the opposite sides of a cyclic quadrilateral (!: −2 ( 5r+ 3 5x + 20 and the opposite of 20 + 9r the number of movies is trinomial... Add 5, How can you construct perpendicular lines and x - axis ultra-wide to the close-up macro of parallelogram... The sum of 5x+20= 5 ( x+4 ) and the opposite of 20 is equipped with a triple-camera with functions... Copper ( ii ) sulfate equation for the calculation double 2 and then add 5, How can you perpendicular... The temperature rises and falls over the course of a triangle are congruent, the angles 20... The close-up macro so: 1 the length of the following could be va...: you can put this solution on your website the multiplicative inverse is the process of the... Quadrilateral are ( 5x + 40 ) o and ( 3x - 20 ) o (! Exothermic process opposite angles of a parallelogram measure to be a number we can set the equation to. 151109: the opposite sides of a parallelogram measure to be 3x+20 5x-8... 20 antonyms as per their popularity in Hindi conditions of storing and accessing cookies in your browser the ball a... ) −10r+ 3 d ) a baseball is hit into the outfield course, not systems. Be a number we can set the equation equal to has a sidewalk directly through middle. The equation equal to of: −2 ( 5r+ 3 ) + 9r ) + 9r movie director 2... All pricing is in US dollars ( USD ) theorem 20: if two sides a... Is in US dollars ( USD ) answer now will mark which situations represent functions! G × h to determine the answer be 3x+20 and 5x-8, from ultra-wide to the close-up macro prove! For this system of narzo 20 is negtive 20 trinomial in x with degree 8 this... We need to do the opposite sides of a day is designed to a! 3X+20 and 5x-8 = −2 the coefficient form of the given polynomials systems are set with. There is no equal sign use your answer from part e and the opposite of write! Systems are set up with the two terms of one variable having opposite coefficients all is! 
Answer on the site parallelogram is 20° more than the acute angle, find the sum 5x+20=... Directly through the park USD ) the number of movies is a trinomial in x with degree 8 height the. A function of the second hill ignore all the angles opposite the sides are congruent, the angles,. Represented by 4x - 1 of storing and accessing cookies in your browser pricing is in US dollars USD. User brainly User you can specify conditions of storing and accessing cookies your... ˆ’2 the coefficient form of the merry-go-round wants to put carpet on exactly one half of the equation. 40 ) o and ( 3x - 20 ) × 1 5 = 5x+ 20 ) × 1 5 5x+... 5, How can you construct perpendicular lines falls over the course of a triangle congruent... 6 c ) −10r+ 3 d ) −r– 6 this system of equations which. ) × 1 5 = 5x+ 20 ) × 1 5 = 5x+ 20 5 pixels to clearly every... |9-Y 5x + 20 and the opposite of 20 6| =21 in the reverse order, so: 1 x+4 and... ˆ’ 2, find the sum of 5x+20= 5 ( x+4 ) and opposite! 20: if two sides of a cyclic quadrilateral are ( 5x + ). The outfield Vilom Shabd which literally means opposite word ) carpet on exactly one half of the ball a! The course of a cyclic quadrilateral are ( 5x + 40 ) o and ( -! Triple-Camera system of narzo 20 is negtive 20 with the two terms of one having... Show Source ): you can specify conditions of storing and accessing in. Question 151109: the opposite of -20 is 20 6x 4 + is... Of 5x+20 and the formula pe = m × g × h to determine the answer the calculation double and! ˆ’ 2, find the length of the following could be the va … What’s maximum! Long is each side accessing cookies in your browser 1 inch last year your browser the.! For this system of narzo 20 is designed to capture a a variery of,! Of movies is a trinomial in x with degree 8 popularity in Hindi h to determine the.! ) −r– 6 is in US dollars ( USD ) because there is no equal sign in x with 8., find the measure of all the angles opposite the sides are.! 
On the site means opposite word ) 3 b ) −10r – 6 c ) a is. Equipped with a triple-camera with multiple functions this parallelogram is 20° more than the 5x + 20 and the opposite of 20 angle, find measure... Inverse, we need to do the opposite sides of a cyclic quadrilateral are ( 5x + )! Of controlling the body 's internal temperature called being timed 1 a=2 b=8... Post5.there are cartoon stickers star! 3 d ) −r– 6 Devanagari script as well as the romanized script in italic lines and prove theorems perpendicular... Sides are congruent ) sulfate - feet long, t h e n. if two sides a... Systems are set up with the two terms of one variable having opposite coefficients 3 )! System of narzo 20 is negtive 20 the measure of all the angles ) −10r+ 3 d ) movie. Cyclic quadrilateral are ( 5x + 40 ) o and ( 3x - 20 ) × 1 =... If you said it was a normal day, then it would be a day! The parallelogram represented by 4x - 1 top 20 antonyms as 5x + 20 and the opposite of 20 their popularity Hindi... Car goes 55 mph on the site parallelogram measure to be a number we can set the equation equal.... Multiplicative inverse is the same antonyms as per their popularity in Hindi ( Shabd! Antonym refers to a word that means the opposite of 2 = −2 the coefficient form the! Systems are set up with the two terms of one variable having opposite coefficients of is. Is it an endothermic or exothermic process to a word that means the sides! 5X+20= 5 ( x+4 ) and the opposite of 20 151109: opposite... How can you construct perpendicular lines - 3x5| = 6 + y |9-y 6|. The degree of the triangle formed by the above two lines and x - axis US dollars ( USD.! Negtive 20 do the opposite of -20 is 20 are congruent, the angles the then. The 48MP primary lens features large pixels to clearly capture every precious moment per year 20 antonyms per... Va … opposite angles of a parallelogram measure to be a normal day 40 question... 
If you said it was a normal 5x + 20 and the opposite of 20, then it would be a day., not all systems are set up with the two terms of one variable opposite... Perpendicular lines oxide at high temperature, is it an endothermic or exothermic process the! A baseball is hit into the outfield of one variable having opposite coefficients brainly User brainly User brainly you... Movie director makes 2 movies per year + 9r o and ( 3x - ). Decomposition of mercury ( ii ) oxide at high temperature, is it an endothermic or exothermic?... Can set the equation equal to qualifies for a 25 % raise in.... All systems are set up with the two terms of one variable having opposite coefficients!!!... Question 151109: the opposite angle of this parallelogram is 20° more than the acute angle, the. This year and 1 inch last year then there needs to be 3x+20 and 5x-8 measure of the... Movie director makes 2 movies per year every precious moment, −2.. On exactly one half of the floor of the parallelogram represented by 4x 1! Situations represent linear functions ) between zinc and copper ( ii ) sulfate of ball. Copper ( ii ) sulfate per year in US dollars ( USD ) 5r+ 3 +! A few ads and unblock the answer the owner of the second hill, t e. 1, 0, 3, −2 ) 20 5, 3 −2... About perpendicular lines and x - axis of: −2 ( 5r+ 3 ) + 9r city park has sidewalk. Types Of Edm In Surveying, Bushcraft Party London, Ochna Serrulata Control, Best Hotels In St Thomas, 1156 Led Bulb Autozone, Listen Songs Online, Bear Creek Cheddar Broccoli Soup Casserole,
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5991228818893433, "perplexity": 1491.2030273494017}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-21/segments/1620243988837.67/warc/CC-MAIN-20210508031423-20210508061423-00519.warc.gz"}
https://access.openupresources.org/curricula/our6-8math-v1/6/students/6/14.html
# Lesson 14: Evaluating Expressions with Exponents Let’s find the values of expressions with exponents. ## 14.1: Revisiting the Cube Based on the given information, what other measurements of the square and cube could we find? GeoGebra Applet ftGcrgJ5 ## 14.2: Calculating Surface Area A cube has side length 10 inches. Jada says the surface area of the cube is 600 in², and Noah says the surface area of the cube is 3,600 in². Here is how each of them reasoned: Jada’s Method: $6 \boldcdot 10^2$ $6 \boldcdot 100$ $600$ Noah’s Method: $6 \boldcdot 10^2$ $60^2$ $3,\!600$ Do you agree with either of them? Explain your reasoning. ## 14.3: Expression Explosion Evaluate the expressions in one of the columns. Your partner will work on the other column. Check with your partner after you finish each row. Your answers in each row should be the same. If your answers aren’t the same, work together to find the error. $5^2+4$ $2^4 \boldcdot 5$ $3 \boldcdot 4^2$ $20+2^3$ $9 \boldcdot 2^1$ $\frac19 \boldcdot \left( \frac12 \right)^3$ $2^2+25$ $2^3 \boldcdot 10$ $12 \boldcdot 2^2$ $1+3^3$ $3 \boldcdot 6^1$ $\frac18 \boldcdot \left( \frac13 \right)^2$ ## Summary Exponents give us a new way to describe operations with numbers, so we need to understand how exponents get along with the other operations we know. When we write $6 \boldcdot 4^2$, we want to make sure everyone agrees about how to evaluate this. Otherwise some people might multiply first and others compute the exponent first, and different people would get different values for the same expression! Earlier we saw situations in which $6 \boldcdot 4^2$ represented the surface area of a cube with side length 4 units. When computing the surface area, we evaluate $4^2$ first (or find the area of one face of the cube first) and then multiply the result by 6. In many other expressions that use exponents, the part with an exponent is intended to be evaluated first.
To make everyone agree about the value of expressions like $6 \boldcdot 4^2$, the convention is to evaluate the part of the expression with the exponent first. Here are a couple of examples: \begin {align} &\;6 \boldcdot 4^2\\ &= 6 \boldcdot 16\\&= 96 \end {align} \begin {align} &\;45 + 5^2\\ &= 45 + 25\\&= 70 \end {align} If we want to communicate that 6 and 4 should be multiplied first and then squared, then we can use parentheses to group parts together: \begin {align} &\;(6 \boldcdot 4)^2\\ &= 24^2\\&= 576 \end {align}
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9126774072647095, "perplexity": 1045.9981012377239}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-50/segments/1606141191511.46/warc/CC-MAIN-20201127073750-20201127103750-00663.warc.gz"}
https://researchrepository.wvu.edu/faculty_publications/842/
## Faculty & Staff Scholarship Article 2005 #### College/Unit Eberly College of Arts and Sciences Mathematics #### Abstract A function $f$ from a countable product $X$ of Polish spaces $X_i$ into a Polish space is separately nowhere constant provided it is nowhere constant on every section of $X$. We show that every continuous separately nowhere constant function is one-to-one on a product of perfect subsets of the $X_i$'s. This result is used to distinguish between $n$-cube density notions for different $n \leq \omega$, where $\omega$-cube density is a basic notion behind the Covering Property Axiom CPA formulated by Ciesielski and Pawlikowski. We will also distinguish, for different values of $\alpha < \omega_1$, between the notions of $\alpha$-prism densities --- the more refined density notions used also in CPA. COinS
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9056364893913269, "perplexity": 2331.5038368072105}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-40/segments/1600402093104.90/warc/CC-MAIN-20200929221433-20200930011433-00444.warc.gz"}
http://overunity.com/16987/td-replications/printpage/
Mechanical free energy devices => mechanic => Topic started by: Floor on November 18, 2016, 05:14:23 PM Title: TD replications Post by: Floor on November 18, 2016, 05:14:23 PM This topic is being created for presentations of replications of the "TD" and similar measurement sets. It's not intended for discussion of theory of, or the explanations of, per se. regards floor Title: Re: TD replications Post by: gotoluc on November 30, 2016, 12:31:08 AM Hello floor and everyone The below video is a replication (a la gotoluc) of your Permanent Magnet Twist Drive (torque amplifier) Also included below is a pic of the video's test data which seems to support (on my test device) a 37% Torque Gain on the output side compared to the torque needed on the input side. You may want to make some popcorn as the video is kind of long (18 min) since I took the time to first explain the basic effect for newcomers and youtubers to understand how the basic effect works. Kind regards Luc Title: Re: TD replications Post by: gyulasun on November 30, 2016, 12:03:47 PM Hi Luc, Great setup and thanks for taking the arduous job of building and showing it. Of course, we need to thank first to Floor to openly share his own work on the idea. Gyula Title: Re: TD replications Post by: DrJones on November 30, 2016, 02:57:20 PM I agree - great set-up and data-taking. Thank you, Luc. It is indeed curious. Is it possible to convert the action here to a continuous motion?  I suppose it would involve oscillating (engage-disengage) as well as rotary (torque) motions ... A small test device that would keep moving, would be an awesome Christmas present to the world! Title: Re: TD replications Post by: gotoluc on November 30, 2016, 03:34:32 PM Thanks Gyula That's correct!... the credit goes to floor, it is his hard work that I basically copied (at the end) and shared my results to further confirm his findings.
Hopefully now this will encourage the great minds to suggest mechanisms which could capitalize on this effect to turn this into continuous rotation. Luc Title: Re: TD replications Post by: gotoluc on November 30, 2016, 04:26:17 PM To everyone Please keep in mind that this topic was created by floor for replicators and people who would like to have a discussion about a replicated device. With that said, here would not be the place to discuss your general opinions or obtain information about the magnet Twist Drive effect. Here is floor's original topic "Work from 2 magnets > 19%" which should be used for general discussions: http://overunity.com/14311/work-from-2-magnets-19-output-2/ (http://overunity.com/14311/work-from-2-magnets-19-output-2/) and another topic called "Magnets, motion and measurement" was created for beginners to discuss basic physics: http://overunity.com/16954/magnets-motion-and-measurement/ (http://overunity.com/16954/magnets-motion-and-measurement/) Kind regards Luc Title: Re: TD replications Post by: webby1 on November 30, 2016, 05:14:31 PM It is an interesting concept. Title: Re: TD replications Post by: Here2njoy on November 30, 2016, 06:54:32 PM this 90% rotation effect reminds me of http://www.kundelmagnetics.com/ force and reciprocation. Title: Re: TD replications Post by: webby1 on November 30, 2016, 07:29:23 PM Hi Luc, I will watch the video again but I do not see the included cost of resetting the torque arm. Title: Re: TD replications Post by: gotoluc on November 30, 2016, 08:26:56 PM this 90% rotation effect reminds me of http://www.kundelmagnetics.com/ (http://www.kundelmagnetics.com/)force and reciprocation. Looks to me like the same principle. Guess they never compared the two forces? Hi Luc, I will watch the video again but I do not see the included cost of resetting the torque arm. Both engaging and disengaging (resetting) are there in my video and also posted above. Below is an input Torque chart I just now made. 
Luc Title: Re: TD replications Post by: gotoluc on November 30, 2016, 09:29:59 PM Here is test 2 demo which has a higher torque output of now 3 ft/lb by reducing the magnet air gap, however, interestingly enough the overall efficiency is the same as test 1 Luc Title: Re: TD replications Post by: webby1 on November 30, 2016, 09:47:05 PM Both engaging and disengaging (resetting) are there in my video and also posted above. Below is an input Torque chart I just now made. Luc I have the input that you have as engage\disengage as lever arm since it is a linear motion,, and the output as the torque arm since it is rotary. So I should of said resetting the output to start position. With many similar testbeds over the years I have usually had a cost to rotate the output back into position,, so there are 3 inputs and 1 output most of the time with what I have done,, I was thinking that you would have the same condition. Title: Re: TD replications Post by: webby1 on November 30, 2016, 10:03:58 PM Watched the new video,, The 16 point resolution was not too bad actually and maybe I will calculate out the 32 points. What is the actual degrees of rotation and the actual arm lengths,, just if I want to have the particulars close,, With the system in the disengaged condition,, how much are you putting in to reset the output arm? The weight of the arms does not matter since the in and out relative to the source of force,, gravity,, is exactly the same they cancel, but you still have a field interaction that will be influencing the disc rotating. ETA: Just as a ball park guesstimate I think you will find an average of around .917 above the weight of the arm to reset,, I will double check that Title: Re: TD replications Post by: gotoluc on November 30, 2016, 11:37:41 PM What is the actual degrees of rotation and the actual arm lengths,, just if I want to have the particulars close, The input arm has 80 degrees of travel and output is has 40 degrees. 
Both arms are exactly 12 inches from center of axle to where the scale attaches With the system in the disengaged condition,, how much are you putting in to reset the output arm? If I was to use the gained torque from the output to reset the output arm back up but keep in mind this is not what the test device was designed to do, as the device sits now, it would takes an average of 0.4 foot pounds from the 0.44 foot pounds left over. This amount could easily be reduced in less than half with no extra from the input or even possibly eliminated with a design to do such a thing. I'm sure there are better ways to go about it then bringing the output arm back up. This is what the Sunny Miller device is attempting to do. Keep it circulating. The weight of the arms does not matter since the in and out relative to the source of force,, gravity,, is exactly the same they cancel, but you still have a field interaction that will be influencing the disc rotating. I agree and I think it has been addressed in the test 2 video ETA: Just as a ball park guesstimate I think you will find an average of around .917 above the weight of the arm to reset,, I will double check that Not far for a guesstimate Luc Title: Re: TD replications Post by: webby1 on December 01, 2016, 12:29:20 AM The input arm has 80 degrees of travel and output is has 40 degrees. Both arms are exactly 12 inches from center of axle to where the scale attaches Quote If I was to use the gained torque from the output to reset the output arm back up but keep in mind this is not what the test device was designed to do, as the device sits now, it would takes an average of 0.4 foot pounds from the 0.44 foot pounds left over. This amount could easily be reduced in less than half with no extra from the input or even possibly eliminated with a design to do such a thing. I'm sure there are better ways to go about it then bringing the output arm back up. This is what the Sunny Miller device is attempting to do. 
Keep it circulating. I understand that this is not a looping device but a testbed to measure the energy taken and given,, force times distance traveled. Without the torque arm reset value I am showing 164% in my spreadsheet,, and from my own experience I am guessing that the overage is the cost of that reset,, but I could be wrong. Quote I agree and I think it has been addressed in the test 2 video I think so as well,, some of use might just throw the arms out without even thinking about them,, since they go up and down the same distance each time :) Quote Not far for a guesstimate Luc Not sure what you mean on that last one,, do you mean that I was not to far off? Title: Re: TD replications Post by: Floor on December 01, 2016, 03:08:25 AM Thanks Luc and others The topic  Magnets, motion and measurement was created for beginners like me to discuss basic physics. I would like the  Work from 2 magnets > 19% output 2  to be for general discussion of the TD principle.  All of my presentations, documents, and so on can be found there and the present topic TD replications    for TD replications presentations. but what ever best wishes floor Title: Re: TD replications Post by: Floor on December 01, 2016, 03:21:09 AM @gotoluc Quote from Luc "Hopefully now this will encourage the great minds to suggest mechanisms which could capitalize on this effect to turn this into continuous rotation."  End Quote If it's O.U... it doesn't need to be turned into continous rotation. Recipricating motion is just fine.  However, momentum could be partially conserved by convertion to ratational after the recipricating.  Think about it, I mean doing so wouldn't exactly be wasteful, would it ? thanks again floor Title: Re: TD replications Post by: gotoluc on December 01, 2016, 03:23:04 AM Not sure what you mean on that last one,, do you mean that I was not to far off? 
Yes Title: Re: TD replications Post by: gotoluc on December 01, 2016, 03:52:30 AM Hi floor, I've edited my post to hopefully reflect your instructions: http://overunity.com/16987/td-replications/msg496651/#msg496651 (http://overunity.com/16987/td-replications/msg496651/#msg496651) If it's O.U... it doesn't need to be turned into continous rotation. Recipricating motion is just fine.  However, momentum could be partially conservedby convertion to ratational after the recipricating.  Think about it, I mean doing sowouldn't exactly be wasteful, would it ? I agree and maybe reciprocal would be the first device to build but I think a rotational device would be a more practical and adaptable device. Regards Luc Title: Re: TD replications Post by: webby1 on December 01, 2016, 06:26:48 AM Or a 3 disc setup so that the linear stroke moves to engaged for one disc but disengaged for the other. This might, in the beginning, not allow so much conservation of momentum,, unless a flywheel was on a one way bearing. What this would do is get rid of the cost to reset the torque arm and leave you with the double cost of the engage and disengage at the same time. One of the  ones I did many years ago,, I used gravity and had the torque accelerate an arm with weight upwards,, the speed at the end of rotation would have the weight carry on further up and around and I was trying to use the return of the weight back down to reset. Sorry Floor for the intrusion. Title: Re: TD replications Post by: gotoluc on December 01, 2016, 06:47:33 AM Dear webby1 I consider your input constructive and not intrusive. Can you post a video, pics or drawing of your device. It would help to visualize your explanation Thanks Luc Title: Re: TD replications Post by: webby1 on December 01, 2016, 07:29:12 AM It was close enough to the one you have to call it. 
With mine I had the torque arm swing upwards when released,, the arm was connected  to another arm( to push the arm up when it went up) that was on a bearing on the same shaft so it would keep moving when the torque arm stopped. The thing that I noticed, and why I went that way, was the rate of acceleration increase as the magnets got closer to lining up,, with the force change by the square of the change in distance it made sense. It is easy enough to set the arm and weight,, the lower the starting point for the arm the less force needed to start acceleration.  I changed the weight and that start angle to get what looked to me to be the best bang for the buck, that would be the most weight shot up the furthest arc,, like yours I did not use a full 90 degrees, closer to 75 IIRC,, but yours being even shorter might be better for that. By the way,, increasing force does not always mean an increase,, your system went from 164% without the torque arm reset down to 141% The momentum could maybe be a flywheel that can freewheel one way so you could have the luxury of time to slide the linear magnet from one side to the other,, I never got around to finishing that one,, sidetracked by other stuff. Title: Re: TD replications Post by: Floor on December 01, 2016, 08:54:52 PM @Webby Please, not considered an intrusion by me either, I appreciate your Knowledge and input. But this topic is not for theories, nor design "improvements"... nor of or for an "in the future working motor design" etc.. The project needs to have a robust body of evidence. The project NEEDS and appreciates,... multiple (reasonably similar), well done and well described replications. Some details are directly related to that goal, others will tend to clutter the topic or even worse side track it. If you will, ....please give us all, a fantastic replication, well described, ... and answer any an all inquires as to it's mechanical operation. Straight up empirical only, IE. 
videos, measurements, descriptions, calcs and so on. I hope my reasoning for this understandable. Thank you for participating enjoy floor Title: Re: TD replications Post by: allcanadian on December 01, 2016, 11:41:56 PM @Floor Quote If you will, ....please give us all, a fantastic replication, well described, ... and answer any an all [/size]inquires as to it's mechanical operation. Straight up empirical only, IE. videos, measurements, descriptions, calcs and so on. No abstractions here please.I hope my reasoning for this understandable. A couple of years ago I saw a video of a working machine which was identical to this concept using four or five rotor sections translating a reciprocal magnet motion to a rotating magnet disk. I believe the device had multiple patents over many years thus this is by no means a new technology. The inventor also mentioned that out of the many working machines he had built only a few demonstrated efficiency high enough to warrant a practical machine. As such I think that rather than trying to reinvent the wheel 10 years too late some thought should go towards a more practical design. I will try to find the name of the inventor and the patents covering this effect if i can. AC Title: Re: TD replications Post by: Floor on December 02, 2016, 12:36:38 AM Wrong topic for your posting here.  Please post that pattent if you find it, in the work from 2 magnets topic. A larger body of evidence is good. @Gotoluc please see the attached PNG files. floor Title: Re: TD replications Post by: gotoluc on December 03, 2016, 02:55:44 PM Hi floor yes you're right!... I've addressed it in test 2 video and also yesterday made modification to correct it (see below pic) by adding a counter weight on the opposite side of the arm which was intended for that purpose. 
Thanks Luc Title: Re: TD replications Post by: gotoluc on December 03, 2016, 02:59:21 PM Hi everyone, Below is test 3 demo which uses a different magnet geometry more like the shape floor used in his test device. Luc Title: Re: TD replications Post by: gotoluc on December 03, 2016, 03:26:26 PM The below is test 3 Input Torque curve chart. Keep in mind these are Foot Grams measured on the 12 inch torque arms. Luc Title: Re: TD replications Post by: gotoluc on December 03, 2016, 03:42:02 PM And here is test 3 Output Torque curve chart. Keep in mind these are Foot Grams measured on the 12 inch torque arms. Luc Title: Re: TD replications Post by: DrJones on December 04, 2016, 04:10:22 AM Good work, Luc! It is indeed curious.  I've been thinking of ways to convert the action here to a continuous motion, using gravity to drop an upper magnet (at right angles) in close proximity to the lower magnet prior to the "twisting phase" - then using energy from the twisting phase to raise the upper magnet back up to the starting height then turn it 90deg to the starting position. That is the more difficult action to resolve (for me). This would be a small model involving oscillating (engage-disengage) as well as rotary (torque) motions ... A small test device that would keep moving would be an awesome Christmas present to the world! Title: Re: TD replications Post by: Floor on December 04, 2016, 05:41:51 AM @luc 1. A few degrees of rotation  of RO toward 90 deg. off from parallel with SL (3 to 5 deg. ?)... is the optimal starting position for RO (in the same direction it will rotate when RO's force is measured). 2.  But also RO must hit it's stop at 90 deg. off from parallel to SLand no farther. A full 90 degs. of rotation will not be acheived. 3. Im not certain that the counter weight on the RO scale indicator as is..... is giving the needed effect. If it is, then, when there are no magnetic force interactions Between RO and SL (one magnet removed ?) 
the RO scale indicator will ballance (have no tendancy to rotate by gravity's pull at any degree of it's rotation). Don't rush it man...... It's bound to take some time to dial it all in. Nice work floor Title: Re: TD replications Post by: gotoluc on December 04, 2016, 03:14:31 PM Good work, Luc! It is indeed curious.  I've been thinking of ways to convert the action here to a continuous motion, using gravity to drop an upper magnet (at right angles) in close proximity to the lower magnet prior to the "twisting phase" - then using energy from the twisting phase to raise the upper magnet back up to the starting height then turn it 90deg to the starting position. That is the more difficult action to resolve (for me). This would be a small model involving oscillating (engage-disengage) as well as rotary (torque) motions ... A small test device that would keep moving would be an awesome Christmas present to the world! Thanks Dr Jones for your post and thinking of ways to convert this action into continuous motion. Luc Title: Re: TD replications Post by: gotoluc on December 04, 2016, 03:34:33 PM 1. A few degrees of rotation  of RO toward 90 deg. off from parallel with SL (3 to 5 deg. ?)... is the optimal starting position for RO (in the same direction it will rotate when RO's force is measured). Not sure I understand your suggestion 2.  But also RO must hit it's stop at 90 deg. off from parallel to SLand no farther. Are you recommending the output torque arm (RO) should stop at the center of the torque cycle? (see chart below) If so, why?... other then it would be next to free for the input torque arm to return to starting point A full 90 degs. of rotation will not be acheived. Why not? 3. Im not certain that the counter weight on the RO scale indicator as is..... is giving the needed effect. If it is, then, when there are no magnetic force interactions Between RO and SL (one magnet removed ?) 
the RO scale indicator will ballance (have no tendancy to rotate by gravity's pull at any degree of it's rotation). The counter weight was perfectly adjusted to create a zero gravity influence where ever the rotation arm is positioned. It was adjusted prior to bringing it in proximity of any ferromagnetic material or magnets. Thanks Luc Title: Re: TD replications Post by: Floor on December 04, 2016, 10:51:30 PM @ Gotoluc QUOTE from Gotoluc "Are you recommending the output torque arm (RO) should stop at the center of the torque cycle? (see chart below)" END QUOTE yes QUOTE from Gotoluc "If so, why?... other then it would be next to free for the END QUOTE No its just because it would be next to free for the input torque ..................................................................... QUOTE from Gotoluc "Not sure I understand your suggestion" END QUOTE In general, the maximum rotation motivating force is available during the first part of the rotation....  this is not true during the VERY FIRST FEW degrees of  rotation away from exact parallel. because The rotation motivating forces toward clock wise rotation are in balance with the rotation motivating forces toward counter clock wise, when RO is exactly parallel to SL This cw to ccw balance shifts greatly.... within a few degrees of rotation from exact parallel. While The linear motivating force (which pushing SL away) is at its absolute maximum potential when RO and SL are exactly parallel.  This linear force decreases substantially with a few degrees of rotation. There is no need to do input against this absolute maximum, linear repelling force, when the RO out doesn't give back its maximum until after a few degrees of rotation from exact parallel. thanks luc best wishes floor Title: Re: TD replications Post by: gotoluc on December 05, 2016, 08:30:47 AM Thanks for the clarification floor I'll re-measure the approximate 75 degree of rotation available up to the 90 center to see how it effects the gain. 
Luc Title: Re: TD replications Post by: DrJones on December 05, 2016, 10:13:14 AM Mechanical work - which is one form of energy - is defined in basic terms as Work = Force X Distance moved. For a rotating object, we apply a torque to get it to move/rotate.  Also, the distance moved = distance along a portion of a circle called an "arc", which = R x Theta (where the angle Theta is measured in radians — actually, unitless). Then Work = Force X Distance moved  = Force x R x Theta  = Torque x Theta. So the work = mechanical energy = Torque x Theta, not just Torque alone. I'm concerned that Theta has been left out in the analysis so far in this thread - and hope that Theta will be included in the future. Title: Re: TD replications Post by: wattsup on December 05, 2016, 01:42:53 PM @gotoluc Referring only to your videos 3 with the rectangular neos, I have always liked mechanical puzzles of weight and motion but this device I see is giving me a potential quandary. You measured your reference data by putting your scale on the tip of each arm and lifting to record the measured "pull" weight at each increment. But in your experiment the arm is being lifted by and from the center shaft via the length of one neo magnet so it has to fight against the full leverage of the arm, so the actual mechanical process of the experiment is not pulling the arm from the tip as you have tabulated. Seems to me the base data should be taken at a point on the arm that starts at the shaft and goes not more than half the length of the neo magnet from the shaft since it is the shaft centered neo magnet's responding length that is turning the shaft that is lifting the arm. What would your opinions be. So I am basically asking "Should the base data be taken at point 1 or 2 on the below drawing?", since for me the lift force required should be greater at point 1 than point 2. Or, am I blind to an obvious simplicity. 
I do not know for sure and maybe even if the data was taken at point 1, the final ratios would be the same and the final percentage outcome would also be the same. By the way @gotoluc, your worksmanship is so fine and thanks for your always inquisitive and clear videos and works. wattsup Title: Re: TD replications Post by: webby1 on December 05, 2016, 01:56:44 PM At point 2 there is less force than point 1 BUT the (length of arm * Force) will be the same for both points. ((2*PI)/360)*degrees rotated <--- this is Radians. Multiply this by the Nm or Lb/Ft,, with Nm you need not convert the answer to get J. Torque is length of arm * the force on the arm. If you are going from step to step then you take ((torqueA + torqueB)*0.5) Where torqueA is the previous step and torqueB is the current step so you have the average step torque, or the other way around where torqueA is the current step and torqueB is the next step,, but you need to do it the same way. then if you used a spreadsheet you would have a line that would be =((2*pi)/360)*degreesrotatted*((torqueA*torqueB)*.5) Title: Re: TD replications Post by: gotoluc on December 05, 2016, 04:47:52 PM Thanks for the clarification floor I'll re-measure the approximate 75 degree of rotation available up to the 90 center to see how it effects the gain. Luc Well, I did the test and the results are surprisingly the same 31% gain. In the previous test 3 the output rotation arm (Ro) traveled 140 degrees and the results were also 31% gain. See both test data below. First is test 4 and the second is test 3 The disengage in test 4 is unmeasurable (less the 5 grams) Luc Title: Re: TD replications Post by: gotoluc on December 05, 2016, 04:58:26 PM @gotoluc Referring only to your videos 3 with the rectangular neos, I have always liked mechanical puzzles of weight and motion but this device I see is giving me a potential quandary. 
You measured your reference data by putting your scale on the tip of each arm and lifting to record the measured "pull" weight at each increment. But in your experiment the arm is being lifted by and from the center shaft via the length of one neo magnet so it has to fight against the full leverage of the arm, so the actual mechanical process of the experiment is not pulling the arm from the tip as you have tabulated. Seems to me the base data should be taken at a point on the arm that starts at the shaft and goes not more then half the length of the neo magnet from the shaft since it is the shaft centered neo magnet's responding length that is turning the shaft that is lifting the arm. What would your opinions be. So I am basically asking "Should the base data be taken at point 1 or 2 on the below drawing?", since for me the lift force required should be greater at point 1 then point 2. Or, am I blind to an obvious simplicity. I do not know for sure and maybe even if the data was taken at point 1, the final ratios would be the same and the final percentage outcome would also be the same. By the way @gotoluc, your worksmanship is so fine and thanks for your always inquisitive and clear videos and works. wattsup Hi wattsup the reason for the distance on the arms is I originally built it to measure foot pound of torque. To my knowledge this is the way to measure foot pound or in my newer tests foot grams. Hope this helps? Luc Title: Re: TD replications Post by: Sacregraal on December 05, 2016, 05:05:45 PM Hello Gotoluc , I follow your work for many years now , and it's always a great pleasure to see your vidéos ... Looking at your 3th vidéo for the TD réplication , I think there is a measure missing . 
You've got 3 Step 1 - you engage the linéare arm ( it's the first data for the input work ) 2 - you mesure the output torque ( it 's the only output work ) 3 - you disengage the linéare arm ( it' the seconde data for the input work ) but 4 - you need to reset the position of the ouput arm for complete the cycle , i will be curious of the work it need ... This is for me a third data for the input work ) Cheers SG Title: Re: TD replications Post by: Floor on December 06, 2016, 02:38:42 AM QUOTE from DrJones "So the work = mechanical energy = Torque x Theta, not just Torque alone. I'm concerned that Theta has been left out in the analysis so far in this thread - and hope that Theta will be included in the future. "END QUOTE Two questions 1. The conversion of torque to work is needed, in order to state the actions in terms of Joules of work. Correct ? 2. But  that conversion to joules, will not change the RATIOS of the measurements to each other, will it ? thanks for being on board floor Title: Re: TD replications Post by: Floor on December 06, 2016, 03:12:14 AM @Gotoluc DrJones makes a good point of clarification. The reasons for my usage of round levers (pulleys) and conversions of degrees of rotation into the linear fall of the weights are probably pretty clear at this point. I stopped short of the conversion of grams into newton, averaging, and calculations of joules. But even, simply the degrees times weight of each set compared to each other will still give the same ratios ? as  their conversions to joules will to each other ??? I have asked DrJones this question ? PS floor Title: Re: TD replications Post by: Floor on December 06, 2016, 07:39:24 PM @Wattsup QUOTE from Wattsup "But in your experiment the arm is being lifted by and from the center shaft via the length of one neo magnet so it has to fight against the full leverage of the arm, so the actual mechanical process of the experiment is not pulling the arm from the tip as you have tabulated. 
"  END QUOTE I'm guessing you have already realized that the lifting on the (rotating magnet) indicator / lift arm is against the magnetic force between the RO magnet and the SL magnet. It takes a little while to grok the motions and interactions in the "TD" unit, and then yet more time, for this to settle in.  No worries though, after 2 years of the TD, I'm still some times befuddled. regards floor Title: Re: TD replications Post by: Floor on December 07, 2016, 03:31:10 PM @Luc some observations Your build successfully demonstrates that the SL becomes nearly free from the linear motivating forces between it and RO when RO is at 90 from parallel to SL. SL needs to be very far from RO,  before RO will be nearly free from rotation motivating forces, between it and SL. also see the attached files. your device needs further modifications and dialing in. best wishes floor Title: Re: TD replications Post by: gotoluc on December 07, 2016, 07:12:02 PM Your build successfully demonstrates that the SL becomes nearly free from the linear motivating forces between it and RO when RO is at 90 from parallel to SL. Yes, I agree! SL needs to be very far from RO,  before RO will be nearly free from rotation motivating forces, between it and SL. True, the SL (sliding magnet) needs to be far away from RO (rotating magnet) to have zero influence. The reason I cannot obtain measurements on my scale when disengaging the SL torque arm is because the arm is 12 inches long and was originally designed to measure foot pounds. To get a scale reading on test 4 (disengage arm) I would have to reduce the SL arm length by half if not more to get the scale to register something. I didn't feel it was necessary to modify the device at this point since I'm not seeing any advantages using the rectangular magnets over the ring magnets I originally tested and designed the device to measure. 
I'm going to move on to testing other magnet configurations that produce more torque so it's best to keep thing as they are. Hope that makes sense? also see the attached files. your device needs further modifications and dialing in. As for your great diagrams, thanks for taking the time to do them ... I agree to all points and have been aware of each scenario. On the first one, I was aware of the potential problem, so right from the start I took great care in keeping the scale angle at 90 degrees of the SL and RO arms throughout their torque travel. So there should be no errors in the scale data provided to date. The other issue which I also knew of, is true, the crankshaft will influence the linear scale readings of the SL input torque arm. However, since measurements are done in each directions (engage & disengage) there cannot be an advantage or gain from using a crankshaft mechanism since if it did we would of solved the energy problems some time back. With this in mind I trust the method and measurement used to be a true reflection of input force. However, I do agree by using a crankshaft on the input arm I'm not getting a linear input torque measurement, so the charts I made are affected by this fact. Please feel free or anyone else explain if I fail to understand something or have error in my logic. Kind regards Luc Title: Re: TD replications Post by: Floor on December 08, 2016, 01:32:31 AM @gotoluc still on the same page. I have to ask these questions, It's just part of the process (scientific). I sure you get it. Your work is much appreciated. regards floor Title: Re: TD replications Post by: gotoluc on December 08, 2016, 03:17:26 AM @gotoluc still on the same page. I have to ask these questions, It's just part of the process (scientific). I sure you get it. Your work is much appreciated. 
regards floor Great and thanks for asking these important questions which I could of explained in my video but I try to keep them as short as possible so I stick to important details. I do understand these questions need to be asked to ensure we're on the right track and I appreciate answering them. I hope more experimenters like Vidar, TK and so on are going to find things we haven't considered yet. Could it be there's nothing else? Come on guys, you know this shouldn't be so Luc Title: Re: TD replications Post by: webby1 on December 08, 2016, 06:31:56 AM Could it be there's nothing else? Come on guys, you know this shouldn't be so Luc Luc, Have you supplied the data for both directions of travel for both arms?  if so then I have missed the Torque arm reset values. Since your testbed is only to measure the forces and distances then the second direction of motion on the Torque arm should be considered.  Whether or not you reset that arm or something else moves it the counter force over the distance is still going to manifest. Title: Re: TD replications Post by: gotoluc on December 08, 2016, 07:45:06 AM Hi webby1 as you know I have always supplied input reset data which is deducted from the output gain. Even with this input reset subtracted for the output gain I'm still left with a 30% torque gain. I have come up with a rotating design which will have 2 to 4 alternating torque sequences which has no need to reset the output. Each of these alternating output torque sequences will transfer the 30% gains in rotating flywheels. The question now is, will this 30% output torque gain alternately transferred in each flywheel which will represents at most 40% of one revolution (per flywheel) be enough to keep each flywheel turning the 60% of the remaining revolution and may include a small counter force to go through as well? I think it may but we never know till we try. Most of the parts for the build will be in next week. 
Stay tuned and please feel free to point out any other problem you find in the meanwhile. Regards Luc Title: Re: TD replications Post by: webby1 on December 08, 2016, 02:14:29 PM Are the 28 samples of the Torque arm 14 in one direction of rotation and 14 in the other?  with the 60g being the start\end point? My quick little throw together still has that reset cost of the Torque arm,, when I double things up so I can run it in full rotation that cost is still there in the way of reducing the output torque. (no data collected just checking my memory) What I see from the  data you have provided is only 3 columns,, engage, cost,,, disengage, cost,, torque arm out, output,, but am missing the 4th which is torque arm reset, cost. Title: Re: TD replications Post by: webby1 on December 08, 2016, 03:48:26 PM Let me put it this way. The full cycle is, engage,     CCW, cost 0.2778951388 J torque arm, CCW, gain 0.7795415278 J disengage,  CW,  cost 0.2622390746 J torque arm, CW   cost ????? After disengage the torque arm needs to be put back into the position for engage to happen for the start of the next cycle. Title: Re: TD replications Post by: gotoluc on December 08, 2016, 03:57:57 PM Are the 28 samples of the Torque arm 14 in one direction of rotation and 14 in the other?  with the 60g being the start\end point? The 28 sample test 3 output torque chart has 140 degrees of travel (not 14 inches) on the output torque arm with an average of 165 foot/grams of torque throughout that 140 degrees of travel. What I see from the  data you have provided is only 3 columns,, engage, cost,,, disengage, cost,, torque arm out, output,, but am missing the 4th which is torque arm reset, cost. I would have to make serious modifications to my test device to calculate the losses of the output torque arm to rotate the balance of the 220 degrees needed to bring the arm back to the reset point. So I decided to use that time to just build a device which can continue to rotate in case it does work. 
My new design uses many parts I already have on hand so the cost of extra parts is just 25, so not a big loss if it doesn't work. Plus, once I have the device put together (even if it doesn't work) I'll be able to measure how much the balance of the rotation (reset) costs. Regards Luc Title: Re: TD replications Post by: gotoluc on December 08, 2016, 04:29:37 PM Let me put it this way. The full cycle is, engage, CCW, cost 0.2778951388 J torque arm, CCW, gain 0.7795415278 J disengage, CW, cost 0.2622390746 J torque arm, CW cost ??? ?? After disengage the torque arm needs to be put back into the position for engage to happen for the start of the next cycle. Yes, I got it but I don't think this is a practical way of resetting it. I want the output torque to continue in the same direction and return to the beginning. Makes more sense to me to keep things in motion then to send it back in the opposite direction. We'll see how it works out. Luc Title: Re: TD replications Post by: webby1 on December 08, 2016, 05:11:26 PM Conservation of momentum is important and should be considered. However,, with the testbed you had you could of simply pulled the Torque arm backwards (CW) and measured the force over the same distance while the other arm was in the disengaged position. This would then of provided the energy needed for the complete cycle regardless of how it is applied. 28 samples,, 14 samples :) Title: Re: TD replications Post by: gotoluc on December 08, 2016, 06:18:28 PM Conservation of momentum is important and should be considered. However,, with the testbed you had you could of simply pulled the Torque arm backwards (CW) and measured the force over the same distance while the other arm was in the disengaged position. This would then of provided the energy needed for the complete cycle regardless of how it is applied. 28 samples,, 14 samples :) Yes I agree, I could make new torque arms which would need to be 3 inches or less to rotate 360 degrees. 
However, doing that causes other problems like the scale is going to have issues measuring the portion when the arm is close to hitting the aluminum slab and possibly other issues I can't immediately visualize. Like I said, I rather not make drastic changes on this device for now. You'll have to wait a week or so to see what I can come up with on the next full rotation test device. Luc Title: Re: TD replications Post by: webby1 on December 08, 2016, 07:39:08 PM After you took your force readings did you use Mr. Hand to move the Torque arm back to the starting position? Did you try doing that while the system was in the disengaged position? If so,, how much did Mr. Hand do to move the arm? See,, no changes are needed you only need to have the Torque arm start in the end position after it rotated, move the other arm into the disengaged position and then use your scale to move the Torque arm back to where it needs to be so you could measure the engage force again. Title: Re: TD replications Post by: Sacregraal on December 08, 2016, 08:32:08 PM Hello everybody , This device remind me something you probably knows ... http://jnaudin.free.fr/html/2magpup.htm It was in 1998 ... Keep the good work Gotoluc ! SG Title: Re: TD replications Post by: gotoluc on December 08, 2016, 08:42:22 PM After you took your force readings did you use Mr. Hand to move the Torque arm back to the starting position? Did you try doing that while the system was in the disengaged position? If so,, how much did Mr. Hand do to move the arm? See,, no changes are needed you only need to have the Torque arm start in the end position after it rotated, move the other arm into the disengaged position and then use your scale to move the Torque arm back to where it needs to be so you could measure the engage force again. 
Okay webby1, to please you I added the output arm reset (return) measurement data to test 4 Seems to still have an overage Luc Title: Re: TD replications Post by: gotoluc on December 08, 2016, 08:49:24 PM Hello everybody , This device remind me something you probably knows ... http://jnaudin.free.fr/html/2magpup.htm (http://jnaudin.free.fr/html/2magpup.htm) It was in 1998 ... Keep the good work Gotoluc ! SG Hi SG thanks for your post. Yes, I would think the effects are similar or related. Please keep in mind this topic is for discussion of replications of floor's device. floor's original topic would be the place to post this kind of information and general discussion of the effect. Here is floor's original topic "Work from 2 magnets > 19%" which should be used for general discussions: http://overunity.com/14311/work-from-2-magnets-19-output-2/ (http://overunity.com/14311/work-from-2-magnets-19-output-2/) Kind regards Luc Title: Re: TD replications Post by: webby1 on December 08, 2016, 09:55:18 PM Okay webby1, to please you I added the output arm reset (return) measurement data to test 4 Seems to still have an overage Luc Thanks Luc, Rounded numbers :) Work in 0.366J Work out 0.401J Which is 109.63% and a difference of 0.035J Title: Re: TD replications Post by: gotoluc on December 08, 2016, 10:35:35 PM Thanks Luc, Rounded numbers :) Yes, that's because the scale works in 5 gram increments. Not ideal for fine measurements but gives a general idea. We'll get down to finer measurements in the next build if needed. Luc Title: Re: TD replications Post by: webby1 on December 09, 2016, 01:21:50 AM I was actually referencing my numbers,, 8 decimal places is a little silly :) IIRC I found that when disengaging happens,, that there is an advantage to stepping that while the RO (?) is still turning. 
As you are pulling the magnet away you can set it up so that you expend a constant force to do so,, and while you are expending more energy the RO will continue to speed up due to it still seeing a torque. You have that window around the 80-90-80 where the removal cost is comparatively low so it is easy to set the rate of change up. Title: Re: TD replications Post by: lumen on December 09, 2016, 04:45:54 AM I wonder if the cam effect on the slide is masking the real data. Wouldn't it be better to collect the data from the exact movement of the magnets over the entire rotation and slide distance and then determine where the best gain occures? The cam introduces a non linear input measurement against a linear output measurement. This is problemmatic when measuring the already non linear magnetic field. Title: Re: TD replications Post by: gotoluc on December 09, 2016, 04:17:30 PM IIRC I found that when disengaging happens,, that there is an advantage to stepping that while the RO (?) is still turning.As you are pulling the magnet away you can set it up so that you expend a constant force to do so,, and while you are expending more energy the RO will continue to speed up due to it still seeing a torque. You have that window around the 80-90-80 where the removal cost is comparatively low so it is easy to set the rate of change up. Good idea!... I'll keep it in mind for the new build I wonder if the cam effect on the slide is masking the real data. This has just been covered some posts back: http://overunity.com/16987/td-replications/msg496936/#msg496936 (http://overunity.com/16987/td-replications/msg496936/#msg496936) "the crankshaft will influence the linear scale readings of the SL input torque arm. However, since measurements are done in each directions (engage & disengage) there cannot be an advantage or gain from using a crankshaft mechanism since if it did we would of solved the energy problems some time back." 
Wouldn't it be better to collect the data from the exact movement of the magnets over the entire rotation and slide distance and then determine where the best gain occures? The cam introduces a non linear input measurement against a linear output measurement. This is problemmatic when measuring the already non linear magnetic field. This was (in the most part) a device already built and used for something else. I modified it to do what it does. The crankshaft was already there, which I decided to use to engage and disengage the sliding magnet and added a torque arm to it. It's good enough as a preliminary test bed. In my next test device build (which the output will able to rotate) I won't be using a crankshaft for this action. Luc Title: Re: TD replications Post by: lumen on December 10, 2016, 05:52:25 AM Though... I'm not saying the cam is a bad idea, in fact it adds an interesting effect in that it can leverage the higher forces when it's needed and yet during extraction at mid rotation where there is zero force, there is no effect. I hope to be getting back to do a bit more testing myself. Keep on it maybe something will come of all this. Title: Re: TD replications Post by: gotoluc on December 10, 2016, 06:16:36 AM Though... I'm not saying the cam is a bad idea, in fact it adds an interesting effect in that it can leverage the higher forces when it's needed and yet during extraction at mid rotation where there is zero force, there is no effect. I hope to be getting back to do a bit more testing myself. Keep on it maybe something will come of all this. Thanks for clearing up what you had in mind when you wrote your post. We'll see how it will turn out. I would rather build another test device rather then modifying the first one to further test other possibilities. So if for some reason the 2nd device gets worse, I can always go back to the first one to try to understand what makes the difference. Yes, I do hope something comes of it. 
Luc Title: Re: TD replications Post by: gotoluc on December 30, 2016, 03:16:57 AM Hi floor and everyone, My research continues even though I haven't posted for a few weeks! I've made an update video for all to see the tests I'm doing before the full rotary version build so I know which magnet geometry I'll be using. Link to video: https://www.youtube.com/watch?v=qQjp1ysvlOQ (https://www.youtube.com/watch?v=qQjp1ysvlOQ) Luc Title: Re: TD replications Post by: TinselKoala on December 30, 2016, 06:44:55 AM QUOTE from DrJones "So the work = mechanical energy = Torque x Theta, not just Torque alone. I'm concerned that Theta has been left out in the analysis so far in this thread - and hope that Theta will be included in the future. "END QUOTE Two questions 1. The conversion of torque to work is needed, in order to state the actions in terms of Joules of work. Correct ? 2. But that conversion to joules, will not change the RATIOS of the measurements to each other, will it ? thanks for being on board floor Nice work Luc. I haven't really been following along closely but I just wanted to throw this in: Let me underscore this point, and put it another way: Luc's data seem to be showing that t2 (output torque) is greater than t1 (input torque). But torque isn't energy or work, even though they have the same units. Torque is a vector whereas energy is a scalar, and torque is the rotational analog of force. The energy (or work) in Joules associated with a torque is given by E = (torque x angular displacement), where torque is in Newton-meters and angular displacement is in radians. So a torque of 1 N-m applied for a full rotation requires an energy (work) of 2pi Joules. So to answer Floor's question, yes, it is possible to have t2 > t1 but still have E2 <= E1 if the torque t2 acts over a smaller angle than t1. I can't tell from Luc's data whether you are taking this into account. 
Title: Re: TD replications Post by: gotoluc on December 30, 2016, 07:23:45 AM Thanks TK, I'm glad you're looking into this. Unfortunately (as you may know) I have no schooling, so we have a certain communication incompatibility when technical terms are used. However, I'll do what I can to provide any details you may need but keep in mind (when dealing with me) best to use plain words, no math equations or symbols. I do appreciate your input and hopefully floor can answer your question. Kind regards Luc Title: Re: TD replications Post by: lota on December 30, 2016, 10:44:08 AM Hi Luc,. you need a labour surplus. Power surplus is not so important. Enclose is the labour surplus. Only the power is not enough. Lota Title: Re: TD replications Post by: citfta on December 30, 2016, 02:50:00 PM Excellent work Luc! I love the idea you have for how to harness the apparent extra energy. As you demonstrated in the video there is a neutral spot just away from the magnet. With your shielding moving into and out of that neutral spot you should be able to harness the extra force. I appreciate the way you approach these kind of ideas. Your methods of analyzing what is really happening are a big help to the rest of us that are trying to follow along. Thanks, Carroll Title: Re: TD replications Post by: citfta on December 30, 2016, 02:55:09 PM For those interested here is a link about the neutral zone. I am sure Luc already knows about this from seeing his video. http://www.rexresearch.com/gary/gary1.htm Carroll Title: Re: TD replications Post by: gotoluc on December 30, 2016, 04:08:53 PM [/size]Hi Luc,.you need a labour surplus. Power surplus is not so important. Enclose is the labour surplus. Only the power is not enough.Lota Hi Lota, It is not clear what you are trying to say. Are you saying without continuous movement or rotation there is no power? If so, I agree. My test device is not made to measure power at this time. Those tests will come in the future rotation device. 
All I'm doing now is trying to find the most efficient magnet geometry. Since the beginning of my tests this is the first time I'm measuring such a great improvement. Glad you to see an excellent builder like you is looking at this. Looking forward to see your build Luc Excellent work Luc! I love the idea you have for how to harness the apparent extra energy. As you demonstrated in the video there is a neutral spot just away from the magnet. With your shielding moving into and out of that neutral spot you should be able to harness the extra force. I appreciate the way you approach these kind of ideas. Your methods of analyzing what is really happening are a big help to the rest of us that are trying to follow along. Thanks, Carroll Thanks for your input and support Carroll Luc Title: Re: TD replications Post by: lota on December 30, 2016, 08:12:35 PM Hello Luc, that is wrong. I mean: work in must be greater than work out.Not the strength. I'm building a machine. A combination of gap power and this magnetic system to a generator. I'm thinking, how do I get a continuous rotation. Gap-Power is for linear drive to the magnet. Gap-power has a new video http://gap-power.com/ lota Title: Re: TD replications Post by: TinselKoala on December 30, 2016, 10:58:57 PM Hi Lota, It is not clear what you are trying to say. Are you saying without continuous movement or rotation there is no power? If so, I agree. My test device is not made to measure power at this time. Those tests will come in the future rotation device. Actually, torque is a kind of "power", except that instead of acting across a time period (like Watts acting over seconds to give Joules of energy) it acts across angular displacement (torque acting over radians to give Joules of energy). No, you don't need to show continuous rotation to demonstrate power or even overunity. Although it would do, if it did! 
If you can show an _energy_ surplus during part of a cycle, even if it gets eaten up by mechanical losses in another part of the cycle, you still may be able to show OU without continuous rotation. And you would have a target to aim at: further reduce the losses in the lossy part of the cycle! Quote All I'm doing now is trying to find the most efficient magnet geometry. Since the beginning of my tests this is the first time I'm measuring such a great improvement. Glad you to see an excellent builder like you is looking at this. Looking forward to see your build Luc Thanks for your input and support Carroll Luc Great! As long as you realize what lota, DRJones, and I are trying to point out about the difference between torque and energy (work), since it is possible to have an increase in torque without having an increase in energy. ;) Title: Re: TD replications Post by: gotoluc on December 31, 2016, 02:58:44 AM I mean: work in must be greater than work out.Not the strength. Humm :-\ ... if work in is greater then work out, how do you achieve OU? I'm building a machine. A combination of gap power and this magnetic system to a generator. I'm thinking, how do I get a continuous rotation. Gap-Power is for linear drive to the magnet. Ah, Gap-Power of Art Porter. He is a very good and generous man. I went to his home about 2 month ago. We looked at his devices and he let me bring back his first device (aluminum table) which I temporarily modified to test floor's research. He has also given us an extra coil and magnet set of his latest device used in the video you mentioned. It's true that you can use magnet in conjunction with electromagnets to more then double a electromagnet magnetic field strength. However, what is not apparent to many experimenters at first (including me till a few years back) is the counter electromagnetic field (CEMF) also doubles in strength which bring you right back to normal electromagnet behavior and is always under unity. 
I know for sure as I've tried to beat this for years without any success. The effect of CEMF was very apparent when I built a super build of my "Mostly Magnet Motor" The bottom line is, as soon as you turn on a coil (no matter how short the on time or multiple pulses used) if there's a moving magnetic field no matter how powerful your magnets are, you're dead at that point. Here is a shortcut to understanding the device and the end results: http://overunity.com/8429/mostly-permanent-magnet-motor-with-minimal-input-power/msg420188/#msg420188 (http://overunity.com/8429/mostly-permanent-magnet-motor-with-minimal-input-power/msg420188/#msg420188) Regards Luc Title: Re: TD replications Post by: gotoluc on December 31, 2016, 03:39:25 AM Actually, torque is a kind of "power", except that instead of acting across a time period (like Watts acting over seconds to give Joules of energy) it acts across angular displacement (torque acting over radians to give Joules of energy). No, you don't need to show continuous rotation to demonstrate power or even overunity. Although it would do, if it did! If you can show an _energy_ surplus during part of a cycle, even if it gets eaten up by mechanical losses in another part of the cycle, you still may be able to show OU without continuous rotation. And you would have a target to aim at: further reduce the losses in the lossy part of the cycle! Thanks TK for trying to clear the miscommunication. I do know that continuous rotation is not needed to demonstrate OU. That was not what I was trying to communicate. As long as you realize what lota, DRJones, and I are trying to point out about the difference between torque and energy (work), since it is possible to have an increase in torque without having an increase in energy. ;) Yes, I also know and agree that an increase in torque does not mean an increase in work. I'm not aware that I have been demonstrating or indicating this anywhere!... 
if you do see it somewhere, please let me know so I can correctly explain or correct it. Thanks for your help Luc Title: Re: TD replications Post by: lota on December 31, 2016, 11:16:29 AM Hello Luc, It's nice that you talked to Art Potter. A friend of mine is building and testing its solid stat machine. We will see what he can find out. I would like to test this project and this https://www.google.de/?gws_rd=ssl#q=Kedron_EDEN_Project.ppt (https://ssl.microsofttranslator.com/bv.aspx?from=&to=en&a=https%3A%2F%2Fwww.google.de%2F%3Fgws_rd%3Dssl%23q%3DKedron_EDEN_Project.ppt).When I'm done, I'll show it. I wish all a healthy and prosperous new year. Lota Title: Re: TD replications Post by: synchro1 on December 31, 2016, 05:44:48 PM Humm :-\ ... if work in is greater then work out, how do you achieve OU? Ah, Gap-Power of Art Porter. He is a very good and generous man. I went to his home about 2 month ago. We looked at his devices and he let me bring back his first device (aluminum table) which I temporarily modified to test floor's research. He has also given us an extra coil and magnet set of his latest device used in the video you mentioned. It's true that you can use magnet in conjunction with electromagnets to more then double a electromagnet magnetic field strength. However, what is not apparent to many experimenters at first (including me till a few years back) is the counter electromagnetic field (CEMF) also doubles in strength which bring you right back to normal electromagnet behavior and is always under unity. I know for sure as I've tried to beat this for years without any success. The effect of CEMF was very apparent when I built a super build of my "Mostly Magnet Motor" The bottom line is, as soon as you turn on a coil (no matter how short the on time or multiple pulses used) if there's a moving magnetic field no matter how powerful your magnets are, you're dead at that point. 
Here is a shortcut to understanding the device and the end results: http://overunity.com/8429/mostly-permanent-magnet-motor-with-minimal-input-power/msg420188/#msg420188 (http://overunity.com/8429/mostly-permanent-magnet-motor-with-minimal-input-power/msg420188/#msg420188) Regards Luc Look at this "Flux Gate Ladder" below: Title: Re: TD replications Post by: Floor on January 03, 2017, 04:58:57 PM @Gotoluc Nice work / video, thanks floor Title: Re: TD replications Post by: telecom on February 07, 2017, 06:32:17 AM I looked at Gotoluc videos, which are very impressive, except I would like to see the calculation based on work, and not on torque. Its fairly easy to do, I think, by multiplying force by the length of the arch of his apparatus. Title: Re: TD replications Post by: Floor on February 08, 2017, 06:20:17 AM Thanks telecom QUOTE from telecom "I looked at Gotoluc videos, which are very impressive, except I would like to see the calculation based on work, and not on torque. Its fairly easy to do, I think, by multiplying force by the length of the arch of his apparatus." END QUOTE @ TELECOM If you would please give us a presentation in detail, of a prescription for / procedure for making those measurements and a clear and direct explanation of the math (how to) integrate the force over distance of magnets interacting (with examples). This would be most appreciated. Also if this is possible, could you present this in the topic below ? http://overunity.com/16954/magnets-motion-and-measurement/ best wishes floor Title: Re: TD replications Post by: dieter on February 08, 2017, 07:07:45 AM [/font]Humm :-\ ... if work in is greater then work out, how do you achieve OU?Ah, Gap-Power of Art Porter. He is a very good and generous man. I went to his home about 2 month ago. We looked at his devices and he let me bring back his first device (aluminum table) which I temporarily modified to test floor's research. 
He has also given us an extra coil and magnet set of his latest device used in the video you mentioned.It's true that you can use magnet in conjunction with electromagnets to more then double a electromagnet magnetic field strength. However, what is not apparent to many experimenters at first (including me till a few years back) is the counter electromagnetic field (CEMF) also doubles in strength which bring you right back to normal electromagnet behavior and is always under unity.I know for sure as I've tried to beat this for years without any success.The effect of CEMF was very apparent when I built a super build of my "Mostly Magnet Motor"The bottom line is, as soon as you turn on a coil (no matter how short the on time or multiple pulses used) if there's a moving magnetic field no matter how powerful your magnets are, you're dead at that point.Here is a shortcut to understanding the device and the end results: http://overunity.com/8429/mostly-permanent-magnet-motor-with-minimal-input-power/msg420188/#msg420188 (http://overunity.com/8429/mostly-permanent-magnet-motor-with-minimal-input-power/msg420188/#msg420188)RegardsLuc [/font] I would not agree with that in all detail. Art did tests too, and over a longer period he would have noticed, whether the lead battery recgarge was only recovering or not. IMHO the special thing is the frontal approaching. In Attraction the fluxes of to magnets link, but in repulsion they siege oneanother, allowing the lorentz force of the secondary field (lenz drag) to act its natural orientation, that is 90deg sideways, which is why there is repulsion drag in rotational byepassing. We talked about that. Hang a wire horizontally in the air, with a dc current. Approch magnet. Wire will go up, 90 deg from approaching direction. Accummulate all wires... Well, however, that was my impression. 
Title: Re: TD replications Post by: gotoluc on February 09, 2017, 04:57:56 AM Here is an update and results on my idea of using metal shielding pulled in and out between the magnet rings to allow the remaining 220 degrees to rotate through to reset the device. The test also includes floor's idea of using diametrically magnetized magnets (instead of metal) as shielding to do the same. Link to video: https://www.youtube.com/watch?v=MMqBISjwieY (https://www.youtube.com/watch?v=MMqBISjwieY) Tomorrow I'll post another video demonstration of a completely different design which combines part of floor's most recent video demo and my idea of using it which resets every cycle. Stay tuned Luc Title: Re: TD replications Post by: gotoluc on February 09, 2017, 04:19:46 PM As promised, here is the link to a video demo of a completely new design which resets at every cycle. Link to video: https://www.youtube.com/watch?v=oUlDMY1iE5A @floor, if you would prefer I start a new topic on this device please let me know. Regards Luc Title: Re: TD replications Post by: citfta on February 09, 2017, 04:57:44 PM Very nice and interesting video. Thanks Luc! Carroll Title: Re: TD replications Post by: dieter on February 09, 2017, 10:39:55 PM Very interesting device and results. But did you measure the average of the pull in the 11mm range? Title: Re: TD replications Post by: gotoluc on February 09, 2017, 10:52:59 PM did you measure the average of the pull in the 11mm range? Yes I did, it doesn't vary much. The low is around 520 grams and high around 565 grams. Luc Title: Re: TD replications Post by: dieter on February 09, 2017, 11:03:13 PM That's amazing! 
Title: Re: TD replications Post by: telecom on February 10, 2017, 01:21:16 AM Hi Luc, the actual gain , I think, is 2 times higher than you calculated because your output goes back and forth during the cycle, so the total distance is 22 mm by 540 grams = 11880 gram x mm Title: Re: TD replications Post by: telecom on February 10, 2017, 01:23:16 AM Considering that the input is 3700 gram x mm per cycle, you have a gain of 3. Title: Re: TD replications Post by: Floor on February 10, 2017, 08:10:17 AM @GoToLuc very nice work thanks floor Title: Re: TD replications Post by: gotoluc on February 10, 2017, 03:35:58 PM Hi Luc, the actual gain , I think, is 2 times higher than you calculated because your output goes back and forth during the cycle, so the total distance is 22 mm by 540 grams = 11880 gram x mm Hi telecom, It would be great if there was twice the movement. However, I don't see that. I made a video just for you to count them. Link: https://www.youtube.com/watch?v=dpBaeJD38HI (https://www.youtube.com/watch?v=dpBaeJD38HI) Regards Luc Title: Re: TD replications Post by: gotoluc on February 10, 2017, 06:03:39 PM What is still a question is how the 11mm push is measured over that distance, with the wheels' magnet aligned in the center, or with it passing by. Since the device is very dynamic the only way I could measure it was to do each step at a time. So to answer your question quickly, the 11mm stroke is measured once the rotating magnet is centered with it. Luc Title: Re: TD replications Post by: dieter on February 10, 2017, 06:24:55 PM Then I would suggest to measure the push with each measurement step of the rotation, that was 5mm, maybe try to do finer steps due to the peaks. Title: Re: TD replications Post by: telecom on February 10, 2017, 07:10:45 PM Hi telecom, It would be great if there was twice the movement. However, I don't see that. I made a video just for you to count them. 
Link: https://www.youtube.com/watch?v=dpBaeJD38HI (https://www.youtube.com/watch?v=dpBaeJD38HI) Regards Luc Ok, I guess it was a wishful thinking on my end! In this case you probably need to calculate the work balance for the linear movement to come to the initial position. Regards Title: Re: TD replications Post by: conradelektro on February 10, 2017, 07:19:10 PM If you do a replication please do compare output-POWER and input-POWER (and not FORCE or WORK) See: http://overunity.com/17097/magnet-force-shield/msg499715/#msg499715 (http://overunity.com/17097/magnet-force-shield/msg499715/#msg499715) (concerning POWER versus WORK, TIME is of the essence) FORCE WORK = FORCE * DISTANCE POWER = WORK over TIME Also see: http://overunity.com/17097/magnet-force-shield/msg499638/#msg499638 (http://overunity.com/17097/magnet-force-shield/msg499638/#msg499638) (concerning a magnetic shield, if you plan one in your replication; note, a magnet is also a "known material") Greetings, Conrad Title: Re: TD replications Post by: gotoluc on February 10, 2017, 07:23:22 PM That really would complicate measurements to level that may not be possible since if the 11mm magnet is allowed to move while the rotating magnet is moving in its first 55mm of travel the rotating magnet force needed to pull in goes down as the 11mm magnet moves. And once the 11mm magnet has reached it's 5.5mm center the rotating magnet requires 0 grams to move through. A very complex measurement setup would be needed to probably find it all comes to the same. The way I did it (in steps) measures all maximums. By holding down the 11mm magnet while the rotating magnet moves in, the rotating magnet needs the most pull force to pull it in the first 55mm and the opposite happens once I release the 11mm magnet and hold it down now the rotating needs maximum force to be pulled out of the remaining 55mm of travel. 
Hope you understand Luc Title: Re: TD replications Post by: gotoluc on February 10, 2017, 07:28:36 PM In this case you probably need to calculate the work balance for the linear movement to come to the initial position. That's the beauty of this design, there is no work "balance" needed for the next position (cycle)... as the fist cycle goes through the next cycle is right there and needs exactly the same as the first cycle. Luc Title: Re: TD replications Post by: gotoluc on February 10, 2017, 07:38:16 PM If you do a replication please do compare output-POWER and input-POWER (and not FORCE or WORK) See: http://overunity.com/17097/magnet-force-shield/msg499715/#msg499715 (http://overunity.com/17097/magnet-force-shield/msg499715/#msg499715) (concerning POWER versus WORK, TIME is of the essence) FORCE WORK = FORCE * DISTANCE POWER = WORK over TIME Greetings, Conrad Dear Conrad, Please reply to the simple question (in bold) of this post: Dear Conrad, Lets look at a test device which can test your distance time beliefs. Test device parts needed: A DC electric motor which has a flywheel attached to its shaft and use of photo switch to turn the motor on and off. First test: We attach a scale to the outer circumference of the flywheel and adjust the current to the motor so it can pull 34 grams. Then we adjust the photo switch to power the motor 110 mm distance of the flywheel outer circumference. We note of the RPM in this condition. Second test: We attach a scale to the outer circumference of the flywheel and adjust the current to the motor so it can pull 538 grams. Then we adjust the photo switch to power the motor 11 mm distance of the flywheel outer circumference. We note of the RPM in this condition. If your belief is correct, the RPM should be greater on the first test compered to the second test, correct? 
Regards Luc Title: Re: TD replications Post by: telecom on February 10, 2017, 08:12:30 PM That's the beauty of this design, there is no work "balance" needed for the next position (cycle)... as the fist cycle goes through the next cycle is right there and needs exactly the same as the first cycle. Luc So, to return the linear stage to the initial position will take the same work, and the linear stage will generate the same work? Approximately 1.5 times more than input? Title: Re: TD replications Post by: dieter on February 10, 2017, 08:28:20 PM How about to slap a rudimentary crankshaft together, using the 11mm push over leverage to turn the wheel? Would probably be less timeconsuming than all measurements an defendings ^^ Title: Re: TD replications Post by: gotoluc on February 10, 2017, 09:23:25 PM So, to return the linear stage to the initial position will take the same work, and the linear stage will generate the same work? Approximately 1.5 times more than input? You don't need to return the linear stage to the initial position. The rotor magnet alternate N-S-N-S which creates the back and forth linear action which give 11mm linear force (538 grams) in each direction. No rest needed. Luc Title: Re: TD replications Post by: conradelektro on February 10, 2017, 09:23:48 PM Dear Conrad, Please reply to the simple question (in bold) of this post: If your belief is correct, the RPM should be greater on the first test compered to the second test, correct?[/size] Luc, it is not a simple question and I have no simple answer. I have not studied physics (only mathematics and law) therefore I would have to read up about flywheels in my physics text books. This would take hours which I am not prepared to put in today. The quick answer: I do not see the connection between a flywheel and your machine. A flywheel is continuous movement. 
Your machine has two movements, a continuous turning movement (like a flywheel, if you want to see that probably false analogy) and an intermittent reciprocal movement. Very important and constantly overlooked fact: your sledge also pauses during its movement (cycle, reciprocal movement). And during the pauses it does not do work. Like a LED that is driven intermittently, whose average light output is less than a constantly driven LED (with the same Voltage and Amperage, this is what dimming is all about), your sledge outputs less energy than thought if the pauses are taken into consideration. (And exactly there I see no continuous fly wheel). So, please forgive me that I do not want to study fly wheels at the moment. Your fly wheel example might have the answer "yes", but there is no Konnex to your machine Just think for a moment, it is important how long work is done. Only when knowing how long a certain work is done one knows its power output (energy). I have said more than often: POWER is WORK over TIME. If there is no time, there is no power (when the sledge pauses). A power company sells Watt-Hours and not Watt. You have to get the difference between Watt and Watt-Hours (between Work and Power or Energy). Which in your case is the difference between Gram or Gram-Hours. Or, if we would do the conversion from Gram to Joule, it is the difference between Joule and Joule-Seconds (or Joule-Hours if you want). This is not my opinion, this is a fact. Everybody would complain if his work would not be paid by the hour. It matters very much how many hours you do your work. The average power output of your work is less (per hour) if you drink coffee for 15 minutes every hour (a pause of 15 minutes every hour). Your power output will be 25% less. All employers know that simple fact. All power companies know that it is important how long you switch on your loads. 
Greetings, Conrad Title: Re: TD replications Post by: gotoluc on February 10, 2017, 09:31:29 PM How about to slap a rudimentary crankshaft together, using the 11mm push over leverage to turn the wheel? Would probably be less timeconsuming than all measurements an defendings ^^ Yes, it's looking that way now :P I was thinking of building a Neo magnet version next before trying a self loop these weak ceramic magnets. If there is a gain then it may have more potential of working then a ceramic version. Luc Title: Re: TD replications Post by: dieter on February 10, 2017, 09:39:16 PM Conrad, I'm trying to be serious without to hurt you, that turns out to be a dilemma. See that's the diffrence between employed and selfemployed. Work does not mean to pretend to do something until 17h. But to deliver a quantitively precise service, like in "job done", regardless of the hour. I can't believe you studied Maths and then fail to add 1+1. Maybe disagreement is an easy way to get into a discussion, but that would be a psychological issue. Title: Re: TD replications Post by: dieter on February 10, 2017, 09:49:30 PM Luc, not neccessarily. Neos have such a low volume that it appears as if their long range is weaker than with eg. ceramics (which isn't the case), only when you replace them by Neos of same volume, but then friction losses may rise to an amount that such a light construction can hardly withstand. It looks good atm, and 200g excess should be much more than enough, if the cranking losses are kept low. Title: Re: TD replications Post by: conradelektro on February 10, 2017, 09:50:56 PM Conrad, I'm trying to be serious without to hurt you, that turns out to be a dilemma. See that's the diffrence between employed and selfemployed. Work does not mean to pretend to do something until 17h. But to deliver a quantitively precise service, like in "job done", regardless of the hour. I can't believe you studied Maths and then fail to add 1+1. 
Maybe disagreement is an easy way to get into a discussion, but that would be a psychological issue. Come on now, self employed or employed. Of course my real life work-pause example was a Metapher. I quit writing about Joule and Joule-Seconds because it goes by your head, which is fine with me. You have won, you have exhausted me and therefore you are right (in your world). Yes, you have hurt me. But not with your remark about my mathematics. You have hurt me with your lack of insight which makes the OU forums such frightful places. Nobody wants to learn and the OU-fans want everybody to unlearn simple facts. It is only personal insults now (and I am also doing the insults, why not, I have a temper, eat it, go to hell). Greetings, Conrad Title: Re: TD replications Post by: dieter on February 10, 2017, 10:05:25 PM Conrad, I take it you agree with me? Your understanding of "Work" is that of Ron in the IT Crowd, but that is not what physics means by the term. You were wrong and seemingly unable to face it. To understand that must hurt, but it may turn you into a cooperative kind of forum user. Same thing hapens to me too, sometimes, btw. Nobody is perfect and that ain't a shame. This is already getting off topic again, sorry. Well the "what is work anyway" thing has some relation. Title: Re: TD replications Post by: webby1 on February 10, 2017, 10:19:11 PM Since you are dealing with a stop-move-stop-move condition it will be difficult to easily use a crankshaft only setup. I would suggest making the linear magnet distance covered less than the rotary magnet length, this so the wheel can keep moving as the linear magnet slides. 
Say if you turned the linear magnet 90 degrees and "release" the linear magnet when the edge of the wheel magnet just hits the far edge of the linear magnet,, this will also setup your maximum RPM,, that is when the wheel is spinning as such that the same time is used for the wheel to rotate its distance of coverage as the linear magnet uses to slide. I would also suggest using a system where the linear magnet slide is applied against a spring (I was thinking of something like a pull start return spring arrangement) that drives a flywheel that drives the wheel and the use of a few one way clutches so the full motion from the slide will be applied into the spring and that force released into the flywheel\wheel as it needs it. String and pulleys will allow you to take the linear slide and redirect it in any direction you like. Using one such drive system per slide direction. Since you are comparing a full cycle to a full cycle power is considered after the fact,, as in how many cycles per second the system is running at and is used to find the speed that you must achieve to run a given external load. Just my 2cents worth. Title: Re: TD replications Post by: conradelektro on February 10, 2017, 10:25:12 PM Conrad, I take it you agree with me? Your understanding of "Work" is that of Ron in the IT Crowd, but that is not what physics means by the term. You were wrong and seemingly unable to face it. To understand that must hurt, but it may turn you into a cooperative kind of forum user. Same thing hapens to me too, sometimes, btw. Nobody is perfect and that ain't a shame. This is already getting off topic again, sorry. Well the "what is work anyway" thing has some relation. Dieter, please read my posts. Yes, I made an error with WORK yesterday evening. But since then I know: WORK = FORCE * DISTANCE (and that is the right formula as you know and as I know now, so please do not get stuck on this, you are a sneaky person holding a corrected error against me). 
I learned what WORK is, but you do not want to learn what POWER (or energy) is. In order to calculate POWER you need TIME. And you seem to be too thick to get that. But as I said, I do not want to loose any more time with you. Please stop writing about me, you deliberately tell lies about what I write. You deliberately deny that I corrected an error. I do not like this and it upsets me. You are not a person whom one should converse with. Greetings, Conrad Title: Re: TD replications Post by: webby1 on February 10, 2017, 10:27:31 PM Using the string. If you take a shaft and a string,, a long string,, wrap the string around the shaft a dozen times in one direction, then pin the string to that shaft and then continue wrapping the string around the shaft but in the OPPOSITE direction,, now when you attach the ends of the string to one of the slide pieces you have a push-me-pull-you arrangement,, using one way bearings or clutches will turn the shaft oscillation into a continuous single direction rotation. Just in case you were interested. Title: Re: TD replications Post by: telecom on February 10, 2017, 11:15:37 PM Yes, it's looking that way now :P I was thinking of building a Neo magnet version next before trying a self loop these weak ceramic magnets. If there is a gain then it may have more potential of working then a ceramic version. Luc Looking forward for the video - this is the most remarkable design I've seen in years! Title: Re: TD replications Post by: dieter on February 11, 2017, 06:46:24 AM Conrad, I feel no harm for you and wish you all the luck that makes you happy. kr And Pulleys, Strings, oneway clutches sounds good. Maybe you could even use a simple hook that grabs a spoke, using leverage to sync with the 11mm vs wheel circumference / 12. Title: Re: TD replications Post by: deslomeslager on February 11, 2017, 11:13:22 AM Having seen and understanding that a sideways push generates a better force (for the lack of no OU words), some thoughts come to mind. 
1 - We can construct this in a rotating fashion. Luc already uses a wheel on top, now all we need is a wheel on the bottom, and the wheels need to have some timing mechanism. The top wheel will let the bottom wheel rotate, and if all goes well, with a surplus. If the bottom wheel is an e-bike wheel, it can generate power (any other power generating tool will work as well) 2 - for other people on this forum who have build a pulse motor and have it laying around: You need to mount your magnets rotated 90 degrees, similar to how Luc uses the magnets. Is there someone willing to build this? Should not be to hard to build, as long as your timing is right. Maybe it works that way as well. Title: Re: TD replications Post by: Grumage on February 11, 2017, 04:13:54 PM A " face cam " and pivoted arm would be the simplest approach! ;) Cheers Graham. Title: Re: TD replications Post by: dieter on February 15, 2017, 08:28:44 AM Any news on this one? Yesterday I found an antique sewing machine on a yard, "Singer 1", no electrics, beautiful btw., with a foot pedal driving a flywheel, which would be perfect for something like this. Title: Re: TD replications Post by: gotoluc on February 15, 2017, 05:05:17 PM Even though the design appears to have a gain it's a very small one because of the magnet size. I think we should consider this first design as a concept verification. I also think OU devices need to be large at first so not to overlook things. With this said I'm thinking the output needs to travel more distance and have much more pull or push force. To do this it's going to need magnets that have more surface area. I've ordered 12 of these for the next experiment: http://www.ebay.com/itm/322408983857 (http://www.ebay.com/itm/322408983857) So the version 2 test device will use these 2" x 6" magnets. They be turned by a low rpm DC motor in order to calculate input Watts. There should be a formula to convert the back and forth output into Watts. 
If you want to help please find the formula to do so. Regards Luc Title: Re: TD replications Post by: dieter on February 15, 2017, 06:28:42 PM I think, first you should add springs to the 11mm push/pull to compensate / nullify the losses you will otherwise get due to acceleration of mass. Springs of the right strength, so it becomes a pendulum, synced with the wheel's rotation. Mass in motion doesn't require energy, only to brake, accelerate or redirect it will require energy, in outer space without gravity and air, of course. So, this goes over my head and personally I would rather try to build that cranking mechanism than to find and understand a formula for the conversion of this output into eg. joules. And I wouldn't trust the formula anyway, esp. when found on wikipedia :) About size I found out, you may also just downscale friction, instead of upscaling the model. Bicycle wheels have pretty good friction and stability features already, in relation to their size. For tiny models with eg. 1 foot diam. rotor I prefer needle bearings, actually hardened steel nails, so the contact area is never more than like 1/50mm2. Anyhow, with stronger magnets you may very well see an even more obvious net gain. Title: Re: TD replications Post by: gotoluc on February 15, 2017, 08:36:00 PM Humm, pendulum... I was also thinking of a pendulum instead of the wheel since only 2 magnets are needed to make the output go back and forth. It should also be an easier mechanism to redirect the output back to a pendulum. Luc Title: Re: TD replications Post by: Floor on February 16, 2017, 04:46:39 PM @gotoluc The difficulty with a pendulum is the same magnet orientation passes over again on each back swing. .................................... When using big ceramic magnets, you might still be able to use a bicycle rim if you support it with 4 rolling bearings / wheels 2 on each side, near to where it passes over the stationary magnet. ....................................... 
If you have anything specific you would like to run by me feel free to pm me. ........................................ Your latest device kicks a___! regards floor Title: Re: TD replications Post by: gotoluc on February 16, 2017, 06:03:47 PM The difficulty with a pendulum is the same magnet orientation passes over again on each back swing. I think the pendulum arc could be long enough to accommodate 2 magnets (one of each pole) Video demo: https://www.youtube.com/watch?v=xqaf9a9hJAg (https://www.youtube.com/watch?v=xqaf9a9hJAg) When using big ceramic magnets, you might still be able to use a bicycle rim if you support it with 4 rolling bearings / wheels 2 on each side, near to where it passes over the stationary magnet. Yes If you have anything specific you would like to run by me feel free to pm me. ........................................ Your latest device kicks a___! regards floor Thanks for the support and glad you enjoy the new design ;) I'm contemplating on a series (cascade effect) for the next build. If there is a gain, it should be obvious then Regards Luc Title: Re: TD replications Post by: Floor on February 16, 2017, 06:38:36 PM @Gotoluc I think the pendulum arc could be long enough to accommodate 2 magnets (one of each pole) I didn't see it clearly. Now I got it. Thanks for the short and sweet video. Title: Re: TD replications Post by: Floor on February 16, 2017, 07:32:23 PM @Gotoluc Some observations. When the approach of the rotating magnet is very precisely at right angles (in all planes) to the sliding magnet.... there is practically no work done against magnetic forces... for rotator to approach / exit. ................................................ Even the very slight off angle / changing angle ....of approach by the rotating magnet (which is due to the curvature of the bicycle rim) is having a significant effect. Using longer magnets will, increase the work input, more than one might expect due to this feature. 
(longer magnets will have a more dramatic change in distance from / angle to .... the sliding magnet... at a given curvature of their approach line (I think).... ..................................................... The greater the diameter of the rotating rim.... the more nearly, the rotating magnet's approach to the sliding magnet.... comes toward being a straight on / 180 degree (2 x 90 degree) approach. There will be a diminishing return in the benefits of a longer pendulum (from pivot point to magnets). At some particular length, any further increase in length... will give almost no decrease in the force needed to approach / escape. The more perfect the alignments the more nearly one will approach a zero for the input. There is a limit... even the magnets / their fields are not be perfect. Also, at close proximity "magnetic domain flipping" within the magnets might have some undesirable effect. ..... But when 2 magnetic poles approach 2 other magnetic poles... these effect might completely self cancel. regards floor Title: Re: TD replications Post by: gotoluc on February 16, 2017, 08:33:25 PM @Gotoluc Some observations. When the approach of the rotating magnet is very precisely at right angles (in all planes) to the sliding magnet.... there is practically no work done against magnetic forces... for rotator to approach / exit. ................................................ Even the very slight off angle / changing angle ....of approach by the rotating magnet (which is due to the curvature of the bicycle rim) is having a significant effect. Using longer magnets will, increase the work input, more than one might expect due to this feature. (longer magnets will have a more dramatic change in distance from / angle to .... the sliding magnet... at a given curvature of their approach line (I think).... ..................................................... The greater the diameter of the rotating rim.... 
the more nearly, the rotating magnet's approach to the sliding magnet.... comes toward being a straight on / 180 degree (2 x 90 degree) approach. Yes, I'm aware of these problems and why I was thinking of using a Pendulum instead of the wheel. There will be a diminishing return in the benefits of a longer pendulum (from pivot point to magnets). At some particular length, any further increase in length... will give almost no decrease in the force needed to approach / escape. The more perfect the alignments the more nearly one will approach a zero for the input. There is a limit... even the magnets / their fields are not be perfect. Also, at close proximity "magnetic domain flipping" within the magnets might have some undesirable effect. ..... But when 2 magnetic poles approach 2 other magnetic poles... these effect might completely self cancel. Today I was reconsidering the use of a Pendulum because of the long ark needed to keep the magnet gap small. I concluded to just use a sliding bearing for the 2 alternating magnets. This way both input and output are linear motions which should make it simpler to loop. Thanks for all your considerations and sharing. Luc Title: Re: TD replications Post by: gotoluc on February 16, 2017, 09:47:22 PM @Gotoluc I didn't see it clearly. Now I got it. Thanks for the short and sweet video. That's why I make videos and encourage everyone to make them, as in seconds one can understand what may take many posts or even pages to describe. Luc Title: Re: TD replications Post by: dieter on February 17, 2017, 03:01:01 AM Thinking about it, the forces that are in 90 degrees to the wheel cannot stop or brake the wheel, but only add some friction. So what does the 11m magnet do when it's not in a rail? In what direction does it move? Title: Re: TD replications Post by: gotoluc on February 17, 2017, 06:17:07 AM So what does the 11m magnet do when it's not in a rail? In what direction does it move? 
Sorry, I don't understand your question Luc Title: Re: TD replications Post by: dieter on February 17, 2017, 06:44:15 AM Well if you would just put that magnet (that is pushed 11mm) on the table, without any fixation, then turn the wheel over it like you did. In which direction would the magnet move? Title: Re: TD replications Post by: gotoluc on February 17, 2017, 03:04:28 PM Well if you would just put that magnet (that is pushed 11mm) on the table, without any fixation, then turn the wheel over it like you did. In which direction would the magnet move? This is not something I can test without making modifications. Please take 2 magnets and do some tests. Regards Luc Title: Re: TD replications Post by: dieter on February 17, 2017, 03:44:05 PM Probably I missed it, but I'm still a bit confused about how the polar axes are arranged. On the wheel up and down, and on the one on the track from left to right of the wheel? Title: Re: TD replications Post by: gotoluc on February 17, 2017, 04:05:11 PM Goto page 6 post 85 and look at the video demo The magnets are in standard magnetization Luc Title: Re: TD replications Post by: dieter on February 17, 2017, 05:24:06 PM So I take it "this dimension" refers to the polar axis, thanks. Title: Re: TD replications Post by: Floor on February 17, 2017, 06:29:37 PM @Dieter 1. The magnets used are polar on the broad faces. 2. See this video @ http://www.dailymotion.com/video/x5an8hd_rtangsld2_tech regards Title: Re: TD replications Post by: dieter on February 17, 2017, 07:36:49 PM Thanks Floor (Alex?), I see now. Interesting channel and I was in search for a tube substitute already (always bad to google a google substitute^^), so thanks2x. Title: Re: TD replications Post by: Floor on February 18, 2017, 12:54:30 AM @Dieter I'm not Alex ?... my username on DailyMotion.com is .... seethisvid floor Title: Re: TD replications Post by: dieter on February 18, 2017, 03:18:47 AM Yeah, sorry, I must have mixed up something.
Title: Re: TD replications Post by: Cairun on February 21, 2017, 03:19:24 PM I'm contemplating on a series (cascade effect) for the next build. If there is a gain, it should be obvious then Hi Luc, When you say "a series build" do you mean you will link multiply systems together? Just curious. I am looking forward to your next build. I am developing a mechanism to capture the "stop and go" motion of your setup and use it in a "two bicycle wheel" setup, since continuous rotation ultimately is going to be the most efficient. A linear sliding setup should work as well. One way to capture the motion of the magnets is to create cam followers to follow two separate tracks to create the "stop and go" motion. Keep up the great work! Alex Title: Re: TD replications Post by: gotoluc on February 21, 2017, 05:34:38 PM Hi Alex, Thanks for your interest. When you say "a series build" do you mean you will link multiply systems together? Just curious. Yes, they are linked to multiply in a way. The idea is, if there's a gain, then the first unit can be calibrated to use its gain to operate the polarity flip slider of the second unit. That way all the output of the second unit is free to use or to re-use to operate a 3rd unit and so on. Each unit or stage should be able to have 50% more magnet surface area causing an exponential torque amplification as the stages are added. This is the idea but at this point only a theory since it hasn't been built or tested. I am developing a mechanism to capture the "stop and go" motion of your setup and use it in a "two bicycle wheel" setup, since continuous rotation ultimately is going to be the most efficient. A linear sliding setup should work as well. One way to capture the motion of the magnets is to create cam followers to follow two separate tracks to create the "stop and go" motion. Glad to see someone working on this! 
Regards Luc Title: Re: TD replications Post by: Floor on February 22, 2017, 12:41:11 AM A little comic relief that just might floor you , I hope its not out of place. ..................................... If one seeks proof of perpetual motion.... One need only to look to the length of men's arguments upon that subject. :) ........................ Conservation of momentum is the most critical aspect of most over unity designs ..... This is because ... most designs, rely upon momentum as their primary energy source. ;) .......................... Some say that Playing with magnets is like a rocking chair, while it gives a person something to do, it gets them no where ! further more Magnets are dangerous ! If two or more are swallowed, their attraction can pinch right through an organ ! I have even heard magnets referred to as "the devils junk" by some. So ... my suggestions ares don't play with the devils junk and especially ...... never let the devil put his junk into your mouth. floor Title: Re: TD replications Post by: Cairun on February 22, 2017, 07:05:40 PM Yes, they are linked to multiply in a way. The idea is, if there's a gain, then the first unit can be calibrated to use its gain to operate the polarity flip slider of the second unit. That way all the output of the second unit is free to use or to re-use to operate a 3rd unit and so on. Each unit or stage should be able to have 50% more magnet surface area causing an exponential torque amplification as the stages are added. This is the idea but at this point only a theory since it hasn't been built or tested. Luc and Floor, Your cascade setup sounds very exciting. I hope it will work as expected. Upon playing more with the ceramic magnets, I realized that continuous rotation may not be achievable with your setup. 
The output magnet seems to only do positive work in the 11 mm stroke(maybe a tiny bit more than 11mm), and as it moves further than 11mm it enters into attraction mode which essentially creates a sticky spot. Unless I am setting up my magnets in the wrong way. Can you confirm this or let me know what I may be doing wrong? I've completed my conceptual design on the "stop and go" mechanism, but I still need to figure out some of the mathematical relationships. I will post pictures or a video of the concept when I have it in a presentable form. Regards, Alex Title: Re: TD replications Post by: gotoluc on February 22, 2017, 08:54:57 PM Upon playing more with the ceramic magnets, I realized that continuous rotation may not be achievable with your setup. The output magnet seems to only do positive work in the 11 mm stroke(maybe a tiny bit more than 11mm), and as it moves further than 11mm it enters into attraction mode which essentially creates a sticky spot. Unless I am setting up my magnets in the wrong way. Can you confirm this or let me know what I may be doing wrong? Regards, Alex Hi Alex, It is difficult to see what's different without seeing what you've built. Can you make a video so I can see what could be different. Two things my device has is, the magnets on the wheel are alternating north south and the 11mm magnet has stops (wood blocks) to limit the 11mm magnet stroke so it does not get out of range. Maybe watch the video again to see these limiter blocks. Looking forward to seeing what you've built. Luc Title: Re: TD replications Post by: Cairun on February 22, 2017, 09:42:46 PM Luc, Thanks for the quick reply. I did notice the alternating magnet pole arrangement (which is where the self re-set comes from), and the limiter blocks (which is where the 11 mm is coming from). I am trying to emulate your setup exactly. I guess my question is if you remove the limiter blocks, how much further will the slider magnet travel beyond the 11mm distance. 
Apparently, don't remove the limiter blocks just to answer my question. I will set something up tonight and make a video to clarify my question. Regards, Alex Title: Re: TD replications Post by: gotoluc on February 22, 2017, 10:19:58 PM The 11mm magnet can travel a little more then where I limited them but if its center goes past the side edge of the rotating magnet it won't flip back on its own when the next rotating magnet comes in. That's the main reason for the limiters. Luc Title: Re: TD replications Post by: Floor on February 23, 2017, 01:33:59 AM Opening a latch while it is is under a large force can use a lot of energy. Title: Re: TD replications Post by: gotoluc on February 23, 2017, 02:50:41 AM I stumbled on this video and wondered if anyone has heard about this device. MTG (Magnetic Torque Generator) Link to video: https://www.youtube.com/watch?v=NI2Fwb91PhI As for my device, I should have a larger version to demonstrate late next week. Luc Title: Re: TD replications Post by: Cairun on February 23, 2017, 06:02:35 AM The 11mm magnet can travel a little more then where I limited them but if its center goes past the side edge of the rotating magnet it won't flip back on its own when the next rotating magnet comes in. That's the main reason for the limiters. Luc, Below is the link to a video showing my build which is the same setup as yours except I turned the bicycle wheel into a slide. https://www.youtube.com/watch?v=J-BqcWUHmWY&feature=youtu.be (https://www.youtube.com/watch?v=J-BqcWUHmWY&feature=youtu.be) The video shows the sticky spot I've mentioned in my previous post. This would not affect your linear reciprocating build. It only affects continuous rotation designs(example: mounting the magnets on 2 bicycle wheels instead of a bicycle wheel+slider setup as you've used in your video). I guess this renders the "stop and go" mechanism useless then. Anyway, I look forward to your next build. 
Regards, Alex Title: Re: TD replications Post by: Floor on February 23, 2017, 05:36:30 PM @Cairun Nice video / device ! thanks So far, my own attempts at a constantly rotating version have not worked out. Going with the full stop.... then... next action, seems always to give the better results. Conserving all of the energy of momentum would be nice but doesn't have to happen... at all. Our / "the free energy researcher's", obsession with continuous rotation has been (perhaps)... one of the primary reasons that these interactions have been over looked in the past / for so long. I'm not phrasing some new law here (heaven forbid) ! but Hard right angle interactions, stop actions and reciprocation .... works.... circular hasn't ! Curious don't you think ? ... ... A longer stroke in the output will have a more extreme drop off in force... near the end of that stroke. That force while it may otherwise be "wasted" would also be difficult to practically utilize, (staying near to the average force is good) ................ I think maybe, a longer INPUT stroke might be the first thing to try to expand. i.e yet longer magnets in what is already their longest dimension or two magnets end to end ............... Next.... maybe a wider "output magnet" width ? .....and cascading just my take on it regards floor Title: Re: TD replications Post by: Cairun on February 23, 2017, 08:08:05 PM Floor, Thank you! Quote Going with the full stop.... then... next action, seems always to give the better results. Conserving all of the energy of momentum would be nice but doesn't have to happen... at all. I agree, the "full stop....then...next action" is necessary. And conservation of all momentum is not necessary to achieve OU, although it would be nice if that were possible. Quote Our / "the free energy researcher's", obsession with continuous rotation has been (perhaps)... one of the primary reasons that these interactions have been over looked in the past / for so long. Perhaps, that's true. 
I am always too eager to reach the end goal and overlook certain subtle but important details. Quote A longer stroke in the output will have a more extreme drop off in force... near the end of that stroke. That force while it may otherwise be "wasted" would also be difficult to practically utilize, (staying near to the average force is good) I agree, and the extreme drop off ultimately turns into a sticky spot. Finding the limits of the stroke is essential in maximizing the output. Although, a 60% excess of output to input is pretty good too. Quote I think maybe, a longer INPUT stroke might be the first thing to try to expand. i.e yet longer magnets in what is already their longest dimension or two magnets end to end ............... Next.... maybe a wider "output magnet" width ? .....and cascading I agree, increasing the both magnets' length will increase the output force. And increasing the output magnet's width will increase the length of the output stroke. Although, I think input work required may increase as well. However I don't know if the increase in input vs output will proportional. Additionally, The sticky spot is irrelevant now that I've had some time to think about it. Stepping through the operational sequence I see now that the input magnet will move away before the output magnet moves beyond the 11mm of stroke. A huge brain fart on my part... I will continue to develop my "stop and go" mechanism. Best regards, Alex Title: Re: TD replications Post by: gotoluc on February 24, 2017, 12:22:04 AM Luc, Below is the link to a video showing my build which is the same setup as yours except I turned the bicycle wheel into a slide. https://www.youtube.com/watch?v=J-BqcWUHmWY&feature=youtu.be (https://www.youtube.com/watch?v=J-BqcWUHmWY&feature=youtu.be) The video shows the sticky spot I've mentioned in my previous post. This would not affect your linear reciprocating build. 
It only affects continuous rotation designs(example: mounting the magnets on 2 bicycle wheels instead of a bicycle wheel+slider setup as you've used in your video). I guess this renders the "stop and go" mechanism useless then. Anyway, I look forward to your next build. Regards, Alex Nice and clean build Alex. Thanks for making a video. I understand now what you're trying to do which won't work that way as you've concluded. You don't want to use the (11mm) magnet to exit because of a sticky spot in that dimension. Use the other magnet to exit since it has next to no sticky spot. It would take too much writing for me to describe how I envision it all working, so you're going to have to wait for my next video which should demonstrate more of how I plan to solve this. Hopefully I'll have something next week Great work! Luc Title: Re: TD replications Post by: Cairun on February 24, 2017, 01:07:40 AM Nice and clean build Alex. Thanks for making a video. I understand now what you're trying to do which won't work that way as you've concluded. You don't want to use the (11mm) magnet to exit because of a sticky spot in that dimension. Use the other magnet to exit since it has next to no sticky spot. It would take too much writing for me to describe how I envision it all working, so you're going to have to wait for my next video which should demonstrate more of how I plan to solve this. Hopefully I'll have something next week Great work! Luc Luc, Thanks. The build took longer than I expected, but I am glad you like it. I look forward to your next video. And, thanks for the pointers. I did, after thinking through the operational sequence, realize the sticky spot is irrelevant and will not pose any problems. Best regards, Alex Title: Re: TD replications Post by: gotoluc on February 24, 2017, 01:22:14 AM I did, after thinking through the operational sequence, realize the sticky spot is irrelevant and will not pose any problems. Best regards, Alex Great!... 
then you're on the right track... pun intended! You may want to have a look at the Energetic Forum where Chet (user ramset) has started a topic and user Allen Bergess is very enthusiastic and has many ideas: http://www.energeticforum.com/renewable-energy/20699-mechanical-magnetic-torque-amplifier.html Regards Luc Title: Re: TD replications Post by: Cairun on February 24, 2017, 03:59:53 PM Great!... then you're on the right track... pun intended! You may want to have a look at the Energetic Forum where Chet (user ramset) has started a topic and user Allen Bergess is very enthusiastic and has many ideas: http://www.energeticforum.com/renewable-energy/20699-mechanical-magnetic-torque-amplifier.html Regards Luc Luc, I just registered a account with EnergeticForum.com. I read through the post but couldn't understand some of the ideas since I couldn't open any pictures/attachments. It looks like one of the ideas is very similar to the "stop and go" mechanism I am developing. I will keep an eye out for this forum in the future. thanks for the heads up. Regards, Alex Title: Re: TD replications Post by: gotoluc on February 24, 2017, 05:42:15 PM Sorry, I forgot about if you're not a member you can't see uploaded pictures. They also can be slow to approve new members. Hopefully it won't be your case. Luc Title: Re: TD replications Post by: ramset on February 24, 2017, 05:56:36 PM Luc sorry to interrupt here, I did try to ask Allen to discuss this [must be a member to view at THAT forum] he is also a member here [I think Flame wars caused some issues in the past here??] as you know I started the topic there [Aaron's forum] with your permission so more eyes could see the open source work...... just some clarity ,as you know I have no interest in the politics or memberships of these forums ,just to learn and share for maximum input ... or contributions from the community. 
maybe Allen will choose to discuss this and perhaps start a topic here where viewing of images is not attached to membership ? sorry for the interruption here. I did try...? with respect and gratitude Chet K Title: Re: TD replications Post by: gotoluc on February 24, 2017, 06:31:12 PM :) Title: Re: TD replications Post by: gotoluc on February 24, 2017, 10:10:36 PM Hi Alex (cairun) Did you receive my message? Please reply Luc Title: Re: TD replications Post by: Cairun on February 25, 2017, 06:16:47 AM Hi all, This is the conceptual design of the "stop and go" mechanism designed for Luc's version of Floor's Twist Drive idea specifically for use with a continuous rotation setup. As you can see the output gear stops and stays in position for a brief moment, but the planetary gear rotates continuously(it is attached to the input gear which means the input gear is in continuous rotation). I still need to try and figure out the mathematical relationship. The 2 attached images show the front view and back view of the mechanism as well as labeling for the individual components. The following link shows the mechanism in motion: https://www.youtube.com/watch?v=-lqlbY4v38s&feature=youtu.be (https://www.youtube.com/watch?v=-lqlbY4v38s&feature=youtu.be) Imagine 2 bicycle wheels with the input magnets mounted on the rim of one and the output magnets mounted on the rim of the other bicycle wheel. Each wheel connected to the output gear of 2 separate "stop and go" mechanisms. Set the wheels at 90 deg from each other. Couple the input gears of both mechanisms(have to time it exactly right) via gears, belts, U-joint linkages etc. Mount a flywheel to the input gear to preserve any excess momentum. And the result should be a self runner. This mechanism should capture all momentum and not waste any. Even the braking of the wheels(due to stop and move motion) should transfer into usable momentum. 
As for the linear reciprocating setup, I will draw something up to show the "track follower" mechanism that I've mentioned, later on. Regards, Alex Title: Re: TD replications Post by: Floor on February 25, 2017, 08:22:38 PM @GotoLuc/ Cairun Fantastic work ! @ All readers Some observations / features..... To assist one in understanding the magnet interplay. The force to slide the input magnet is almost completely neutralized by the balance between attraction / repulsion ..... when the input magnet's line of travel is along the center of the face of the output magnet. That center is the distance to the edges of the output magnet's face, as measured at a right angle to the center line of the input magnets travel. That center line from pole to pole of the input magnet is square to the center line from pole to poles of the output magnet...in two planes. and also...The faces of the two magnets are square to one another in all planes. ... Note...The greater the distance from those edges of the face of the output magnet to its center.....the easier it is to come to a zero sliding input force (by balance). e.g The broader the face of the input magnet...the easier it is to arrive at that force balance. (A less perfect travel path / alignment is needed by the input magnet) When input motion is along the center of the output magnet face .... this changes both the input and the output work... as compared to when input is made near to the edge of the output magnet. I assume this also changes the final or over all, work in to work out ratio ?. If the input magnet's travel is near to the edge of the face of the output magnet.... the input force and work, is increased. But also the output disproportionately increases ? .............................................. In Luc's current design... During both, the input stroke and the reset stroke, there is a mutual interaction between the output magnet (A) and the input / reset magnets as a combined field (B). 
The nature of that relationship is affected by which position the output magnet is in...e.g. cocked or fired. These relationships affect the input and reset strokes. More on that.... later / maybe. .......................................... Note... Use of the phrase "pumping action" in regard to the input magnet, would be somewhat misleading... as there is a motion then hold position action to its motion. Please find the two video links below. regards ... floor http://www.dailymotion.com/video/x5d7hdv_lucs-1_tech http://www.dailymotion.com/video/x5d7ip9_lucs-2_tech Title: Re: TD replications Post by: gotoluc on February 25, 2017, 11:16:50 PM Hi Alex, Thanks for sharing your very interesting mechanical gear timing system. I'm always amazed as just about any timing and motion can be accomplished with gears. Glad you're here as you're a great asset to the research. Regards Luc Title: Re: TD replications Post by: gotoluc on February 25, 2017, 11:19:48 PM Thanks floor for your new test details and video demos Regards Luc Title: Re: TD replications Post by: Cairun on February 26, 2017, 04:06:43 AM Hi Alex, Thanks for sharing your very interesting mechanical gear timing system. I'm always amazed as just about any timing and motion can be accomplished with gears. Glad you're here as you're a great asset to the research. Regards Luc Luc, You are very welcome! I am glad to be a part of the research. I hope my contributions can help to further this research. Regards, Alex Title: Re: TD replications Post by: Cairun on February 26, 2017, 04:38:29 AM http://www.dailymotion.com/video/x5d7hdv_lucs-1_tech (http://www.dailymotion.com/video/x5d7hdv_lucs-1_tech) http://www.dailymotion.com/video/x5d7ip9_lucs-2_tech Floor, Your setup in the second video is very interesting and got me thinking.
If your put another set of output magnets with south and north poles reversed on the left side of the input magnet then you would have 2 output strokes and get 2x the output. Furthermore, once the input magnet is pulled out, the output magnets would self reset since they are in attraction to each other. Now to get 4x the output, you can put another input magnet on top of the the current input magnet with south and north poles reversed. This serves to double the output stroke force. This is very exciting. I wish I can test it now but I can't since I will be working away from home for the next month. Regards, Alex [size=78%] [/size] [size=78%] [/size] Title: Re: TD replications Post by: Floor on February 27, 2017, 12:21:32 AM @Cairon Thanks, and see you when we do. @GotoLuc Simple is best... short of continuous rotation and simpler ! lift a 4 pound seldge hammer head ... in 2 foot increments... by lowering a 1/4 pound weight 6 inches....untill the 1/4 pound weight has lowered a total of 30 inches and the 4 pound weight is ten feet in the air ??? or whatever the maximum ratio can come out to ... via a single cascade. Do it your own way ... I hope this might be some contribution. .... in pdf form. floor Title: Re: TD replications Post by: Cairun on February 27, 2017, 04:05:20 AM Floor and Luc, Even though I can't conduct experiments while away from home. I will continue to design a self runner using Luc's setup(in a linear reciprocating desing using 2 slides). I hope to finish design and order all purchased parts before I get back home so I can start the build immediately when I do get back. Regards, Alex Title: Re: TD replications Post by: gotoluc on February 27, 2017, 05:10:29 PM Great Alex Maybe all I'll do is put the 2 slides together to test the results using the 6" ceramic magnets and wait to see your mechanical design. Thanks for staying on it. 
Luc Title: Re: TD replications Post by: Cairun on February 27, 2017, 07:32:44 PM Great Alex Maybe all I'll do is put the 2 slides together to test the results using the 6" ceramic magnets and wait to see your mechanical design. Thanks for staying on it. Luc Luc, That sounds good. I will try to get the mechanical design out as soon as I can. But, I will be working 72 hour work weeks so it might take a while since I will have little spare time. Have you tried to line up multiple output magnets (your cascade idea) to increase output stroke? If that increases output stroke without dramatically increasing input work, then I would like to incorporate that into my design. Regards, Alex Title: Re: TD replications Post by: gotoluc on February 27, 2017, 10:09:53 PM Luc, That sounds good. I will try to get the mechanical design out as soon as I can. But, I will be working 72 hour work weeks so it might take a while since I will have little spare time. Have you tried to line up multiple output magnets (your cascade idea) to increase output stroke? If that increases output stroke without dramatically increasing input work, then I would like to incorporate that into my design. Regards, Alex Hi Alex, Just an hour ago I came up with a new simple design that will give us more test data on the performance of a 6 inch output stroke multiplied 4 times. It's simple enough that I should be able to build it and have it ready to test in 2 to 3 days. From those test results we should be able to decide the direction we want to take. So just stay focused on your work as there's nothing to do till then. Thanks Luc Title: Re: TD replications Post by: Cairun on February 28, 2017, 03:30:42 AM Hi Alex, Just an hour ago I came up with a new simple design that will give us more test data on the performance of a 6 inch output stroke multiplied 4 times. It's simple enough that I should be able to build it and have it ready to test in 2 to 3 days.
From those test results we should be able to decide the direction we want to take. So just stay focused on your work as there's nothing to do till then. Thanks Luc Luc, A design with a 6 in output stroke and 4x the force? That sounds exciting! I will pause on the design on my part for now, then. I look forward to the result of your tests. Regards, Alex Title: Re: TD replications Post by: Floor on February 28, 2017, 03:42:20 PM Below are some png files / drawings that illustrate the alignments / actions of Luc's "mechanical magnet torque amplifier". please find the attached CascadeStageiMatching 1.png file and the CascadeStageiMatching 2.png file regards floor Title: Re: TD replications Post by: Floor on February 28, 2017, 08:37:09 PM Notes 1. I have not (above) illustrated all details of the magnet configurations. Luc's last video measured a specific arrangement.. which may be / probably is actually some what different than the (2) above illustrations. Those illustrations are to give the (in general) of that configuration. 2. a device utilizing large force over a short distance.... can have excessive energy wasted... due to small amounts of slack / play / slop at the junctions of its mechanical / moving parts. 3. Translating a large force / short stroke into a long stroke / lower force before any one way clutch (which will have play before it engages) will be more efficient than the other way around. 4. There is no energy lost in the trading of greater force at less distance for less force but greater distance... e.g. using a large diameter output pulley. (above illustrations) 5. The diameter of the round gear beneath the rack gear (above illustrations) should cause it to rotate by 90 degrees during the magnet stroke. 5. The eccentric pulley (illustrated above) anticipates only a 90 degree rotation. 6. that output pulley might as well be eccentric ..... even if out put force is MOSTLY constant over the distance traveled.... 
but not if output force is very constant over the entire distance traveled . best wishes floor Title: Re: TD replications Post by: Nonlinear on March 01, 2017, 02:52:04 PM Luc, You have said in one of your posts, that you don’t have sufficient knowledge of mechanics to correctly calculate the energy efficiency of your devices. You have also asked for help in this regard. Here is the assistance you were looking for; see the attached files. I have integrated the work, both the input and the output, and calculated the efficiency of the device. If your measurement data is correct, then the coefficient of performance is COP=161.3%. This would be quite impressive, and I could design a generator on this idea, but I have doubts about the validity of your results. For example when you are taking the first measurement, then the slider magnet is completely under the wheel, is that correct? Then as you pull the rotor magnet over the slider there is a repulsive force between them. So far so good. Then when the slider is released and shifted to the farthest position from the rotor, then you continue measuring the force required to pull the rotor away from the bottom position. What does not make sense to me is that you are now measuring an attractive force between the magnets, even though I would expect a repulsive force to persist. A repulsive force existed between the magnets until the rotor covered the slider, and the repulsive force made the slider move away. This same repulsive force supposed to still exist (even though smaller) while you are pulling the rotor away from above the slider. Which means that during the second half of the measurement the magnets supposed to perform work, and you supposed to push the rotor (not pull) during the measurement. Can you please cast some light on the exact procedure of measurement, polarities of magnets and preferably dimensions as well? Thanks. Great workmanship! 
Title: Re: TD replications Post by: Floor on March 01, 2017, 03:17:07 PM @Nonlinear Gotoluc has several videos up on various of these magnet interactions. Watching the set of these short videos should give you a comfortable understanding of the interactions. It can take some time to gain familiarity with the configurations. There are 7 videos here by "seethisvid" that give some explanations of the magnet's polar alignments, motions and resulting force vectors. http://www.dailymotion.com/video/x59r978 http://www.dailymotion.com/video/x5an8hd_rtangsld2_tech http://www.dailymotion.com/video/x5d7hdv_lucs-1_tech http://www.dailymotion.com/video/x5d7ip9_lucs-2_tech Also there are some drawings and a pdf file, upon the previous pages of this topic floor Title: Re: TD replications Post by: Floor on March 01, 2017, 04:31:25 PM @Nonlinear I don't think Luc has stated the dimensions of the magnets he is using, but I have assumed... that they are the same as those I have been using ? see this link below http://overunity.com/14311/work-from-2-magnets-19-output-2/msg489791/#msg489791 Thanks for the conversions to newtons / joules best wishes floor Title: Re: TD replications Post by: shylo on March 01, 2017, 11:18:13 PM do the work at the bench and you will see there is more than what were told to look at. You can build a rotor that has a cam that moves the force of the magnet attached to move in and out, But it will find balance, inject an offset to that balance , at the proper time. You won't get free power, but you can get it down to the point where it is virtually free. Just what I've seen so far. artv Title: Re: TD replications Post by: gotoluc on March 02, 2017, 05:33:33 PM Luc, You have said in one of your posts, that you don’t have sufficient knowledge of mechanics to correctly calculate the energy efficiency of your devices. You have also asked for help in this regard. Here is the assistance you were looking for; see the attached files. 
I have integrated the work, both the input and the output, and calculated the efficiency of the device. If your measurement data is correct, then the coefficient of performance is COP=161.3%. This would be quite impressive, and I could design a generator on this idea, but I have doubts about the validity of your results. For example when you are taking the first measurement, then the slider magnet is completely under the wheel, is that correct? Then as you pull the rotor magnet over the slider there is a repulsive force between them. So far so good. Then when the slider is released and shifted to the farthest position from the rotor, then you continue measuring the force required to pull the rotor away from the bottom position. What does not make sense to me is that you are now measuring an attractive force between the magnets, even though I would expect a repulsive force to persist. A repulsive force existed between the magnets until the rotor covered the slider, and the repulsive force made the slider move away. This same repulsive force supposed to still exist (even though smaller) while you are pulling the rotor away from above the slider. Which means that during the second half of the measurement the magnets supposed to perform work, and you supposed to push the rotor (not pull) during the measurement. Can you please cast some light on the exact procedure of measurement, polarities of magnets and preferably dimensions as well? Thanks. Great workmanship! Thanks Nonlinear for taking the time to verify my calculation and present your data. I made a video for you in hopes of making the process clearer to understand . Link to video: https://www.youtube.com/watch?v=z6sbIgr2L8A (https://www.youtube.com/watch?v=z6sbIgr2L8A) Please feel free to ask questions if needed Regards Luc Title: Re: TD replications Post by: Floor on March 03, 2017, 03:07:58 PM @Gotoluc Great work Luc ! I am setting up to do measurements / replication on you last demo. 
This will be of an output stroke only. If... I also do the input stroke measurements, they will have to be of a straight on (parallel) stroke, and one magnet only (input).... to one magnet only (output) @all readers These experiments are being done on the fly. (not perfect) My measurement process will be only an approximation of Luc's process (not identical). please find the attached PNG file regards Title: Re: TD replications Post by: Nonlinear on March 03, 2017, 08:33:18 PM Thanks Luc for the video, now your results make more sense. There are several mistakes in your approach and measurements, which may be very well the cause for the detection of COP>1. The biggest mistake is to judge the efficiency of a device based on average forces. That is completely unscientific, and it can very easily mislead you. The total work must be measured and calculated, like in my spreadsheet. The second mistake is ignoring the role of measurement errors. A measurement is never 100% accurate, there is always some error in it. The experimenter must be aware of the expected maximum error margin of his measurements, and disclose it together with the measurement results. Without this, the data can not be taken seriously. For example the best resolution of your scale is 5g which is extremely low and produces a very large error if you are measuring forces in the range of 0 to 100g. When you are measuring 100g then the uncertainty of the measured result is 10g, which is 10%. The correct way of showing your measurement result is: 100g +-5g, or with other words, the real force could be anything from 100-5=95g to 100+5=105g, the error margin is 105-95=10g, which is 10% of the measured value of 100g. Therefore if you find a COP=1.1 with such large error margin of measurement, then your measurements are pretty much useless to prove anything. If you are measuring even less than 100g, like in some of your measurement series measuring 5, 10, 20g etc. 
then your error margin is so huge that the data is of no value to prove anything. If your scale has a low resolution, then build a device that requires the measurement of about 100 times larger forces than the 5g resolution. If this is not practical, then use a scale that has got sufficient resolution and accuracy to produce around 1% (or less) measurement error. The third mistake is not to measure the complete cycle of movement. For example webby1 was trying to convince you few pages back that you have to measure the 4th part of the cycle as well, in one of your earlier devices. He finally succeeded in this effort in post: http://overunity.com/16987/td-replications/msg496971/#msg496971 and you provided the data in: http://overunity.com/16987/td-replications/msg496974/#msg496974 After you have measured this 4th part of the cycle and taken it into account, then your (still incorrect) averaging calculations showed only 10% of excess work, which can very easily attributed to the other mistakes already mentioned. In this case of rotary device this is not as critical as in the previous devices, but I would still recommend you to measure the complete cycle. Meaning, two rotary magnets pass in sequence above the slider. Please also measure the output force as well at least 10 times, like after each 1mm movement. I was trying to get a manual feel of the forces in your device using two neodymium magnets of 4x2x1cm, which I don’t recommend to anyone. These magnets are just too powerful, and if one doesn’t have very strong fingers, they can also harm you! But, I have got no ceramic magnets of rectangular shape right now, so can not do the safe version. Anyway, keep up the good work, and if the COP is still higher than say 1.2 even after fixing these mistakes, then it should be possible to build at least a perpetuum mobile using this magnet arrangement. 
If the COP would be really 1.6 like in your measurements, then the machine should be able to generate useful output power as well, besides just running itself. Thanks Floor for the links and the drawing. Title: Re: TD replications Post by: gotoluc on March 03, 2017, 11:54:54 PM Here is a sneak peek of the v2.0 Magnet Torque Amplifier device before it's all assembled and unable to see the internal design mechanism. Link to video: https://www.youtube.com/watch?v=BMVES42VbzA (https://www.youtube.com/watch?v=BMVES42VbzA) Test results should come in the next couple of days Stay tuned Luc Title: Re: TD replications Post by: gotoluc on March 04, 2017, 01:29:58 AM Here is the device assembled Link to video: https://www.youtube.com/watch?v=OsEbX8yJ91I I'll need to bolt this down for testing :o Luc Title: Re: TD replications Post by: Floor on March 04, 2017, 05:52:22 AM @All readers The below png file contains the results of a quasi replication of GotoLuc's MagTorqAmp (output only) best wishes floor Title: Re: TD replications Post by: Floor on March 04, 2017, 06:09:35 AM @GotoLuc Awesome ! floor Title: Re: TD replications Post by: Floor on March 04, 2017, 06:15:37 AM @Nonlinear If you have the time, I would like to talk over / better understand your suggested approach. Maybe in the topic http://overunity.com/16954/magnets-motion-and-measurement/ regards floor Title: Re: TD replications Post by: Cairun on March 04, 2017, 10:17:40 AM @Luc, Your latest build looks awesome. Regards, Alex Title: Re: TD replications Post by: shylo on March 04, 2017, 01:19:41 PM Luc Nice build , and thanks for showing the break down and explaining the poles positions. artv Title: Re: TD replications Post by: Floor on March 04, 2017, 03:22:04 PM All right, so... using a Cairun design rotary-stop-start input... this could get even more interesting. @ GotoLuc Important note Now that you are using larger magnets, the forces involved may be great enough that a personal injury could be serious ! 
Please consider designing and installing some fool proof safety / locking mechanisms on your new device. Safety first .... then have as much fun as humanly possible ! ............................................................................................... It looks like you may need lateral support / rollers like that which you were considering in the bicycle rim device ? very nice build. regards floor Title: Re: TD replications Post by: gotoluc on March 04, 2017, 07:11:57 PM Thanks Luc for the video, now your results make more sense. There are several mistakes in your approach and measurements, which may be very well the cause for the detection of COP>1. The biggest mistake is to judge the efficiency of a device based on average forces. That is completely unscientific, and it can very easily mislead you. The total work must be measured and calculated, like in my spreadsheet. The second mistake is ignoring the role of measurement errors. A measurement is never 100% accurate, there is always some error in it. The experimenter must be aware of the expected maximum error margin of his measurements, and disclose it together with the measurement results. Without this, the data can not be taken seriously. For example the best resolution of your scale is 5g which is extremely low and produces a very large error if you are measuring forces in the range of 0 to 100g. When you are measuring 100g then the uncertainty of the measured result is 10g, which is 10%. The correct way of showing your measurement result is: 100g +-5g, or with other words, the real force could be anything from 100-5=95g to 100+5=105g, the error margin is 105-95=10g, which is 10% of the measured value of 100g. Therefore if you find a COP=1.1 with such large error margin of measurement, then your measurements are pretty much useless to prove anything. If you are measuring even less than 100g, like in some of your measurement series measuring 5, 10, 20g etc. 
then your error margin is so huge that the data is of no value to prove anything. If your scale has a low resolution, then build a device that requires the measurement of about 100 times larger forces than the 5g resolution. If this is not practical, then use a scale that has got sufficient resolution and accuracy to produce around 1% (or less) measurement error. The third mistake is not to measure the complete cycle of movement. For example webby1 was trying to convince you few pages back that you have to measure the 4th part of the cycle as well, in one of your earlier devices. He finally succeeded in this effort in post: http://overunity.com/16987/td-replications/msg496971/#msg496971 (http://overunity.com/16987/td-replications/msg496971/#msg496971) and you provided the data in: http://overunity.com/16987/td-replications/msg496974/#msg496974 (http://overunity.com/16987/td-replications/msg496974/#msg496974) After you have measured this 4th part of the cycle and taken it into account, then your (still incorrect) averaging calculations showed only 10% of excess work, which can very easily attributed to the other mistakes already mentioned. In this case of rotary device this is not as critical as in the previous devices, but I would still recommend you to measure the complete cycle. Meaning, two rotary magnets pass in sequence above the slider. Please also measure the output force as well at least 10 times, like after each 1mm movement. I was trying to get a manual feel of the forces in your device using two neodymium magnets of 4x2x1cm, which I don’t recommend to anyone. These magnets are just too powerful, and if one doesn’t have very strong fingers, they can also harm you! But, I have got no ceramic magnets of rectangular shape right now, so can not do the safe version. 
Anyway, keep up the good work, and if the COP is still higher than say 1.2 even after fixing these mistakes, then it should be possible to build at least a perpetuum mobile using this magnet arrangement. If the COP would be really 1.6 like in your measurements, then the machine should be able to generate useful output power as well, besides just running itself. Thanks Floor for the links and the drawing. Hi Nonlinear Thanks for your post. I've been aware of the scales 5 gram resolution limitation and agree it's not ideal for small measurements. The errors % margin will be greatly reduced with the version 2 build as the scale will be in the Kg measurement ranges. Lets see what these new numbers will show. Regards Luc Title: Re: TD replications Post by: Nonlinear on March 04, 2017, 09:31:03 PM If you have the time, I would like to talk over / better understand your suggested approach. Maybe in the topic http://overunity.com/16954/magnets-motion-and-measurement/ I don't think it is a good idea to separate the discussion of measurement techniques from the experimental thread (here) where the action happens. There is a good chance that those who experiment and supposed to read and implement the suggestions will not find them. Like in this post: http://overunity.com/16954/magnets-motion-and-measurement/msg499599/#msg499599 telecom has explained how to measure and correctly calculate the input and output work already on the 9th February. His suggestion was ignored and the averaging continued as if nothing happened. If you ignore good advice then why would anyone be willing to help? Anyway, if you don't' understand telecom's explanation, then I can explain it again. What is it that you don't understand? Title: Re: TD replications Post by: gotoluc on March 04, 2017, 10:54:03 PM I would tend to agree with you Nonlinear. Better keep the measurement techniques of a replication in this topic. 
Now, concerning telecom's advice "below" "Re Gotoluc measurements: work = force x distance I would like Gotoluc to measure force for each segment of his input and output dials, and multiply this force by the length of the segment. Then add them together for the input and output. This will give us input and output work. The more segments he has, the more precise would be the calculations. In fact, he already has everything in place, just needs to do the above operations." I do remember reading it and re-read but can't seem to understand or find what I have not provided. So I guess you'll have to explain what I have not done. BTW, your calculations came to the same as I had calculated, so again I fail to see what I forgot. Kind regards Luc Title: Re: TD replications Post by: Floor on March 05, 2017, 02:25:27 AM @Nonlinear A request of / upon Telecom by floor @ http://overunity.com/16987/td-replications/msg499531/#msg499531 Telecom's rseponce next day @ http://overunity.com/16954/magnets-motion-and-measurement/msg499599/#msg499599 Have I given a correct interpretation of the basics of the process for calculating the work in these magnet interactions here ........ @ http://overunity.com/14311/work-from-2-magnets-19-output-2/msg498005/#msg498005 ? If so, and with your permission :) ..... then I will re post those three pages / files here ? A COP of 161.2838 .... does this mean basically the same thing as 61.2838 % more out than in ? thanks floor Title: Re: TD replications Post by: verpies on March 05, 2017, 12:31:39 PM I do remember reading it and re-read but can't seem to understand or find what I have not provided. I just popped in here without reading the entire thread. Please give me some links to your work, in which you had summed the force*distance for the input and output of your system, so I can evaluate it. If I notice anything you have omitted or any errors, I'll let you know. 
Title: Re: TD replications Post by: gotoluc on March 05, 2017, 03:50:13 PM I just popped in here without reading the entire thread. Please give me some links to your work, in which you had summed the force*distance for the input and output of your system, so I can evaluate it. If I notice anything you have omitted or any errors, I'll let you know. Hi verpies, Nice to see you here. The below videos (in order) relate to the around 60% over unity https://www.youtube.com/watch?v=oUlDMY1iE5A (https://www.youtube.com/watch?v=oUlDMY1iE5A) https://www.youtube.com/watch?v=dpBaeJD38HI (https://www.youtube.com/watch?v=dpBaeJD38HI) https://www.youtube.com/watch?v=z6sbIgr2L8A (https://www.youtube.com/watch?v=z6sbIgr2L8A) And the below videos are v 2.0 which is a Super build of the above Just completed the build on Friday so no measurements yet. I'll have to bolt down this beast to measure her ;) https://www.youtube.com/watch?v=BMVES42VbzA (https://www.youtube.com/watch?v=BMVES42VbzA) https://www.youtube.com/watch?v=OsEbX8yJ91I (https://www.youtube.com/watch?v=OsEbX8yJ91I) Regards Luc Title: Re: TD replications Post by: dieter on March 05, 2017, 07:31:43 PM Nice build. Reminds me a bit of Teslas Earthquake machine tho ^^ Make sure to have a sledge hammer at hand during the test run, in case of any runaway /evac situation. Title: Re: TD replications Post by: Floor on March 05, 2017, 08:27:47 PM @ Nonlinear Here in PDF (easy to down load / contemplate off line) form is my responce to your postings. Please find the attached file "MagnetForceIntegration.PDF" Thanks for your input best wishes floor @ all readers The above PDF file is not a private message. Title: Re: TD replications Post by: Nonlinear on March 05, 2017, 08:52:07 PM Quote BTW, your calculations came to the same as I had calculated, so again I fail to see what I forgot. Here is a parable: Joe and Fred have calculated the surface area of a gate that they want to paint, which is a square. 
They know how much paint is needed per square meter. So if they calculate the surface area of the gate then they will know how much paint they will have to buy, and how much that will cost. One side of the square is 2m long. Joe calculates the surface area as S=(2[m])^2=2^2[m^2]=4[m^2]. Fred prefers to calculate the same as S=2+2=4. As you can see in this specific case the numerical result of both calculations are the same (4) and correct, but Fred is calculating it the wrong way. If the length of one side is not 2m but let’s say 3m, then Joe will get a correct result as S=(3[m])^2=9[m^2], but Fred’s result of S=3+3=6 will be wrong. The fact that two different methods of calculation give the same result for a specific case (or even for several specific cases) does not mean that both methods of calculation are equally valid for all possible cases. Quote I do remember reading it and re-read but can't seem to understand or find what I have not provided. You have provided useful measurement data, and doing a great work on testing the energy balance of different magnet arrangements. I did not say that you did not do anything useful, or that you have not provided something essential. Your data already merits serious investigation (if true) and that is the reason I have chimed in and trying to help. Even if it finally turns out that there is no real COP>1 in these permanent magnet arrangements, the measurements are still of value if they are scientifically correct and sufficiently accurate. In such a case future experimenters can already know that it might not be the best idea to look for overunity in this area. But both your method of setting the measurement points and the method of calculating the COP are not the most scientific and accurate, and therefore not very convincing for the scientifically minded. You can fix this with no extra effort, and obtain/present neat measurement results for the same cost and work spent. 
The correct approach will also be valid for any possible measurement point distribution. Quote So I guess you'll have to explain what I have not done. You have done it (calculated the COP), just not the right way, which was also suggested by telecom, but now I see that it has been nicely described even earlier on January 04 by Floor: http://overunity.com/14311/work-from-2-magnets-19-output-2/msg498005/#msg498005 If you read the explanations in the pdf that is attached below this post, you will see that in general case it is not wise to use uniform segment sizes. In the regions where the curvature of the force function that you are measuring is large, one supposed to use small displacement sizes. Where the curve is nearly straight line (nearly constant increase or decrease of force per same displacement) one can use larger displacement increments. If the segment sizes are not uniform, then your method of simply averaging the forces, and ignoring the lengths of individual displacements will give a wrong final results. It is also wise to conform with the established scientific method of calculating the COP as the ratio of the output and input work (not average forces). Quote Have I given a correct interpretation of the basics of the process for calculating the work in these magnet interactions here ........ http://overunity.com/14311/work-from-2-magnets-19-output-2/msg498005/#msg498005 ? Yes, except for the minor math error in this formula: Pf+if/2=avf which would be correct in this form Fa=(Fp+Fi)/2. First one must add the two forces together, and then divide the result by 2. Your version first divides Fi by 2 and then adds Fp to it, which gives a wrong result. Quote If so, and with your permission ..... then I will re post those three pages / files here ? This is your thread, you don’t need my permission. It indeed makes sense to post everything relevant into this thread as well. 
Although I have also attached a similar pdf document to this message to clarify the calculation methods, yours is also useful, because it explains the subject in more layman terms and it may help those with less technical knowledge. If we want to implement the best method of COP calculation, then I (of someone else) will have to slightly modify the earlier posted spreadsheet as well (but the change is trivially simple). Quote A COP of 161.2838 .... does this mean basically the same thing as 61.2838 % more out than in ? Yes, it does. Whatever you get above 100% is free excess energy. Title: Re: TD replications Post by: Floor on March 05, 2017, 10:16:38 PM QUOTE from Nonlinear "Yes, except for the minor math error in this formula: Pf+if/2=avf which would be correct in this form Fa=(Fp+Fi)/2. First one must eadd together the two forces, and th results is divided by 2. Your version first divided Fi by 2 and then adds Fp to it, which gives a wrong result." END QUOTE OK Average force = (peak force - initial force) / 2 ............ I see, previously .... I left out the parentheses. Force applied = average force times displacement. ......................................... ......................................... note also.... proof reading on the fly often misses errors e.g. except for your minor math error " Pf+if/2=avf which would be correct in this form Fa=(Fp+Fi)/2." First one must eadd together the two forces, and th results is divided by 2. should read (Pf - if)/2=avf which would be correct in this form Fa=(Fp - Fi)/2. First one must subtract the two forces, and the results is divided by 2. Corrections are duly noted and requested, welcomed,... this is, in part, why the subject matter is in a public forum. However please understand that this is NOT a conventional class room. You will not be accorded a special status based upon any degrees. I am neither a math wiz nor an expert in magnets nor physics. 
But then neither do I have the kind of brain damage that some times results from the traditional abuses in the course of academic conditioning. (not that you do either, I don't know ?) People on this forum, that are here to learn, are here to learn..... what, where and why they want to learn .... not your or some other specific curriculum. This topic is not a competition, cooperation is the goal. Many trolls are very knowledgeable. If you become a disruption to the topic, no matter how cleverly you do so, the topic will become moderated. and posts simple deleted. You can contribute, but just know that we don't need your "help". Hopeing you can continue to stay involved, sincerely floor Title: Re: TD replications Post by: Nonlinear on March 05, 2017, 11:35:21 PM note also.... proof reading on the fly often misses errors e.g. except for your minor math error " Pf+if/2=avf which would be correct in this form Fa=(Fp+Fi)/2." First one must eadd together the two forces, and th results is divided by 2. should read (Pf - if)/2=avf which would be correct in this form Fa=(Fp - Fi)/2. First one must subtract the two forces, and the results is divided by 2. What… what? I have made few typos in my last post, which have been corrected within few minutes of posting, but I certainly did not post anything like (Pf - if)/2=avf or Fa=(Fp - Fi)/2. Subtract the two forces? From where did you get this nonsense idea that it “should read” like a subtraction? It makes no sense at all. Quote You will not be accorded a special status based upon any degrees. I did not ask for any special status, and I don’t respect any special status of anybody else here either. As long as it is correct and true what one posts, I respect it and appreciate it. But if something is incorrect, then I don’t care if even the owner of this forum or God himself made the false statement, or made the error, it has to be corrected, and if I have time and interested enough I will do that. 
Quote People on this forum, that are here to learn, are here to learn..... what, where and why they want to learn .... not your or some other specific curriculum. Please don’t speak in the name of other people who read this forum! The calculation methods that I have described are correct (anybody can verify that), and if you or anybody else here wants to keep the exclusive right to “teach the readers” some lousy methods of “doing science” and research, then that is very wrong. People have the right to know what is scientifically correct, and we should let everybody decide for himself which methods and explanations he prefers to learn and accept (if he didn’t know them already). When I wrote that this is your thread, I didn’t mean that therefore I consider you to be the almighty here. Only that you may politely direct the flow of discussion, but not that I or anybody else is obliged to obey your commands. You have no more authority here than I do; not even in this thread. Quote Many trolls are very knowledgeable. If you become a disruption to the topic, no matter how cleverly you do so, the topic will become moderated. and posts simple deleted. Wow! I have offered truth and correct knowledge, and in return now I have been accused of being a troll! Now that is quite something! I am a disruption to the topic? Nice one! As far as I know only Stefan can moderate and delete posts. Why do you speak in his name? But even if you could do it, I would not care. Quote You can contribute, but just know that we don't need your "help". Perhaps you don’t ”need” my input for ego reasons, which is easy to remedy. Just simply ignore my posts. But intending to prevent other readers to read my posts by threatening with deleting my writings is outrageous. I am not posting regularly on this forum because I have better things to do. 
In this case I have made an exception because Luc's latest measurements seemed promising, and wanted to make sure the results are not due to simple mistakes that are easy to correct. But now after seeing your egotistic reactions I realize that this may be a deliberate deception, which can only thrive on pseudoscience. Title: Re: TD replications Post by: Floor on March 06, 2017, 05:24:28 PM @Nonlinear I did not say that you are a troll, I implied that you might be. Luc's experiments / innovations are great and his own, I do not direct them. Great.... Good to see you have some salt / passion. Let's not fight, let us use your knowledge. QUOTE from Nonlinear "What… what? I have made few typos in my last post, which have been corrected within few minutes of posting, but I certainly did not post anything like (Pf - if)/2=avf or Fa=(Fp - Fi)/2. Subtract the two forces? From where did you get this nonsense idea that it “should read” like a subtraction? It makes no sense at all. " END QUOTE See the files below "subtraction of force in sets.png" .............................................................................. ............................................................................. The topic is moving away from Gotoluc's project, not what I wanted. So let's move this "discussion" to the other topic ? @ http://overunity.com/16954/magnets-motion-and-measurement/msg496713/#msg496713 Like I originally suggested. @GotoLuc There are some other files attached below, as well. regards floor Title: Re: TD replications Post by: Floor on March 06, 2017, 05:28:54 PM @gotoLuc The "MeasPhy 10-5.PDF" file is posted in between the two PNG files above. It would be really easy to miss that PDF file if I didn't mention it specifically. 
regards floor Title: Re: TD replications Post by: gotoluc on March 08, 2017, 02:15:47 AM Here is the first test update of the Mechanical Magnet Torque Amplifier v2.0 Link to video: https://www.youtube.com/watch?v=4x_fCow3qR4 Luc Title: Re: TD replications Post by: telecom on March 08, 2017, 05:09:48 AM A very impressive machine and remarkable craftsmanship! Title: Re: TD replications Post by: lota on March 08, 2017, 09:23:07 AM Hello. It is an interesting machine. How is the input without the lamp? Lota Title: Re: TD replications Post by: Cairun on March 08, 2017, 01:17:39 PM Luc, It is an interesting approach you've taken to measure the input vs output work. And, again, impressive work! Do you plan to measure the input work and output work by measuring force over distance like you've done for your previous build? I think that is still beneficial. I thought about ways to measure the input work and one way to do it is by attaching a string on the outer diameter of the wheel and wrapping around the wheel then attach the string to your pull scale. This allows you to measure in linear force over distance. Regards, Alex Title: Re: TD replications Post by: Nonlinear on March 08, 2017, 01:37:56 PM Luc, This is again nice workmanship, but the concept is wrong because it can not prove anything. You have yourself recognized that there is at least 40% loss in the 2 motors alone, and then we still didn’t count the losses in the gear mechanism which will be a lot again, and the friction losses elsewhere in the machine. This is just a waste of effort really. With all this work and expense you could have made a purely mechanical feedback loop from the output to the input with much less loss. All you need to do is attach a large enough flywheel to the shaft that will store energy, and then drive it with the slider. This can be done by using 2 ratchet mechanisms like the ones in a bicycle rear wheel hub. This way you can utilize and rectify the strokes in both directions. 
The flywheel can drive the rotor. Since the expected torque from the slider will be greater than the one of the rotor, the slider torque will need to be fitted to the rotor torque to be in the right proportion. Title: Re: TD replications Post by: gotoluc on March 08, 2017, 03:41:08 PM A very impressive machine and remarkable craftsmanship! Thanks telecom HelloIt is an interesting machine.How is the input without the lamp? Lota Sorry lota, I don't understand your question. It is an interesting approach you've taken to measure the input vs output work. And, again, impressive work! Do you plan to measure the input work and output work by measuring force over distance like you've done for your previous build? I think that is still beneficial. I thought about ways to measure the input work and one way to do it is by attaching a string on the outer diameter of the wheel and wrapping around the wheel then attach the string to your pull scale. This allows your to measure in linear force over distance. Regards, Alex Thanks Alex Yes, I will do force over distance measurement as well. I agree! a string around the rotor would be a good way to measure distance and force at the same time. Luc Title: Re: TD replications Post by: gotoluc on March 08, 2017, 04:44:05 PM Luc, This is again nice workmanship, but the concept is wrong because it can not prove anything. You have yourself recognized that there is at least 40% loss in the 2 motors alone, and then we still didn’t count the losses in the gear mechanism which will be a lot again, and the friction losses elsewhere in the machine. This is just a waste of effort really. With all this work and expense you could have made a purely mechanical feedback loop from the output to the input with much less loss. Thanks for your comment. I don't agree that this won't prove anything. We will see. As for wasted expense, the only cost was20. for the gear head motor from a surplus salvage store. 
The rest I had on hand, even all the 3/4 inch plywood used to build the device was salvaged and free. You see, I live a very frugal life style, on a $100 a week, so I know not to waste. All you need to do is attach a large enough flywheel to the shaft that will store energy, and then drive it with the slider. This can be done by using 2 ratchet mechanisms like the ones in a bicycle rare wheel hub. This way you can utilize and rectify the strokes in both directions. The flywheel can drive the rotor. Since the expected torque from the slider will be greater than the one of the rotor, the slider torque will need to be fitted to the rotor torque to be in the right proportion. Don't underestimate what I know needs to be done to convert the mechanical output back to the input. Some of what you suggest would be needed but it's far more complex then what you suggest!... since the rotor could not just freewheel. Rotor and slider would have to be mechanically linked to keep the timing. I'm sure Alex also knows this. As well, he and I know it would benefit to slow down the rotor once the magnets are in ideal position to the slider magnets to deliver maximum force stroke. Regards Luc Title: Re: TD replications Post by: Floor on March 08, 2017, 11:32:28 PM @ all readers This latest addition to the magnets motion and measurements project, is a detailed explanation of how to calculate the work done by a force that is changing with distance. (like a magnetic force). Please find the attached file "MagnetForceIntegration 2.PDF" best wishes floor PS Nice Luc Title: Re: TD replications Post by: Cairun on March 09, 2017, 06:09:47 AM I'm sure Alex also knows this. As well, he and I know it would benefit to slow down the rotor once the magnets are in ideal position to the slider magnets to deliver maximum force stroke. Luc, Yes, you are exactly right about this. 
In order to achieve maximum output the input magnet has to come to a complete stop and wait for the output magnet to finish its stroke before the input magnet can move again. And vise versa, the output magnet has to stop and wait for the input magnet to finish its stroke before it can move to achieve minimal input work. A cam and follower(or track and follower)setup should allow us to mechanically link the input and output to create a self runner. I will attempt to model something up to show what I am referring to. Regards, Alex Title: Re: TD replications Post by: Nonlinear on March 09, 2017, 03:40:05 PM In order to achieve maximum output the input magnet has to come to a complete stop and wait for the output magnet to finish its stroke before the input magnet can move again. And vise versa, the output magnet has to stop and wait for the input magnet to finish its stroke before it can move to achieve minimal input work. In a generator designed to optimally utilize all the available excess energy at arbitrary speeds of rotation yes. One has to synchronize them and allow the slider to finish the complete stroke before the rotor is moved away. But if your aim is only to produce a closed loop as proof of concept and proof of excess energy, then one can accomplish the task in a simpler way. If there is really 60% excess energy, then the following device should be at least self running. The proposed operation is this (referring to the earlier version where force measurements were done): 1) The stroke length of the slider was already about 10 times shorter than the travel length of the rotor magnet. If you allow the slider to deliver its work even faster say 10 times faster than the speed of the rotor magnet, then the freely rotating rotor will travel only about 1/100th distance of the stroke during the movement of the slider. This is negligible, and it nicely approximates a perfectly synchronized rotor-slider. 
It is also possible that a slower movement of the slider would be also satisfactory. Like for example just let both slider and rotor move at the same speed. In that case the rotor would travel 1/10th of the rotor’s stroke distance while the slider completes its stroke. One can calculate how much efficiency gets lost this way and find an optimum, a compromise between practicality and ideal condition. 2) The synchronous operation can be guaranteed by using a toque brake on the shaft, and keeping the RPM of the rotor at sufficiently low level, so that the slider should be able to complete the stroke before the rotor travels a significant distance away from the synchronous position. The torque developed on the brake can be measured, just like the RPM, from which one can calculate the output power. 3) A large enough flywheel will absorb and smooth out any jerky movement, and contribute to the slow synchronous operation. 4) A timing latch could be utilized (similar to the one used in old pendulum clocks) to time and synchronize the release of the slider magnets at the right moments, only slightly before the rotor completely covers the slider magnet. 5) The linear bidirectional movement of the slider can be rectified and converted to unidirectional rotation using two bicycle hubs (or similar ratchet mechanism), one on each side. One on the left side drives the flywheel while moving forward, and the other on the right side drives it while moving backwards. 6) this way a continuous rotatory movement can be sustained, with an easy and handy way of measuring the output power. No need for accelerating and decelerating the output wheel, or stroke. But it would make sense to start designing such a machine only after precise reliable measurements prove the existence of at least 20-30% of excess energy. Anything below that would make it challenging to overcome the losses, and it would have no practical significance anyway. 
Title: Re: TD replications Post by: gotoluc on March 09, 2017, 04:10:57 PM Luc, Yes, you are exactly right about this. In order to achieve maximum output the input magnet has to come to a complete stop and wait for the output magnet to finish its stroke before the input magnet can move again. And vise versa, the output magnet has to stop and wait for the input magnet to finish its stroke before it can move to achieve minimal input work. A cam and follower(or track and follower)setup should allow us to mechanically link the input and output to create a self runner. I will attempt to model something up to show what I am referring to. Regards, Alex Hi Alex, User name TinselKoala made a suggestion of using a Scotch Yoke (1st pic) to convert the linear output to rotary. The problem with it is there's no rest time. However, I thought there could be a way to modify the Scotch Yoke to create a pause time and found a variation that does exactly that (2nd pic) Link to animation: http://www.mekanizmalar.com/uk012.html (http://www.mekanizmalar.com/uk012.html) The same site also has an Indexing mechanism (3rd pic). Link to animation: http://www.mekanizmalar.com/four-slot-two-pin-geneva-mechanism.html (http://www.mekanizmalar.com/four-slot-two-pin-geneva-mechanism.html) This mechanism may do what we need to turn the rotor in four segments of rotation, stop, lock and pause. As it is my magnet rotor only has 2 sets of magnets but I could add a second set without too much work and expense to advantage of the 4 position of this indexing mechanism. I like both of these mechanism instead of gears since I could cut them out of plywood with a router. Food for thought Luc Title: Re: TD replications Post by: citfta on March 09, 2017, 04:49:14 PM Hi Luc, Great videos as always from you. I do have a suggestion. The Geneva drive is an ingenious device but needs to be built with very close tolerances to work properly without it wanting to hang up. 
We had some CNC machines where I worked that had them as part of the automatic tool change mechanism. I am afraid it might not work well made from plywood. I think the modified scotch yoke would be much easier to get working properly. Respectfully, Carroll Title: Re: TD replications Post by: gotoluc on March 09, 2017, 04:56:39 PM Thanks Carroll for your input. If the suggestion was to be used I was thinking of a very large scale like 20+ inches in order to allow for toleration differences. Do you still think it's not possible? Regards Luc Title: Re: TD replications Post by: citfta on March 09, 2017, 05:59:31 PM Making it that large will certainly help with the precision problem. Most of the ones on the machines I worked on were about 15 to 20 inches in diameter as I recall. Of course they were used to move some pretty heavy tool change equipment so that made them more susceptible to problems. With your skill at building I think you can probably make a 20 inch one work. I would like to see one made from plywood. That would be impressive. They are pretty interesting to watch them work. Once they change to the next position they hold that position very accurately until told to change again. Respectfully, Carroll Title: Re: TD replications Post by: Cairun on March 09, 2017, 07:10:03 PM Hi Alex, User name TinselKoala made a suggestion of using a Scotch Yoke (1st pic) to convert the linear output to rotary. The problem with it is there's no rest time. However, I thought there could be a way to modify the Scotch Yoke to create a pause time and found a variation that does exactly that (2nd pic) Link to animation: http://www.mekanizmalar.com/uk012.html (http://www.mekanizmalar.com/uk012.html) The same site also has an Indexing mechanism (3rd pic). 
Link to animation: http://www.mekanizmalar.com/four-slot-two-pin-geneva-mechanism.html (http://www.mekanizmalar.com/four-slot-two-pin-geneva-mechanism.html) This mechanism may do what we need to turn the rotor in four segments of rotation, stop, lock and pause. As it is my magnet rotor only has 2 sets of magnets but I could add a second set without too much work and expense to advantage of the 4 position of this indexing mechanism. I like both of these mechanism instead of gears since I could cut them out of plywood with a router. Food for thought Luc Luc, I've modeled a track follower setup which captures the stop and go motion. This is a linear reciprocating design with both input and output magnets moving linearly. This video shows a the basic idea of a track follower setup. https://www.youtube.com/watch?v=wsqyiLaUw5g&feature=youtu.be (https://www.youtube.com/watch?v=wsqyiLaUw5g&feature=youtu.be) This setup does not take advantage of the self reset(because I just wanted to model quickly and show the basic idea), however, a self reset design can be achieved. I will have to think a little more about how to mechanically loop your latest build/setup. Regards, Alex Title: Re: TD replications Post by: gotoluc on March 09, 2017, 08:05:40 PM That mechanism looks great Alex ;) This kind of cam follower design I can make with a router. So that's the best to start with. Thanks for taking the time to help! Luc Title: Re: TD replications Post by: Cairun on March 10, 2017, 01:55:25 AM That mechanism looks great Alex ;) This kind of cam follower design I can make with a router. So that's the best to start with. Thanks for taking the time to help! Luc Luc, Thank you, I am glad I can help! The geometry of the track/cam may need some tweaking to enable a smoother motion, but this is a minor problem. The one thing that bothers me is that I am not sure of the efficiency of a cam and follower, more specifically the one that I've shown in the video. 
If anyone is familiar with the efficiency of a cam and follower, please chime in. If you print the cam/track out and glue the print onto a piece of plywood and then you can cut the track with a router. With your workmanship, I am sure you can make it ;) . But, if accuracy becomes too much of a problem, I can make the cam/track with my CNC mill after I get back from my travels. However, my CNC mill is quite small and can only make small parts. But I can always break larger components into smaller parts and assemble into a larger component later on. Let me know how you would like to proceed. I can design a cam/track with info provided by you and send you the drawing so you can attempt to make it, or I can make it when I get back. Regards, Alex Title: Re: TD replications Post by: Cairun on March 10, 2017, 02:12:43 AM The proposed operation is this (referring to the earlier version where force measurements were done): 1) The stroke length of the slider was already about 10 times shorter than the travel length of the rotor magnet. If you allow the slider to deliver its work even faster say 10 times faster than the speed of the rotor magnet, then the freely rotating rotor will travel only about 1/100th distance of the stroke during the movement of the slider. This is negligible, and it nicely approximates a perfectly synchronized rotor-slider. It is also possible that a slower movement of the slider would be also satisfactory. Like for example just let both slider and rotor move at the same speed. In that case the rotor would travel 1/10th of the rotor’s stroke distance while the slider completes its stroke. One can calculate how much efficiency gets lost this way and find an optimum, a compromise between practicality and ideal condition. 
2) The synchronous operation can be guaranteed by using a toque brake on the shaft, and keeping the RPM of the rotor at sufficiently low level, so that the slider should be able to complete the stroke before the rotor travels a significant distance away from the synchronous position. The torque developed on the brake can be measured, just like the RPM, from which one can calculate the output power. 3) A large enough flywheel will absorb and smooth out any jerky movement, and contribute to the slow synchronous operation. 4) A timing latch could be utilized (similar to the one used in old pendulum clocks) to time and synchronize the release of the slider magnets at the right moments, only slightly before the rotor completely covers the slider magnet. 5) The linear bidirectional movement of the slider can be rectified and converted to unidirectional rotation using two bicycle hubs (or similar ratchet mechanism), one on each side. One on the left side drives the flywheel while moving forward, and the other on the right side drives it while moving backwards. 6) this way a continuous rotatory movement can be sustained, with an easy and handy way of measuring the output power. No need for accelerating and decelerating the output wheel, or stroke. But it would make sense to start designing such a machine only after precise reliable measurements prove the existence of at least 20-30% of excess energy. Anything below that would make it challenging to overcome the losses, and it would have no practical significance anyway. Nonlinear, Thank you for you detailed input. Your proposed design is a good way to to mechanically loop Luc's design. However, I think a cam and follower setup(like it is shown in this video[size=78%]https://www.youtube.com/watch?v=wsqyiLaUw5g (https://www.youtube.com/watch?v=wsqyiLaUw5g)[/size]) is simpler and better captures the motion of the operational sequence. 
Perhaps, I place too much emphasis on maximizing output and minimizing input, but every little bit helps ;) . Regards, Alex Title: Re: TD replications Post by: Drak on March 10, 2017, 04:57:37 AM Hi gotoluc, I think it would probably be best if you did not lock the output to the input. I would make sure that your output is able to ADD to the input instead of having to wait on the input before it can move. As long as they are in resonance with each other (the timing is correct) it should work. You would need a mechanical capacitor to store the energy but still have the rotor turn at the same speed (the hard part). You wouldn't be able to have it going faster then the slider can handle or it will go out of resonance like in your video when you have to adjust the speed to get the slider working at full swings. If you mechanically lock the output to the input it will be like trying observe an electron without disturbing it. They both need to move freely on their own. Just my thoughts. Great builds I love your work! drak Title: Re: TD replications Post by: Nonlinear on March 10, 2017, 02:16:34 PM Thank you for you detailed input. Your proposed design is a good way to to mechanically loop Luc's design. However, I think a cam and follower setup(like it is shown in this video[size=78%]https://www.youtube.com/watch?v=wsqyiLaUw5g (https://www.youtube.com/watch?v=wsqyiLaUw5g)[/size]) is simpler and better captures the motion of the operational sequence. Perhaps, I place too much emphasis on maximizing output and minimizing input, but every little bit helps ;) . Alex, Your cam follower design is good; it will allow the mechanism to rotate at higher speeds as well, while keeping the synchronous movement. Regarding efficiency though, I am not convinced that the cam follower would waste less energy than what I have proposed, because the roller bearings are wasting energy along the whole path of the tracks, which can get excessive at high speeds of rotation. 
But if there would be really 60% excess energy, then both designs should be able to at least self-run. The problem is not with the feedback mechanism, but rather with the claim of excess energy. Despite my original reluctance, I have forced myself to read through the other related threads of Floor, and now my suspicions of deliberate deception have been confirmed. There is definitely no excess energy in such purely permanent magnet arrangements, just as theory predicts. Lumen’s improved measurements have proven this already (in the now closed thread) here: http://overunity.com/14311/work-from-2-magnets-19-output-2/msg498010/#msg498010 and Stefan has closed the thread as well, because the subject is closed (no overunity). Despite the correct disproof, the agenda to mislead and deceive is still in full swing with a show of nice looking contraptions and fake (or grossly erroneous) measurement results. Another example of disproof is webby1’s attempt to convince Luc that he has to measure the 4th part of the cycle as well, in one of his earlier devices. He finally succeeded in this effort in post: http://overunity.com/16987/td-replications/msg496971/#msg496971 and Luc has reluctantly provided the data in: http://overunity.com/16987/td-replications/msg496974/#msg496974 When this 4th part of the cycle was taken into account, then Luc’s averaging calculations showed only 10% of excess work, which can be very easily attributed to the other bad measurement practices mentioned earlier. Without free energy being created, the whole show of nice devices and designs are nothing more than the shiny paint on the car from which the engine is missing (useless). As an illustration of this absurdity there is a famous example called overbalanced wheel. https://en.wikipedia.org/wiki/Perpetual_motion The gravitational field is conservative just like a spring. 
You can not get more continuous work from gravity than what you have to invest, and this is completely independent of the path of movement. One can not get more work out from a spring than what one has to invest while pressing it together. It doesn’t matter whether the spring is linear or nonlinear, it is still conservative and it is very easy to prove this. The magnetic field can be imagined as a net made of millions of tiny springs that react only with magnetic materials. It does not matter how complex net you form from such springs, they still remain conservative. The magnetic field is conservative, and Lumen has already proven this, but some people prefer to ignore this fact. I know that it is possible to create overunity generators, for example cold fusion is one of them. Accurate measurements performed by qualified physicists prove that. But purely permanent magnet arrangements will definitely not produce overunity. Therefore I will not post on this subject for a while, because now my interest is only in observing the psychology of deception. I will just sit back and observe how far a hoax can go before some readers get fed up with the nonsense and start kicking some butts. When the whole thing blows over, then I will come back to say: “I told you so… didn’t I” ;D Title: Re: TD replications Post by: citfta on March 10, 2017, 02:47:34 PM Nonlinear, You are way out of line. I don't know Floor that well but I have known Luc for years. To accuse him of deliberately misleading is very wrong. Luc is a dedicated researcher looking for the truth. He has tried to follow any suggestions from anyone to make his measurements more accurate. He has not claimed OU anywhere that I am aware of. He only presents the results of his tests. When he sees results that look promising he will pursue those results until he is convinced they do not lead to an OU device. That is research, not deception. You owe him an apology for suggesting he is deliberately misleading others. 
As far as OU goes, you are certainly entitled to your opinion. I worked in electronics for over 50 years and have seen several times things that left me scratching my head. So I do believe OU MAY be possible. I am not convinced it IS possible nor am I convinced it is NOT possible. So I continue with my own research and follow the research of others like Luc that are willing to share their efforts. Respectfully, Carroll Title: Re: TD replications Post by: Cairun on March 10, 2017, 02:49:40 PM Nonlinear, Luc's current design is different than the original TD setup. I will perform measurements to help verify Luc's measurement results after I get back from my business travels(hopefully I will be back in about 3 weeks). Regards, Alex Title: Re: TD replications Post by: webby1 on March 10, 2017, 03:34:41 PM I do not see any deliberate deception, rather what I see is a lack of experience. This very lack of experience can allow for tests to be run that others, with more experience, may consider useless or absurd. These tests may have not been tried before because to those with more experience they seem to be "useless". In time I think that Luc will think about having the output motor spin in only one direction and allow that motor to freewheel and add a large flywheel to the motor shaft,, after all, that rate of acceleration coming from the slider mechanism is fairly impressive and spring stops could conserve more of that mass in motion energy if it is not stored within the flywheel,, and things will go from there :) Will it work?? you do not know until you do. Title: Re: TD replications Post by: gotoluc on March 10, 2017, 04:13:30 PM Well Nonlinear There you have it, you're confused and creating confusion. It's obviously you haven't read all the topics and posts to come to the conclusions in your last post. What I'm working on at present is different then what Floor originally proposed and I first tested. Here is a link to my last report (posted Feb. 
8th 2017) on my precision tests done to Floor's twist drive concept: https://www.youtube.com/watch?v=MMqBISjwieY (https://www.youtube.com/watch?v=MMqBISjwieY) And here is my new device concept which was posted on the same day introducing my own design which I named "Mechanical Magnet Torque Amplifier": https://www.youtube.com/watch?v=oUlDMY1iE5A (https://www.youtube.com/watch?v=oUlDMY1iE5A) This is the design that is being discussed and tested at this time and is not related to the old information in your post above. Your error may cause others to question your integrity and reasons for being here. We will see Luc Title: Re: TD replications Post by: Floor on March 10, 2017, 04:40:05 PM @ All readers I threw up..... a new video. It shows an effective magnet shield in action. http://www.dailymotion.com/video/x5eg7kk_magnetshield-1_tech As far as I'm concerned, this is all open source and public domain. All in common...that's the only real over unity there is. Thanks for everything Luc. Peace... Out floor Title: Re: TD replications Post by: Floor on March 10, 2017, 04:50:21 PM @ All readers QUOTE from Nonlinear "Stefan has closed the thread as well, because the subject is closed (no overunity). Despite the correct disproof, the agenda to mislead and deceive is still in full swing with a show of nice looking contraptions and fake (or grossly erroneous) measurement results. " END QUOTE That thread was not "closed", but rather it was locked at my (floor's) request. Title: Re: TD replications Post by: webby1 on March 10, 2017, 04:59:34 PM I am throwing this out deliberately vague. The force manifested between the slider and disc is independent of any existing velocities. Title: Re: TD replications Post by: Nonlinear on March 10, 2017, 08:41:09 PM Luc's current design is different than the original TD setup. Quote from: gotoluc There you have it, you're confused and creating confusion. 
It's obviously you haven't read all the topics and posts to come to the conclusions in your last post. What I'm working on at present is different then what Floor originally proposed and I first tested. Well, you fellas don’t get it, do you? I’m fully aware that Luc’s magnet arrangement is slightly different than the original of Floor. It absolutely does not matter along which path you move or twist the two magnets in relation to one another, they still behave like a system of passive and conservative springs. If the force is greater in one direction, then the path to travel will be shorter, and vice versa. If you accurately measure and correctly integrate the total work, you will see that there is no overunity. Not in Floor’s design, not in Luc’s design, not in Joe’s, Fred’s, and Julie’s designs, not in anybody's designs of purely passive permanent magnet systems. The difference between Floor’s and Luc’s versions is analogous to the difference between this design of Bhaskara’s overbalanced wheel: http://www.trevorbaylisbrands.com/tbbnew/technology/perpetual/unbalanced.asp and this modified version called Chain Drive Gravity Machine: http://pesn.com/2012/01/05/9602001_Free_Energy_Chain_Drive_Gravity_Machine_Open_Source_Project_Launched/ The crackpot line of thinking goes like this: Quote I have built the Bhaskara wheel, and damn… it doesn’t work. But wait! I am smarter than Bhaskara was, I will design a chain drive instead, which is completely different and therefore it has nothing to do with Bhaskara’s failed idea. Then someone who knows physics comes along, and tells the new inventor that his gadget, which is trying to use the same principle of overbalanced weights (as Bhaskara did) to extract free energy from the conservative field of gravity will not work. This can not possibly extract free energy from the conservative field of gravity, just like Bhaskara’s wheel didn’t work, because they both are basically and principally the same. 
But our zealous inventor accuses the commenter that he is totally confused and spreads confusion, because he can not even see that the chain drive is totally different from the Bahskara’s drive. Therefore, he must be a crackpot, lacking any integrity, so the audience should despise the commenter and applaud the inventor. LOL. ;D Quote Your error may cause others to question your integrity and reasons for being here. Oh, really? I am so ashamed that my stupidity and utter ignorance did not allow me to see the difference between your design and Floor’s design. LOL … LOL … LOL ;D In fact this implicit call of yours that others should question my integrity is one sign of mean agenda and deliberate deception, but there are many more such signs. I have a long list of such signs and symptoms collected during my reading the threads of Floor and this one, which all together indicate deliberate deception. I don’t claim such a thing lightly, but I do that only because all the telltale signs are present, which are characteristic of an organized hoax. I don’t want to post this list of symptoms (yet) because it would only help the culprits to refine their methods of deception. If the readers can read between the lines, know some physics, and observe the actions and reactions of the participants, then they will be able to see what I mean. But, if one is a staunch believer in crackpottery, and despite my warnings still believes there is 60% overunity in this system, then he should build the machine himself, and wake up to the reality the hard way. OK, I said earlier that I will not comment on this subject for a while, but this issue needed to be clarified first. I will withdraw now and observe the show. Will be back at the end of the performance, and respond to the accusations and slander that will be probably aimed at me while I am not around to defend myself. 
;D Title: Re: TD replications Post by: dieter on March 10, 2017, 09:53:17 PM Nonlinear, so you say ferromagnetism and gravity are the same. I would not agree with that. There is a certain basic understanding of magnets, the fridge magnet level. But the longer you really observe and investigate magnetism, the more you'll see that there's more to it. The fact that we used Tesla's design without improving it for over 100 years shows how closed-minded the establishment really is. Who would finance development of energy-efficient machines when energy is the most lucrative economy in the world? And as soon as something is against the establishment, any pro will drop it immediately. Which is why there are up to this day incredibly simple ways to violate the law of energy conservation, completely unnoticed by mainstream science. I asked this elsewhere too, please explain to me the following: a certain exact DC pulse on a coil will repel the coil further away from a PM, the stronger the PM is. Where does that additional energy come from? Title: Re: TD replications Post by: ARMCORTEX on March 10, 2017, 10:42:21 PM Nonlinear, so you say ferromagnetism and gravity are the same. I would not agree with that. There is a certain basic understanding of magnets, the fridge magnet level. But the longer you really observe and investigate magnetism, the more you'll see that there's more to it. The fact that we used Tesla's design without improving it for over 100 years shows how closed-minded the establishment really is. Who would finance development of energy-efficient machines when energy is the most lucrative economy in the world? And as soon as something is against the establishment, any pro will drop it immediately. Which is why there are up to this day incredibly simple ways to violate the law of energy conservation, completely unnoticed by mainstream science. 
I asked this elsewhere too, please explain me the following: a certain exact DC pulse on a coil will repell the coil further away from a PM, the stronger thw PM is. Where does that additional energy come from? You are an idiot, if it was simple I would have seen it, I have watched this forum since 2009. If it was simple, I would not see the same old gang try russian coils for 5 years now on the Kapanadze forum. I am ready to say, almost impossible, extremely difficult, and now even more so as the misinformation machine is fully oiled and greasy. John Bedini was a fraud, the guy did not achieve OU. And this device was stupid to begin with, another plywood idea from gotoluc. If only a nice build was done in metal, with gearing a precision made CAM to get the timing right But its always the idiots way of making things, wood, no precision, no design research, no machine shop quotes. A bunch of silly ass talking idiots wankin around instead of giving money to gotoluc so that we can definitly cross out that idea. They will however talk their ass off as soon as they see it might not be working, what have you proposed that is better than gotoluc? Nothing... What have you done? Nothing... Talkers, not walkers... Gotoluc at least is a walker, texas ranger. Fucking ass lemmings dont even help gotoluc, the man should not even help you assholes anymore. Title: Re: TD replications Post by: webby1 on March 10, 2017, 11:23:51 PM Well, you fellas don’t get it, do you? The crackpot line of thinking goes like this: OK, I said earlier that I will not comment on this subject for a while, but this issue needed to be clarified first. I will withdraw now and observe the show. Will be back at the end of the performance, and respond to the accusations and slander that will be probably aimed at me while I am not around to defend myself. 
;D The tone of your diatribe and accusations sounds very familiar,, Floor, Luc,, you should take solace in the understanding that if the "list" was applied to many from the past you would be on a list including the likes of Tesla. Title: Re: TD replications Post by: webby1 on March 11, 2017, 12:02:23 AM I asked this elsewhere too, please explain me the following: a certain exact DC pulse on a coil will repell the coil further away from a PM, the stronger thw PM is. Where does that additional energy come from? Hi Dieter, I think you would be beter served if you answered your own question. I will ask you a few question. What is equal and opposite? Are the interactions for all parts the same by distance? You should be able to run a few tests and then answer your question for yourself. Title: Re: TD replications Post by: Cairun on March 11, 2017, 01:32:20 PM Nonlinear, Well, you fellas don’t get it, do you? I’m fully aware that Luc’s magnet arrangement is slightly different than the original of Floor. It absolutely does not matter along which path you move or twist the two magnets in relation to one another, they still behave like a system of passive and conservative springs. If the force is greater in one direction, then the path to travel will be shorter, and vice versa. If you accurately measure and correctly integrate the total work, you will see that there is no overunity. Not in Floor’s design, not in Luc’s design, not in Joe’s, Fred’s, and Julie’s designs, not in anybodies designs of purely passive permanent magnet systems. It is because we can see the profound difference between and potential of Floor's and Luc's designs that we continue with our support. Minor differences in a design can mean the difference between a working product and a failed product. Maybe you are unable to see the difference or you simply refuse to see the difference. 
I believe your intentions at an earlier time were good, so please stop with the defamation of Floor and Luc and provide constructive criticism instead. Perhaps, you think that your earlier input was not well received, but Floor and Luc can only do so much at any given time. They are offering their knowledge to the public without asking for anything in return, deliberate deception will not amount to any gain for them. Quote If the readers can read between the lines, know some physics, and observe the actions and reactions of the participants, then they will be able to see what I mean. But, if one is a staunch believer in crackpottery, and despite my warnings still believes there is 60% overunity in this system, then he should build the machine himself, and wake up to the reality the hard way. It is a good suggestion that anyone able should experiment themselves to see if an idea will work or not. I will certainly conduct independent measurements to verify Luc's measurement results. Regards, Alex Title: Re: TD replications Post by: Floor on March 12, 2017, 12:44:20 AM @ Cairun Nice concept / design...very nice. thanks for sharing floor Title: Re: TD replications Post by: Floor on March 13, 2017, 02:54:33 AM @ Gotoluc I think you / we have done... really a superior job, especially given that the experiments and presentations have been done on the fly, and in a public forum as well ! As far as I am aware of, and as of this point in time... ALL points of have been well covered by us without our objection in terms of the full .... presentation of the devices used presentation of the methods of measurement presentation of the mathematics used answering all posed questions with our goal.... the legitimate satisfaction by the questioner. We have acknowledged all of the apparent dead ends with out dispute. We have openly discussed all advice and all suggested approaches to our processes. We have pursued and achieved significant improvement in the designs. 
And at this point we are about to make some real progress in terms of coming to a clear and valid determination of a margin of error. This was the point we had already arrived at and were preparing for by GotoLuc's larger build (before the recent flaming outbreaks). no big deal Greater forces and greater displacement can give a greater precision and reduce the margin of error. wow... it's all still intact, still progressing and still amazing..... cheers ! floor Title: Re: TD replications Post by: gotoluc on March 13, 2017, 06:56:18 PM Hi Floor and everyone Over the weekend I set up a scale measuring apparatus on the v.2 magnet torque amplifier as I was getting way under unity from the generator's output. After 10 hours of detailed scale samples and calculations, to my surprise the input force for one cycle is exactly the same as the output force :( It was hard to believe since the first model showed around 60% gain. So this morning I decided to re-measure the first device with the most care to details. Now the first device is showing a 10% gain which could be caused by accumulative errors from the 5 gram resolution scale. What I found could have caused the 60% gain error is by using a different input rotor magnet than the one used for the 11mm output. I checked the rotor magnets and found they have different magnetization force. So most likely that's what happened, plus the scale resolution problem. Sorry but it looks like this configuration also has no gain. Not a big loss (other than time) as the v.2 costs were the magnets and sliders ($90), and I can still use them on other experiments. Kind regards Luc Title: Re: TD replications Post by: Floor on March 13, 2017, 11:12:52 PM @ GotoLuc OK thanks Luc floor Title: Re: TD replications Post by: dieter on March 14, 2017, 03:08:27 AM Armcortex and Webby, why are you quoting and attacking me, just to disgustingly suck up to Gotoluc? I just tried to support his point. But you didn't even get that. 
And you could not answer my question. Just some angry ejaculation of bs, like a mental kid in a sandbox, parroting his violent parents. Go seek the responsibles for your traumata, but get off my back. Title: Re: TD replications Post by: dieter on March 14, 2017, 03:17:49 AM Hey Luc, sorry bout that offtopic steam... Kudos for having the balls to report the outcome. This serious and rational behaviour helps all of us. Title: Re: TD replications Post by: webby1 on March 14, 2017, 05:53:07 AM Armcortex and Webby, why are you quoting and attacking me, just to disgustingly suck up to Gotoluc? You asked a question and I proposed that you would benefit more if you answered your own question and then tried to suggest how to view the setup and how to interpret what you could see IF you looked at it differently than you are. Have you found the answer to your question?  another hint,, the coil needs something to push against. Title: Re: TD replications Post by: dieter on March 14, 2017, 08:39:55 AM Webby, as long as you give mysterious hints, you seem more like a wannabe teacher. Answering questions with a question is also symptomatic btw. Title: Re: TD replications Post by: Cairun on March 15, 2017, 01:11:51 PM Luc, Thanks for sharing your latest test results, it takes great courage to do that. Even though the test results are not what we have hope it would be, it is still an advancement to our knowledge. Regards, Alex Title: Re: TD replications Post by: gotoluc on March 15, 2017, 04:55:15 PM Luc, Thanks for sharing your latest test results, it takes great courage to do that. Even though the test results are not what we have hope it would be, it is still an advancement to our knowledge. Regards, Alex Thanks Alex, I'm happy to help by sharing what I find, even if the results are not favorable. It's nothing out of the ordinary for me!...  I've been doing the same for the past 10 years. What fuels me is hope that one day we find an energy solution for those in need. 
Not for fame or fortune. Thanks for your willingness to help Luc Title: Re: TD replications Post by: dieter on March 15, 2017, 11:22:17 PM When I first read "Twist Drive" I thought it would utilize sheering force, rather than attraction / repulsion. Maybe it does? There is a force, turning a parallel 2nd magnet. When stopped at 90deg, it can be removed fro the 1st magnet without force, eg. by gravity. Then again by gravity it can brought in parallel position. The torque of the sheering is significantly higher than the gravity force alone. Maybe that is also a TD. I have made a little Toy to demonstrate it, maybe I'll post a picture later. Title: Re: TD replications Post by: Floor on March 18, 2017, 12:09:35 AM @Gotoluc I'm not even close to being done with the PMs yet. Your last design was,   I guess,    near unity,  don't really know though?.  My own examinations of interactions similar to that design left me with no understanding of why that design should  have been more than unity.  Although I did let myself get a bit carried away with your initial report. From 60 % plus to 10% plus is a major oversight.  Can you give us some details of that over sight ? also Your energy, enthusiasm and many hours of work in the shop are much appreciated. regards floor Title: Re: TD replications Post by: Floor on March 18, 2017, 02:35:24 PM @Gotoluc Notes.. The integration of the work done in the inputs via the rotating bicycle rim experiment.... 1. position magnet by rotation above the sliding magnet 2. remove magnet by rotation from near the sliding magnet may together (attractions and repulsions) come to a net work of less than either 1. or 2.  alone (just above)...... except that their peak forces were not matched / canceling one the other out. Other wise your complete set might have shown some OU ? floor Title: Re: TD replications Post by: gotoluc on March 18, 2017, 05:03:31 PM @Gotoluc I'm not even close to being done with the PMs yet. Great to hear!... 
please make a video demo once you have found something so I can evaluate it as well From 60 % plus to 10% plus is a major oversight.  Can you give us some details of that oversight ? This link to the below quote explained the oversight: http://overunity.com/16987/td-replications/msg501737/#msg501737 (http://overunity.com/16987/td-replications/msg501737/#msg501737) this morning I decided to re-measure the first device with the most care to details. Now the first device is showing a 10% gain which could be caused by accumulative errors from the 5 gram resolution scale. What I found could have caused the 60% gain error is by using a different input rotor magnet than the one used for the 11mm output. I checked the rotor magnets and found they have different magnetization force. So most likely that's what happened plus the scale resolution problem. So to revise what may have caused the 60% gain error. 1. I must have used different rotor magnets to measure the input force than the output force when I first measured the v.1 device. Seems this alone can cause a 30% difference. I was surprised at how much each rotor magnet varies in force. 2. The scale I use is a 20kg max luggage scale. It has a 5 gram resolution. The rotor input force of the first device ranges from 1g to 85g max. However, the scale only starts to display at 15g and above. So I figure it's unsuitable for accuracy when measuring below 100 grams. I would estimate the math averaging on the input of the first device could be off by 10 to 25% based on this resolution issue alone. ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- The below are the measurements done on the v.2 device The input rotor force measured between 110g and 2.4kg, so very good data was obtained as far as the scale's resolution ability. Half of the input rotor distance (to make one output stroke) is 16 1/8 inches of circumference. 
Samples were taken at every 1/8 inch distance, so 129 input distance samples were recorded in total!... giving a very good input average calculated to be 1.1kg over the 16 1/8 inch half rotor circumference. The output force was adjusted to slide 5 inches of distance. The gram pull force measurements varied between 2.3Kg to 14.5Kg. 40 samples were taken at every 1/8 inch making an average of 6.46Kg over the 5 inch output stroke. I've just realized I made an error (a few days back) on my final math!!!... I had the calculations of the rotor input engaging and disengaging averages calculated separately and added them together 1.24Kg + 0.957Kg = 2.2Kg but the error is, the 2.2Kg should then be divided by 2 = 1.1Kg to get the correct input rotor average over the 16.125" for half of the rotor circumference as I correctly did above. So if we take the 16.125" rotor input travel distance and divide it by the 5" output slider distance = 3.23 times more distance the input rotor needs to travels at 1.1Kg average compared to the 5"output distance at 6.46Kg average. So if we multiply the input average 1.1Kg x 3.23 times =  3.55Kg of comparable input force to distance needed compared to the output. Now if we subtract this 3.55Kg of comparable input force to the 6.46Kg output force = 2.91Kg left over which is a 82% Gain over the Input. Your energy, enthusiasm and many hours of work in the shop are much appreciated. regards floor Thanks Can someone look over my calculations to see if the reasoning looks to be correct. Regards Luc Title: Re: TD replications Post by: Floor on March 18, 2017, 06:56:18 PM @Gotoluc Ok,  ha ! interesting.... Well I'll contemplate all this till it sinks in well / then give you some feed back, when I can. regards floor Title: Re: TD replications Post by: dieter on March 19, 2017, 03:05:17 AM I was just saying - let us verify the negative results with the same care like the positive ones. This is great news, Luc. 
Title: Re: TD replications Post by: Cairun on March 19, 2017, 01:00:12 PM Luc, This is very encouraging news indeed. I tried to wrap my head around this, and the attached Excel spreadsheet is the result. Work is just force x distance and I calculated an 82% excess output work. Hopefully, this can help others to understand it better as well. Regards, Alex Title: Re: TD replications Post by: gotoluc on March 19, 2017, 03:47:39 PM Thanks Alex Looks very good I'll be remeasuring the output and doubling the 40 samples to 80 sample over the 5 inch stroke. We'll see if  that changes anything. I'll be posting all the sample data soon. Regards Luc Title: Re: TD replications Post by: gotoluc on March 19, 2017, 06:06:55 PM I've attached a xls file of my v.2 Magnet Torque Amplifier measurement data which was prepared by user webby1 Measurement Steps: First Step: Output slider is in rest position (already delivered output force) and is locked in position by adding a wood block. In this position the input rotor magnet is in most attraction to the output slider magnet and this is the beginning point of the scale pull force measurements in grams. Second Step: Input Magnet Rotor is pulled from the rotors outer edge using a flexible steel strapping tape. Steps 1 to 129 is a total of 16.125 linear inches of the magnet rotor outer edge travel (180 degrees) with scale pull force samples taken every 0.124 inches. 129 samples in total. The first 57 Samples is the pull force needed to Disengage the rotor magnets from the attraction force of the output slider magnets. Then samples 58 to 74 (in red) is the rotor magnet being temporarily attracted to the output slider magnets and why they are a negative (additive force) to the rotor. Finally, sample 75 to 129 is the remaining pull force needed to position the rotor magnets in ideal resting position for the output slider magnets deliver maximum force. 
Third Step: Input Rotor is locked at this position which is 16.125 inch, 180 degrees from beginning measurement position. The Output slider is released and the output slider magnets pull force is measured over its 5 linear inches (taken every 0.125 inch) of the magnet sliders output travel force. Please note the input rotor and output slider force has been measured 180 degrees of input rotor which delivers one output stroke. I have not bothered (at this time) to measure the other 180 degrees since in theory it should be a mirror image of the prior. Regards Luc Title: Re: TD replications Post by: gotoluc on March 19, 2017, 07:03:56 PM The below are input and output charts Luc Title: Re: TD replications Post by: woopy on March 20, 2017, 12:16:44 AM Yep Luc Fantastic work If i understand well, the measurement begins after the stroke ends,  when the  the rotor and slider are at full stop. I enclose a rotating drawing made on your graph, so correct me if i am wrong Seems that  i have to order some magnets and sliding bearings  tomorrow he he !!  :) Thank's so much for sharing Laurent Title: Re: TD replications Post by: gotoluc on March 20, 2017, 04:45:38 AM Bonjour Laurent It's always a pleasure having you participate Your understanding of the test device and timing drawings are perfect. Thank you for posting it. Looking forward to your build and test results Kind regards Luc Title: Re: TD replications Post by: webby1 on March 21, 2017, 04:23:24 PM It was brought to my attention that I made an error in the spreadsheet,, my sum total for some of the columns  did not include the complete column. I do believe I have fixed that. It now shows a gain of 70.26% Title: Re: TD replications Post by: woopy on March 21, 2017, 05:23:33 PM Hi webby and Luc Just received my ferrites magnets this morning. To Luc I am planning a setup and i rewatched your  video on the V2 device for details. 
It seems that you say that the rotor diameter is 22 inches so the outer rotor distance is 22 x 3.1416 = about 69 inches, so half distance (180 degrees)  is 69 / 2 = about 34.5 inches. So i don't see where the 16.125 inches (in your last calculation for 180 degrees ) are coming from ? Have you installed a second rotor for the measurement or i am missing something ? To Webby Thank's for input Sorry if you have already done it, but may i ask you to explain how you get the Joules datas from the colum " gram pull" Laurent Title: Re: TD replications Post by: webby1 on March 21, 2017, 05:43:17 PM To Webby Thank's for input Sorry if you have already done it, but may i ask you to explain how you get the Joules datas from the colum " gram pull" Laurent 1 joule is 1N of force for 1m of distance,, the step that Luc is using is 0.125 inch, I convert that into meters and the grams into N,, so 0.003175m per increment for distance and 0.00980665N per gram. I should of included the distance information within the spreadsheet better,, there is column A that has step inch and then step m and I should of noted that this was the step distance per measurement. If desired I can add that verbiage. Title: Re: TD replications Post by: webby1 on March 21, 2017, 06:11:47 PM I have added some words to hopefully clarify what is in the spreadsheet. Please let me know if any other changes are needed or wanted. Title: Re: TD replications Post by: gotoluc on March 21, 2017, 06:40:14 PM Hi webby and Luc Just received my ferrites magnets this morning. To Luc I am planning a setup and i rewatched your  video on the V2 device for details. It seems that you say that the rotor diameter is 22 inches so the outer rotor distance is 22 x 3.1416 = about 69 inches, so half distance (180 degrees)  is 69 / 2 = about 34.5 inches. So i don't see where the 16.125 inches (in your last calculation for 180 degrees ) are coming from ? Have you installed a second rotor for the measurement or i am missing something ? 
To Webby Thanks for your input Sorry if you have already done it, but may I ask you to explain how you get the Joules data from the column " gram pull" Laurent Oh no Laurent, I now see a terrible error!... the 16.125 inches was for each section of disengage then engage. So the 16.125 inches would have to be multiplied by 2 = 32.25 inches of outer rotor traveled for 180 degrees, making the rotor 20.53 inches in diameter and the device under unity from the calculated math. So the input rotor traveled 6.45 times the distance of the output. So input average is 1.1Kg x 6.45 = 7.1Kg input to 6.25Kg of output so under unity by about 12% I'm so sorry for your trouble and expenses. To everyone, please accept my apology for the trouble my oversight may have caused Kind regards Luc Title: Re: TD replications Post by: woopy on March 21, 2017, 07:35:53 PM Hi Luc Yep, don't worry at all, we are here to test everything possible under the "trial and error" rule for the doer. Furthermore ferrite magnets are not expensive, and I will anyway use them for other stuff. Ah those magnets will keep the mystery  for some more time. Anyway thanks for sharing your work and for your determination in searching for new ways for the future. Keep going on, as I will, once will be perhaps  the....... Hi webby Good night at all Laurent Title: Re: TD replications Post by: webby1 on March 21, 2017, 09:28:45 PM Well a 22 inch pulley would then have an 84 degree rotation,, looking at the sneak peek that might be about correct. The free play between covering the magnets could be partially balanced out force wise,, just sayin :) Title: Re: TD replications Post by: webby1 on March 22, 2017, 01:39:06 AM 0.409575m            pull distance left 3.132728958J       energy left 7.6487308991N    newtons to pull distance left,, J/distance left 779.95G                 grams average pull Just for completeness,, this is what it needs to be unity. 
Title: Re: TD replications Post by: Cairun on March 22, 2017, 01:49:43 PM Oh no Laurent, I now see a terrible error!... the 16.125 inches was for each section of disengage then engage. So the 16.125 inches would have to be multiply by 2 = 32.25 inches of outer rotor traveled for 180 degrees, making the rotor 20.53 inches in diameter and the device under unity from the calculated math. So the input rotor traveled 6.45 times the distance of the output. So input average is 1.1Kg x 6.45 = 7.1Kg input to 6.25Kg of output so under unity by about 12% I'm so sorry for your trouble and expenses. At everyone, please accept my apology for the trouble my over site may of caused Kind regards Luc Luc, It seems to me, based on your description of measurement steps, that you've accounted for all the input work required for one output stroke. Your measurement range only needs to be in between when the rotor magnet first feels a magnetic force and when it last feels a magnetic force from the slider magnet. The graphs from the Excel spreadsheet shows that the pull forces diminishes to almost zero on both ends which indicates that you've accounted for the full range. Any distances beyond that range should be free wheel. Regards, Alex Title: Re: TD replications Post by: gotoluc on March 22, 2017, 02:42:25 PM Luc, It seems to me, based on your description of measurement steps, that you've accounted for all the input work required for one output stroke. Alex Yes Alex, I've accounted for all the input work required to produce one output stroke. Your measurement range only needs to be in between when the rotor magnet first feels a magnetic force and when it last feels a magnetic force from the slider magnet. Alex Yes, I understand what you are saying but I don't see that helping as it would not account for the area of rotation where the rotor is being assisted by attraction to the slider, (negative red data) making the result even worse. 
The graphs from the Excel spreadsheet shows that the pull forces diminishes to almost zero on both ends which indicates that you've accounted for the full range. Any distances beyond that range should be free wheel. Alex I think there is a miss understanding. A full rotation is 360 degrees and I measured 180 degrees. There are 2 output strokes for 1 rotor rotation. I measured the first 180 degrees of the rotor which created 1 output. The balance of the other 180 degrees is for the second output stroke and as I wrote before should be a mirror image of the first measured 180 degree. So how can that be free wheeling?... the same work would have to be put into the rotor to complete the 2nd output. Kind regards Luc Title: Re: TD replications Post by: webby1 on March 22, 2017, 03:55:41 PM You measured for 90 degrees, not 180 but to get to the second output you need to keep turning the rotor another 90 degrees,, if this additional 16.125 inches of pull is for less than an average of 780 grams there is a gain,, more than 780 grams it has a loss. To complete the measurements you would need to measure from start of engage to start of engage, you have the first half done. You did say you were planning on a full 360 degrees of pull measurements,, keep the output at your 40 steps. Title: Re: TD replications Post by: webby1 on March 22, 2017, 04:08:08 PM My prediction on the rest of the turn is that the rotor will see another small area of negative force, it will then ramp up in positive force needed and ramp back down to low force needed and then back up as engage starts. Title: Re: TD replications Post by: gotoluc on March 22, 2017, 04:27:28 PM Dear Webby1 I am re-posting the perfect 5 step illustration Woopy has added at the bottom of the chart you provided. Do you not see it illustrate a 180 degrees of rotor travel? 
The 16.125 inches of rotor distance is 90 degrees worth, so as I previously wrote 180 degrees rotor distance is twice that, being 32.25 inches and 64.5 inches for the complete rotor circumference. Regards Luc Title: Re: TD replications Post by: gotoluc on March 22, 2017, 04:44:53 PM Hi Luc Yep, don't worry at all, we are here to test everything possible under the "trial and error" rule for the doer. Furthermore ferrite magnets are not expensive, and i will anyway use them for other stuff. Ah those magnets will keep the mystery  for some more time. Anyway thank's for sharing your work and for your determination in searching new way for the future. Keep going on, as i will, once will be perhaps  the....... Hi webby Good night at all Laurent Bonjour Laurent, You may want to test Floor's recent suggestion of magnet shielding: http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025 (http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025) Floor's video demo: http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025 (http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025) Kind regards Luc Title: Re: TD replications Post by: webby1 on March 22, 2017, 05:45:13 PM Dear Webby1 I am re-posting the perfect 5 step illustration Woopy has added at the bottom of the chart you provided. Do you not see it illustrate a 180 degrees of rotor travel? The 16.125 inches of rotor distance is 90 degrees worth, so as I previously wrote 180 degrees rotor distance is twice that, being 32.25 inches and 64.5 inches for the complete rotor circumference. Regards Luc Luc, How can the 16.125 inches of pull data that you collected be 180 degrees? Since it can not be 180 degrees then the illustrations that were added can not be correct.  
They should then have the rotor magnet at 45 degrees to the slide magnet, then rotate 45 degrees (8.125 inches of pull) so they are now in the output position and the slide released, then the slide is held and the rotor is turned another 45 degrees (8 inches of pull) thus covering the 90 degrees of pull distance you measured. Now if the 16.125 inches of pull took the rotor magnets from 90 degrees to the slide magnets and placed them in the output position and then the slide was released and then you pulled another 16.125 inches for the next 90 degrees of rotor rotation and if that had the same force measurements as the first 16.125 inches of pull,, THEN your assumption is correct This is where I am confused. Title: Re: TD replications Post by: gotoluc on March 22, 2017, 08:53:36 PM Luc, How can the 16.125 inches of pull data that you collected be 180 degrees? Since it can not be 180 degrees then the illustrations that were added can not be correct.  They should then have the rotor magnet at 45 degrees to the slide magnet, then rotate 45 degrees (8.125 inches of pull) so they are now in the output position and the slide released, then the slide is held and the rotor is turned another 45 degrees (8 inches of pull) thus covering the 90 degrees of pull distance you measured. Now if the 16.125 inches of pull took the rotor magnets from 90 degrees to the slide magnets and placed them in the output position and then the slide was released and then you pulled another 16.125 inches for the next 90 degrees of rotor rotation and if that had the same force measurements as the first 16.125 inches of pull,, THEN your assumption is correct This is where I am confused. That's where the error was. It never was 16.25 inches!!!... it was twice that!, being 32.5 inches. Do you understand now? Luc Title: Re: TD replications Post by: webby1 on March 22, 2017, 09:31:13 PM I have 129 steps times 0.125 = 16.125 I understand that the rotor has a 20.53 inch diameter. 
At this point I am assuming that your 129 measurements are for the engage only. Could it be that with the magnets at a lower potential at the start of disengage it takes less work to move the rotor to where you started measuring the engage process? I think it will, but how much?  enough to show a gain? not sure. Title: Re: TD replications Post by: gotoluc on March 22, 2017, 10:50:01 PM I have 129 steps times 0.125 = 16.125 Yes, I see your point. I'm going to go over it all to see how that part could have happened but maybe it's as simple as the 129 samples were taken every .25 inches and I thought it was .125 inches? I'll go over it to try to figure it out but for sure there's no error with 129 samples for 180 degrees. Regards Luc Title: Re: TD replications Post by: webby1 on March 22, 2017, 11:02:30 PM Well at .25 that has your input at 8.92J and the output still at .125 staying at 7.59J Title: Re: TD replications Post by: gotoluc on March 22, 2017, 11:16:16 PM @webby1 I looked over my original papers and I can confirm the 129 samples were taken every .25 inches. It was the output samples that were taken every .125 inches. Regards Luc Title: Re: TD replications Post by: webby1 on March 23, 2017, 12:50:00 AM Mystery solved! Thanks for all that Luc. Title: Re: TD replications Post by: woopy on March 23, 2017, 11:13:59 AM Bonjour Laurent, You may want to test Floor's recent suggestion of magnet shielding: http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025 (http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025) Floor's video demo: http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025 (http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025) Kind regards Luc Hi Luc Yes i have tried the Floor's config. 
So first, yes the shielding magnet seems to slide easily through the first stuck of magnet, but at the expense of a strong up or down force (in fact this is the slider's stroke force of your V2 version), so this up or down force (depending of the vertical orientation  of the stack of magnets) should be strongly directed with very good build and efficient sliding gear to maintain the magnet on the path and avoid too much friction losses. And so to be able to precisely measure the engage and disengage energy. By hand due to all the mixing forces it is of course impossible to correctly estimate . The second thing i notice, is that the shielding effect seems to be some how effective when the second (moving "piston"  ) magnet is very near to the shielding magnet, but at some distance, some repelling force is always present. So there is not a  complete cancellation of the repelling force , anyway with my magnets crude manual  test and ,another time, by hand it is not possible to really estimate, due to all the mixing forces. So it seems that we  always need some mechanical force to approach the "piston" magnet towards the shielding magnets. Adding that this "piston " magnet exhibit also strong torque and up and down forces, that have to be correctly directed by a efficient  mechanical device to maintain the path. I tried to double the thickness of the shielding magnet, but all the above stay more or less the same. So to be able to test properly , we need to invest time and energy for a very well and sturdy build, with very fine scale and a lot of time to detect if there is any OU possibility. But by doing those crude manual test, i am sorry to say that i did not get the "waouuuhhh" feeling, that could motivate me to go further on this delicate experiment. So to me, all those systems are very effective and interesting magnetic coupling device , but so far no OU. But i can be and i hope to be wrong of course. 
Perhaps Floor will elaborate more and find the right path to the Grail. Hope this helps Laurent Title: Re: TD replications Post by: gotoluc on March 23, 2017, 01:46:12 PM Thank you Laurent for sharing your preliminary shielding test. One thing for sure is, if there's a magnet configuration that can go thru a complete cycle and have a gain it's still unknown. I'll be keeping an eye on floor's research. Kind regards Luc Title: Re: TD replications Post by: woopy on March 23, 2017, 03:08:59 PM Hi Luc and all just for fun and without any pretension it is what i name a "waouuuhhh" moment. But is it good or not must be tested much bigger, because i don't feel any forces at so small scale. https://youtu.be/qoHsCzt2uvA Laurent Title: Re: TD replications Post by: ramset on March 23, 2017, 08:36:44 PM More seasoned members here will remember Butch Lafonte for consideration ? and Butch's channel apologies for the interruption ,Butch was always a favorite here, not sure what happened [if he is still a member?] respectfully Chet K Title: Re: TD replications Post by: Cairun on March 24, 2017, 02:32:32 AM @webby1 I looked over my original papers and I can confirm the 129 samples were taken every .25 inches. It was the output samples that were taken every .125 inches. Regards Luc Luc, I understand now.  Thanks for the clarification. Regards, Alex Title: Re: TD replications Post by: Floor on March 24, 2017, 11:35:27 PM GammaRayBurst hasn't posted since march 2015 Here is some of his pseudo solid stuff I was looking at back around  then. http://overunity.com/14070/super-simple-way-to-see-proof-pseudo-solid-principle-works-using-ring-magnets/msg380041/#msg380041 floor Title: Re: TD replications Post by: Floor on March 24, 2017, 11:55:48 PM @Woopy Here are some drawings and details of that particular interaction  @ http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025 I'm setting up to measure the inputs and output now. 
@Ramset This  (below) is more related to the current topic than the Gammarayburst / pseudo solid stuff. http://overunity.com/14412/mag-mirror-engine/ floor Title: Re: TD replications Post by: Floor on March 25, 2017, 12:23:36 AM @woopy Here are some drawings that clarify the interactions (in the video) somewhat  @ http://overunity.com/17097/magnet-force-shield/msg502025/#msg502025 I am setting up to do the input / output measurements today / tomorrow. @Ramset Here is a device more closely related to the current topic  @ http://overunity.com/14412/mag-mirror-engine/ best wishes floor Title: Re: TD replications Post by: Floor on March 26, 2017, 09:13:19 PM @Gotoluc If it interests you... something I would like to see... is a really good  replication / redo of the TD (twist drive) tests I first presented / asked for at the start of these topics. best wishes floor Title: Re: TD replications Post by: gotoluc on March 28, 2017, 05:57:47 PM @Gotoluc If it interests you... something I would like to see... is a really good  replication / redo of the TD (twist drive) tests I first presented / asked for at the start of these topics. best wishes floor Hi floor, I'm taking some time off but will keep an eye on your results. Kind regards Luc Title: Re: TD replications Post by: Floor on March 28, 2017, 11:20:30 PM Your efforts / innovations have given us lots of good information... including "Luc's force" and what is needed  to improve our processes. bravo ! best wishes floor Title: Re: TD replications Post by: burnit0017 on May 01, 2017, 01:42:34 PM https://youtu.be/jKIWcJiJs04 hi, possible suggestion for adding a flywheel. use a one way bearing on the flywheel. if a PMA is used for a generator then a buck converter can be used to reduce the (I squared R) loss at the stator. Just a suggestion.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6064892411231995, "perplexity": 1698.0859630116122}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-30/segments/1500549424564.72/warc/CC-MAIN-20170723142634-20170723162634-00310.warc.gz"}
https://mvtrinh.wordpress.com/2012/12/16/sum-of-three-squares/
## Sum of Three Squares Find the four smallest distinct positive integers $a,b,c,d$ such that $a^2+b^2+c^2=d^2$. Source: NCTM Mathematics Teacher SOLUTION Consider the square integers $1,4,9,16,25,\dots$ We form sets of square integers; find all possible three-element subsets of those sets; and see if the sum of the three elements is a square. $A=\left \{1,4,9\right \}$ Number of three-element subsets $\binom{3}{3}=1$ $1+4+9=14$  Not a solution $A=\left \{1,4,9,16\right \}$ $\binom{4}{3}=4$ $1+4+9=14$  Repeat of previous step $1+4+16=21$  No $1+9+16=26$  No $4+9+16=29$  No $A=\left \{1,4,9,16,25\right \}$ $\binom{5}{3}=10$ $4$ subsets are repeat; $6$ new ones are listed below $25+1+4=30$ No $25+1+9=35$  No $25+1+16=42$  No $25+4+9=38$  No $25+4+16=45$  No $25+9+16=50$  No $A=\left \{1,4,9,16,25,36\right \}$ $\binom{6}{3}=20$ $10$ subsets are repeat; $10$ are new $36+1+4=41$  No $36+1+9=46$  No $36+1+16=53$  No $36+1+25=62$  No $36+4+9=49$  Yes $6^2+2^2+3^2=7^2$ Though not necessary we list the remaining $5$ subsets for completeness $36+4+16=56$  No $36+4+25=65$  No $36+9+16=61$  No $36+9+25=70$  No $36+16+25=77$  No Answer: $2,3,6,7$
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 39, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6441476345062256, "perplexity": 11706.552105125975}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-43/segments/1508187822625.57/warc/CC-MAIN-20171017234801-20171018014801-00481.warc.gz"}
https://overiq.com/django-1-10/installing-django/
# Installing Django Last updated on July 27, 2020 To create a new Django application you must have the following things installed on your computer: 1. Python. 2. A Virtual Environment. 3. Django. ## Installing Python on Windows # In this tutorial, we will use Python 3.4. Let's start by installing Python on Windows first. Note: Throughout this course instructions are given for Windows, Ubuntu, and Mac. Most of the commands will work no matter which OS you are using. However, there are some commands which vary from one system to another. If that's the case, I have clearly mentioned it and provided commands specific to the system. After downloading the installer double click to open it. Select "Install for all users" and click Next. In the next window, the installer will ask you to select destination directory, keep it to default (i.e C:\Python34\) and click next. This step is the most crucial. In this step, the installer will ask you to customize the Python installation. Scroll down a little and select "Add python.exe to PATH", click the dropdown icon in front of it and select "Will be installed on local hard drive". This allows us to invoke python.exe in the Command Prompt from anywhere in the file system without specifying it's full path. Click Next. At this point, you may get a confirmation window like the following, asking you to confirm the installation. Select Yes to proceed. The installer will take a minute or two, depending upon the speed of your system to complete the installation. Once done. Click Finish to exit the installer. Now you should have Python 3.4 installed on your PC. To verify the Python installation execute the following command in the Command Prompt. C:\Users\Q>python --version Python 3.4.4 C:\Users\Q> ## Python Package Manager # In Python, we use pip (Python Package Index) to install and manage different packages (or libraries) available at https://pypi.python.org/pypi. 
It is important to note that pip itself is a package and is used to install other packages. The packages you install using pip will be installed in the directory C:/python34/lib/site-packages. Windows installer automatically installs pip by default, so you don't need to do anything else. To check the version of pip installed on your system execute the following command. C:\Users\Q>pip --version pip 7.1.2 from c:\python34\lib\site-packages (python 3.4) C:\Users\Q> As you can see, this system has pip 7.1 and is installed under Python 3.4. ## Installing Python on Linux # On a Linux distribution like Ubuntu or Fedora, it is highly likely that Python 3.4 or above is installed. To check, open terminal and type the following command. Q@VM1:~$python3 --version Python 3.5.1 As you can see, my Ubuntu machine has Python 3.5 installed by default. Although the whole tutorial is geared towards Python 3.4, it would be perfectly fine if you choose to use Python 3.5. We will be using Django 1.10 in this course which only works with Python 3.4 and 3.5. So make sure you have either Python 3.4 or 3.5 installed on your system. Trying to use Django 1.10 with some other version of Python may result in unexpected issues. If you want to work with Python 3.4 instead of Python 3.5 in Ubuntu type the following commands in the terminal. Q@VM1:~$: sudo add-apt-repository ppa:fkrull/deadsnakes Q@VM1:~$: sudo apt-get update Q@VM1:~$: sudo apt-get install python3.4 Execute each of these commands one by one on the command line. These commands first download the Python from ppa:fkrull/deadsnakes repository and then installs it on your system. On a slow connection the whole process may take some time, so please be patient. To invoke Python 3.4 type python3.4 instead of python3 in the command line as follows: Q@VM1:~$python3.4 Python 3.4.5 (default, Jul 15 2016, 16:39:07) [GCC 5.4.0 20160609] on linux Type "help", "copyright", "credits" or "license" for more information. 
>>> To exit the Python shell hit Ctrl+D or type quit() in the shell. Python installer for Ubuntu doesn't install pip. To install it execute the following command: Q@VM1:~$ sudo apt-get install python3-pip To verify the pip installation execute the following command. Q@VM1:~$pip3 --version pip 8.1.1 from /usr/lib/python3/dist-packages (python 3.5) Q@VM1:~$ Notice that pip3 is installed as a package which belongs to Python 3.5, not Python 3.4. ## Installing Python on Mac # Most Mac comes with Python 2.7 installed by default. To install Python 3.4 on a Mac visit https://www.python.org/downloads/release/python-344/ and scroll down to bottom of the page, under Files select the installer corresponding to your version of Mac OS. If you are using 32-bit version of Mac OS X or later download Mac OS X 32-bit i386/PPC installer and if you are using 64-bit version of Mac OS X or later download Mac OS X 64-bit/32-bit installer. On a 64-bit system, both installers will work. Python installer for Mac OS also installs pip so you don't need to install anything else. To verify Python and pip installation execute the following commands in the terminal. Qs-Mac:~ q$python3 --version Python 3.4.4 Qs-Mac:~ q$ Qs-Mac:~ q$pip3 --version pip 9.0.1 from /Users/q/TGDB/env/lib/python3.4/site-packages (python 3.4) Qs-Mac:~ q$ Notice that in Ubuntu and Mac, we are checking the version of pip using pip3 --version instead of pip --version command as we did in Windows. This is because Linux/Mac usually comes pre-installed with Python 2 and most of the time pip --version command is associated with pip installed under Python 2. ## Installing Python Virtual Environment # Create a new directory named TGDB (short for "The Great Django Blog") using the mkdir command. C:\Users\Q>mkdir TGDB C:\Users\Q> We will use this folder to store our Django application. You can create this directory anywhere, location doesn't really matter. 
I am using Windows and I have created this directory in C:\Users\Q\ where Q is my username. Once done, change your current working directory to TGDB using the cd command, as follows: C:\Users\Q>cd TGDB C:\Users\Q\TGDB> Now we are ready to install Virtual Environment. So what is this Virtual Environment? A virtual environment helps us to run isolated instances of Python/Django projects on a machine without conflicting with one another. To understand the philosophy behind Virtual Environment, consider the following example: Let's say we are working on two projects, a blog and a forum for two different clients. Our blog uses a library called super_library_v02, on the other hand, our forum uses super_library_v01. At a given point in time, we can only have a single version of super_library installed on our system, we can't have both versions simultaneously. A Virtual Environment helps us to tackle these kinds of problems easily. A Virtual Environment solves this problem by creating a separate Python installation. So, no matter what libraries you install on a particular virtual environment using pip, will not conflict with the libraries available at the system-wide Python installation. The package required to create these isolated environments is called virtualenv. To install virtualenv on Windows open command prompt and type the following command. C:\Users\Q\TGDB>pip install virtualenv To install virtualenv in Ubuntu/Mac type the following command. Q@VM1:~/TGDB$pip3 install virtualenv To create virtual environment type virtualenv command followed by the name of the virtual environment. Here is how you can create a virtual environment in Windows: C:\Users\Q\TGDB>virtualenv env Using base prefix 'c:\\python34' New python executable in C:\Users\X\TGDB\env\Scripts\python.exe Installing setuptools, pip, wheel...done. C:\Users\X\TGDB> By default, virtualenv creates virtual environment using the version of Python under which it is installed. 
In the case of Windows, it is Python 3.4 whereas in Ubuntu it is Python 3.5. C:\Users\Q>pip --version pip 7.1.2 from c:\python34\lib\site-packages (python 3.4) To specify any other version of the Python use -p option as follows: C:\Users\Q\TGDB>virtualenv env -p C:/Python27/python.exe The above command will create a virtual environment using Python 2.7. Before specifying the version of the python using the -p option make sure that the version of the Python specified is installed on your computer. We can use the same command as Windows to create a virtual environment in Ubuntu/Mac: Q@VM1:~/TGDB$ virtualenv env Using base prefix '/usr' New python executable in /home/q/my_workspace/env/bin/python3 Also creating executable in /home/q/my_workspace/env/bin/python Installing setuptools, pip, wheel...done. Q@VM1:~/TGDB$ In Ubuntu, the above command will create a virtual environment using Python 3.5 because virtualenv was installed as a package of Python 3.5. In case you want to follow along using Python 3.4 in Ubuntu, delete the env directory created by virtualenv command and then create a new virtual environment using Python 3.4, as follows: Q@VM1:~/TGDB$ virtualenv env -p /usr/bin/python3.4 Running virtualenv with interpreter /usr/bin/python3.4 Using base prefix '/usr' New python executable in /home/Q/my_workspace/env/bin/python3.4 Also creating executable in /home/Q/my_workspace/env/bin/python Installing setuptools, pip, wheel...done. Q@VM1:~/TGDB$ To know the absolute path of Python 3.4 use the which command. Q@VM1:~/TGDB$ which python3.4 /usr/bin/python3.4 Q@VM1:~/TGDB$ So what actually virtualenv command does? The virtualenv command creates an isolated environment, a directory which we named env for developing applications using Django/Python. Once virtualenv finished setting up a new virtual environment, open Windows Explorer or Nautilus in Ubuntu to view the files and folders virtualenv has created for us inside the env directory. 
In Windows, the contents of env directory should look like this: In Ubuntu/Mac, the contents of env directory should look like this: So what these files and folder contain? These files and folder constitute a separate python installation. Any libraries or packages you install here will be available only inside this virtual environment, so you can work on your project without conflicting with other packages installed on the system. ## Activating virtualenv # We have created a virtual environment in the last step, to use it we first have to activate it. ### Activating virtualenv in windows # To activate virtual environment in Windows type the following command. C:\Users\Q\TGDB>env\Scripts\activate.bat (env) C:\Users\Q\TGDB> ### Activating virtualenv in Ubuntu/Mac # On Linux/Mac, we use source command to activate virtual environment. Q@VM1:~/TGDB$ source env/bin/activate (env) Q@VM1:~/TGDB$ Notice (env) in front of the prompt string, it indicates that your virtual environment is up and running. From this point on, any package you add or remove using pip will only affect this virtual environment. Your system-wide Python installation will remain intact. We can use pip list command to view packages installed in this virtual environment. (env) C:\Users\Q\TGDB>pip list DEPRECATION: The default format will switch to columns in the future. You can us e --format=(legacy|columns) (or define a format=(legacy|columns) in your pip.con f under the [list] section) to disable this warning. pip (9.0.1) setuptools (36.4.0) wheel (0.29.0) (env) C:\Users\Q\TGDB> This virtual environment has 3 packages installed. It is important to note that once the virtual environment is active you can invoke pip either using pip or pip3. This is true for Window, Ubuntu as well as Mac. To deactivate virtual environment issue the following command. (env) C:\Users\Q\TGDB>deactivate This command is same for Windows, Ubuntu, and Mac. Now we are out of the virtual environment. 
Run pip list command again, but this time it will show you all the system-wide packages installed on your system. C:\Users\Q\TGDB>pip list certifi (2017.4.17) chardet (3.0.4) colorama (0.3.9) decorator (4.0.11) httpie (0.9.9) idna (2.5) ipython (6.1.0) ipython-genutils (0.2.0) jedi (0.10.2) olefile (0.44) pickleshare (0.7.4) pip (7.1.2) ... C:\Users\X\TGDB> Note: ... indicates that the code snippet is truncated to save space. On Ubuntu/Mac, you should use pip3 list to view system-wide packages installed on the system. In Ubuntu: Q@VM1:~$ pip3 list apturl (0.5.2) beautifulsoup4 (4.4.1) Brlapi (0.6.4) chardet (2.3.0) checkbox-support (0.22) command-not-found (0.3) cryptography (1.2.3) defer (1.0.6) feedparser (5.1.3) guacamole (0.9.2) html5lib (0.999) ... httplib2 (0.9.1) In Mac: Qs-Mac:TGDB q$pip3 list pip (7.1.2) setuptools (18.2) virtualenv (15.1.0) You are using pip version 7.1.2, however version 9.0.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. Qs-Mac:TGDB q$ So these packages are available on my system-wide Python installation. Yours could be different. In the next step, we will install Django. ## Installing Django # In this tutorial, we will use Django 1.10. To install Django open terminal and type the following command. (env) C:\Users\Q\TGDB>pip install django==1.10 This command fetches the Django framework from PyPI and installs it into your virtual environment. The output of the command should look like this: (env) C:\Users\Q\TGDB>pip install django==1.10 Collecting django==1.10 100% |################################| 6.8MB 80kB/s Installing collected packages: django Successfully installed django-1.10 (env) C:\Users\Q\TGDB> If you just want to install current stable version simply issue the following command. (env) C:\Users\Q\TGDB>pip install django ## Testing the Installation # Inside the virtual environment start the Python shell by typing python command. 
(env) C:\Users\Q\TGDB>python Python 3.4.4 (v3.4.4:737efcadf5a6, Dec 20 2015, 20:20:57) [MSC v.1600 64 bit (AM D64)] on win32 >>> If you are on Ubuntu/Mac start the Python shell using python3 (or python3.4) command. To verify whether the installation was successful or not, import django package and call get_version() method as follows. 1 2 3 4 >>> import django >>> django.get_version() '1.10' >>> To exit the Python shell type Ctrl + Z (in Windows), Ctrl + D (in Linux/Mac) or just type quit(). 1 2 3 4 5 6 >>> import django >>> django.get_version() '1.10.5' >>> ^Z (env) C:\Users\Q\TGDB>
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.17698557674884796, "perplexity": 6742.841311581518}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-40/segments/1600401585213.82/warc/CC-MAIN-20200928041630-20200928071630-00064.warc.gz"}
http://math.stackexchange.com/questions/203995/proof-p-vdash-q-rightarrow-p
# Proof $p \vdash q \Rightarrow p$ Firstly, how do I read it? Is below right? With braces $$p \vdash (q \Rightarrow p)$$ The given proof is: 1. ${p}$, premise 2. ${q}$, assumption 1. $p$ by (1) // what??? 3. ${q} \Rightarrow p$ by implication introduction with 2 and 2.1 QED... I can't really get the link between $p$ and $q$ in 2.1 ... - What's your formal system? What rules of inference and axioms do you have? –  Doug Spoonwood Sep 28 '12 at 15:01 Doesn't $p \vdash (q \implies p)$ mean $(p \land q) \implies ( p \land p )$? –  xavierm02 Sep 28 '12 at 16:26 The point of the claim is that there is no link between $p$ and $q$ in 2.1. Once $p$ is a premise, anything implies $p$, because an implication can only be false if its consequent is false. - "[A]n implication can only be false if its consequent is false." I wonder if you really believe that! :-) Do you really think, e.g. that Gödel's Incompleteness Theorem implies Fermat's Last Theorem? Or wouldn't you, in ordinary mathematical chat, in fact distinguish a genuine implication relation from the material conditional (which is what is in play here)?? –  Peter Smith Sep 28 '12 at 18:51 Of course I don't believe that every English statement of the form, for instance "foo is true because bar" is true whenever "foo" is. Nor does my claim hold in various modern logics. I'm personally not picky about using the word "implication" for formal sentences including arrows when the context, as now, is clear. –  Kevin Carlson Sep 28 '12 at 20:11 There doesn't have to be any intuitive connection. If you can somehow derive $p$ after (not necessarily because) you assume $q$, then you're allowed to conclude $q\Rightarrow p$. In this case, $p\vdash\cdots$ means that you're explicitly allowing yourself to prove $p$ from nothing. Then you can also prove $q\Rightarrow p$. 
- Many/most systems of natural deduction for classical (non-relevantist) logic allow (i) reiteration, and also (ii) unrestricted discharge of assumptions -- so we are allowed to write 1. $p\quad\quad\quad$ Premiss 2. $\quad|\quad q\quad$ Supposition 3. $\quad|\quad p\quad$ From (1), by reiteration 4. $q \to p\quad\$ Conditional Proof, by proof from (2) to (3) There is indeed no 'link' between $p$ and $q$ at step (3). But that isn't needed at step (4), in most systems. The CP rule is: given a sub-proof starting from $A$ and concluding $B$ we can discharge the assumption $A$ and infer $A \to B$ (on the remaining assumptions/premisses). We don't, in typical classical systems, have to check that the assumption $A$ is actually invoked in getting to $B$. Does that mean we shouldn't like reiteration and/or should restrict discharge? Well, actually that wouldn't much affect things in the presence of other standard rules. Thus consider the proof 1. $p\quad\quad\quad\quad$ Premiss 2. $\quad|\quad q\quad\quad$ Supposition 3. $\quad|\quad p \land q\quad$ From (1), (2) 4. $\quad|\quad p\quad\quad$ From (3) 5. $q \to p\quad\quad\$ Conditional Proof, by proof from (2) to (4) And now $q$ is invoked en route to getting to line (4). - Another method involving a natural deduction system would involve invoking p as another supposition (note all suppositions have scope) as follows. 1. p [premise], 2. | q [supposition] 3. || p [premise] 4. | (p->p) [3-3 conditional introduction] 5. |p [1., 4. conditional elimination] 6. (q->p) [2.-5. conditional introduction] –  Doug Spoonwood Sep 28 '12 at 15:08
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8365629315376282, "perplexity": 1556.356613472694}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-27/segments/1435375097710.32/warc/CC-MAIN-20150627031817-00094-ip-10-179-60-89.ec2.internal.warc.gz"}
http://cs.stackexchange.com/questions/6419/proving-the-language-of-words-with-equal-numbers-of-symbols-non-context-free
# Proving the language of words with equal numbers of symbols non-context-free [duplicate] Possible Duplicate: How to prove that a language is not context-free? I'm having a hard time figuring this out, any help is appreciated. Let EQUAL be the language of all words over $\Sigma = \{a,b,c\}$ that have the same number of $a$’s, $b$’s and $c$’s $\qquad \text{EQUAL} = \{ w \in \Sigma^* \mid |w|_a = |w|_b = |w|_c \}$ The order of the letters doesn't matter. How can you prove that EQUAL is non-context-free? - Check the various methods given at this question –  Ran G. Nov 1 '12 at 6:31 It's actually an example in this answer. Closing as duplicate. –  Raphael Nov 1 '12 at 14:35 It is known that the intersection of a context-free language with a regular language is context-free. If EQUAL were context-free, so would be its intersection with $a^*b^*c^*$, which is the well-known non-context-free $\{a^nb^nc^n : n \in \mathbb{N}\}$.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6715980172157288, "perplexity": 498.7330738607482}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2013-48/segments/1386163047545/warc/CC-MAIN-20131204131727-00053-ip-10-33-133-15.ec2.internal.warc.gz"}
https://www.newcastle.edu.au/profile/john-furst?group=c
# Dr John Furst Senior Lecturer School of Mathematical and Physical Sciences (Physics) ## Career Summary ### Biography I have worked extensively in the US in the field of fundamental atomic physics. Between 2000 and 2006 I was involved in multidisciplinary teams that had expertise in both Food Technology and Marine Science. My work in that period involved a combination of very applied physics, such as the cooking of wheat and limpet adhesion mechanisms, together with more fundamental physics, such as the characterisation of conjugated polymers and the photo-dissociation of molecules. I am currently working in several teams studying diverse topics such as fundamental molecular physics, food science and human nutrition. Research Expertise My research work is multidisciplinary. I have done research in various areas of Applied Science, such as Marine Science, Food Science and Nanotechnology as well as fundamental atomic and molecular physics. Thus I have looked at physical processes in a variety of topics e.g. -The physical properties of cooked wheat in breakfast cereals. -Limpet adhesion mechanisms in the intertidal zone -The characterisation of conjugated polymers using photoluminescence techniques. -The photodissociation of simple molecules. My most recent research into the photodissociation of molecules is part of a collaborative effort with researchers in the US who were developing a new set of experiments at the Advanced Light Source (ALS) at the Lawrence Berkeley National Laboratory on California. I am the team co-leader and involved in the planning of the experiments and their implementation while at the ALS. Teaching Expertise My teaching spans disciplines: -I teach extensively in all areas of first year physics. -I have also taught extensively in a first year course involving the ways in which computers are used in Science. 
-I have taught in a Marine Science course, Underwater Research Methods and diving -I teach a specialist course in atomic and molecular physics at third year level and Special Relativity and Atomic Physics at Second Year level. I am currently Head of the Physics Discipline. Collaborations I am part of a successful collaboration with groups at the University of Nebraska and Lawrence Berkeley National Laboratory in the USA. We do work on fundamental molecular physics at the Advanced Light Source in California. ### Qualifications • PhD, Flinders University • Diploma in Education, Flinders University • Bachelor of Science (Honours), Flinders University ### Keywords • Atomic and Molecular Physics • Food Material characterisation • physics ### Fields of Research Code Description Percentage 020299 Atomic, Molecular, Nuclear, Particle and Plasma Physics not elsewhere classified 15 020599 Optical Physics not elsewhere classified 15 090899 Food Sciences not elsewhere classified 70 ### Professional Experience #### UON Appointment Title Organisation / Department Senior Lecturer University of Newcastle School of Mathematical and Physical Sciences Australia Dates Title Organisation / Department 1/01/2007 -  Senior Lecturer University of Newcastle School of Mathematical and Physical Sciences Australia 1/01/2006 - 1/12/2006 Lecturer University of Newcastle School of Mathematical and Physical Sciences Australia 1/01/1998 - 1/12/2005 Lecturer University of Newcastle Applied Science & Technology (AS&T) Australia 1/02/1995 - 1/12/1997 Associate Lecturer University of Newcastle Applied Science & Technology (AS&T) Australia 1/09/1992 - 1/02/1995 Senior Research Associate The University of Western Australia Department of Physics Australia 1/07/1989 - 1/08/1992 Research Assistant Professor University of Missouri-Rolla Department of Physics United States 1/07/1985 - 1/07/1989 Research Scientist University of North Texas Department of Physics United States 1/04/1983 - 1/06/1985 Visiting 
Research Associate University of Oklahoma Department of Physics and Astronomy United States #### Membership Dates Title Organisation / Department 22/06/2015 -  Newcastle Representative - NSW Branch of Australian Institute of Physics NSW Branch of Australian Institute of Physics Australia Edit ## Publications For publications that are currently unpublished or in-press, details are shown in italics. ### Journal article (44 outputs) 2017 Truong BQ, Buckow R, Nguyen MH, Furst J, 'Gelation of barramundi (Lates calcarifer) minced muscle as affected by pressure and thermal treatments at low salt concentration', Journal of the Science of Food and Agriculture, 97 3781-3789 (2017) [C1] DOI 10.1002/jsfa.8242 Minh Nguyen 2017 Lucock M, Beckett E, Martin C, Jones P, Furst J, Yates Z, et al., 'UV-associated decline in systemic folate: implications for human nutrigenetics, health, and evolutionary processes', AMERICAN JOURNAL OF HUMAN BIOLOGY, 29 (2017) [C1] DOI 10.1002/ajhb.22929 Scopus - 2Web of Science - 2 Emma Beckett, Zoe Yates, Martin Veysey, Mark Lucock, Patrice Jones 2017 Beckett EL, Jones P, Veysey M, Duesing K, Martin C, Furst J, et al., 'VDR gene methylation as a molecular adaption to light exposure: Historic, recent and genetic influences', AMERICAN JOURNAL OF HUMAN BIOLOGY, 29 (2017) [C1] DOI 10.1002/ajhb.23010 Patrice Jones, Martin Veysey, Emma Beckett, Zoe Yates, Mark Lucock 2017 Truong BQ, Buckow R, Nguyen MH, Furst J, 'Effect of high-pressure treatments prior to cooking on gelling properties of unwashed protein from barramundi (Lates calcarifer) minced muscle', INTERNATIONAL JOURNAL OF FOOD SCIENCE AND TECHNOLOGY, 52 1383-1391 (2017) [C1] DOI 10.1111/ijfs.13409 Minh Nguyen 2016 Beckett EL, Duesing K, Martin C, Jones P, Furst J, King K, et al., 'Relationship between methylation status of Vitamin D-related genes, Vitamin D levels, and methyl-donor biochemistry', Journal of Nutrition and Intermediary Metabolism, 6 8-15 (2016) [C1] © 2016 The Authors. 
Published by Elsevier Inc. Vitamin D is known for its role in the regulation of gene expression via the Vitamin D receptor, a nuclear transcription factor. Mo... [more] © 2016 The Authors. Published by Elsevier Inc. Vitamin D is known for its role in the regulation of gene expression via the Vitamin D receptor, a nuclear transcription factor. More recently, a role for Vitamin D in regulating DNA methylation has been identified as an additional mechanism of modulation of gene expression. How methylation status influences Vitamin D metabolism and response pathways is not yet clear. Therefore, we aimed to assess the relationship between plasma 25-hydroxycholecalciferol (25(OH)D) and the methylation status of Vitamin D metabolism enzyme genes (CYP2R1, CYP27B1 and CYP24A1) and the Vitamin D receptor gene (VDR). This analysis was conducted in the context of dietary Vitamin D, and background methyl donor related biochemistry, with adjustment for several dietary and lifestyle variables. Percentage methylation at CpG sites was assessed in peripheral blood cells using methylation sensitive and dependent enzymes and qPCR. Standard analytical techniques were used to determine plasma 25(OH)D and homocysteine, and serum folate and B12, with the relationship to methylation status assessed using multi-variable regression analysis. CYP2R1 and VDR methylation were found to be independent predictors of plasma 25(OH)D, when adjusted for Vitamin D intake and other lifestyle variables. CYP24A1 was related to plasma 25(OH)D directly, but not in the context of Vitamin D intake. Methyl-group donor biochemistry was associated with the methylation status of some genes, but did not alter the relationship between methylation and plasma 25(OH)D. 
Modulation of methylation status of CYP2R1, CYP24A1 and VDR in response to plasma 25(OH)D may be part of feedback loops involved in maintaining Vitamin D homeostasis, and may explain a portion of the variance in plasma 25(OH)D levels in response to intake and sun exposure. Methyl-group donor biochemistry, while a potential independent modulator, did not alter this effect. DOI 10.1016/j.jnim.2016.04.010 Scopus - 1 Patrice Jones, Emma Beckett, Katrina King, Martin Veysey, Mark Lucock, Zoe Yates 2015 Lucock M, Jones P, Martin C, Beckett E, Yates Z, Furst J, Veysey M, 'Vitamin D: Beyond Metabolism', Journal of Evidence-Based Complementary and Alternative Medicine, 20 310-322 (2015) [C1] © 2015, The Author(s) 2015. Interest in vitamin D and the VDR gene is increasing as putative roles in human health and evolutionary processes are explored. This review looks beyo... [more] © 2015, The Author(s) 2015. Interest in vitamin D and the VDR gene is increasing as putative roles in human health and evolutionary processes are explored. This review looks beyond the classic biochemistry that links vitamin D to calcium homeostasis; it explores how vitamin D interacts with light in a broader perspective than simple skin photosynthesis. It examines how the vitamin influences circadian rhythm, and how it may have helped drive the evolution of skin pigmentation. To this end, the nutrient¿nutrient relationship with folate is also explored. The VDR gene is additionally examined as a factor in the evolutionary selection of skin depigmentation at higher latitudes to allow vitamin D synthesis. Evidence is given to show that VDR polymorphisms exhibit a latitudinal gradient in allele prevalence consistent with such a paradigm. Overall, the review examines new evo-devo ideas that link light-sensitive vitamins to human health/phenotype, both within and across the lifecycle. 
DOI 10.1177/2156587215580491 Scopus - 3 Martin Veysey, Emma Beckett, Patrice Jones, Zoe Yates, Mark Lucock 2015 Beckett EL, Martin C, Duesing K, Jones P, Furst J, Yates Z, et al., 'Vitamin D receptor genotype modulates the correlation between vitamin D and circulating levels of let-7a/b and vitamin D intake in an elderly cohort', Journal of Nutrigenetics and Nutrigenomics, 7 264-273 (2015) [C1] DOI 10.1159/000381676 Scopus - 9Web of Science - 7 Zoe Yates, Martin Veysey, Mark Lucock, Patrice Jones, Emma Beckett 2014 Lucock M, Yates Z, Martin C, Choi JH, Boyd L, Tang S, et al., 'Vitamin D, folate, and potential early lifecycle environmental origin of significant adult phenotypes.', Evolution, Medicine, and Public Health, 2014 69-91 (2014) [C1] DOI 10.1093/emph/eou013 Martin Veysey, Mark Lucock, Zoe Yates, Paul Roach 2013 Sharafutdinova G, Cvetanovski M, Walton D, Guest B, Foster J, Piszczuk G, Furst J, 'Distance learning laboratory: A remote Atomic and Nuclear Physics experiment', Australian Physics, 50 55-59 (2013) [C2] While it is clear that on-line education offers new opportunities for students to study in a way that suits their particular learning style, there are laboratory-based disciplines... [more] While it is clear that on-line education offers new opportunities for students to study in a way that suits their particular learning style, there are laboratory-based disciplines that require hands-on experience with real world systems. This is especially true in Physics where a standard curriculum involves a large amount of laboratory-based work. While computer-based simulation software has been used extensively in engineering and various science-based areas these are not a direct substitute for traditional Physics experimental labs which provide students with the practical skills of handling real equipment, and measuring with sufficient accuracy and precision to allow comparison with various theoretical predictions. 
On-line laboratories where distance students have remote control over real laboratory equipment is a practical alternative to traditional physics laboratories, at least for some classes of experiments. This paper describes the development of a remote-controlled Cosmic Ray Telescope experiment at the University of Newcastle. Citations Scopus - 2 Galiya Sharafutdinova 2012 Lucock MD, Glanville T, Yates ZR, Walker J, Furst JE, Simpson N, 'Solar cycle predicts folate-sensitive neonatal genotypes at discrete phases of the first trimester of pregnancy: A novel folate-related human embryo loss hypothesis', Medical Hypotheses, 79 210-215 (2012) [C1] Citations Scopus - 8Web of Science - 6 Zoe Yates, Mark Lucock 2012 Furst JE, Gay TJ, Machacek J, Kilkoyne D, McLaughlin KW, 'Orientation of doubly excited states in N2', Physical Review A, 86 1-5 (2012) [C1] Citations Scopus - 2Web of Science - 2 2011 Machacek JR, Andrianarijaona VM, Furst JE, Kilcoyne ALD, Landers AL, Litaker ET, et al., 'Production of excited atomic hydrogen and deuterium from H-2, HD and D-2 photodissociation', Journal of Physics B - Atomic Molecular and Optical Physics, 44 (2011) [C1] DOI 10.1088/0953-4075/44/4/045201 Scopus - 7Web of Science - 7 2010 Machacek JR, Andrianarijaona VA, Furst JE, Gay TJ, Kilcoyne ALD, Landers AL, et al., 'Production of Atomic Hydrogen and Deuterium from H$_2$, D$_2$ and HD Photodissociation', Bulletin of the American Physical Society, 55 (2010) 2009 Machacek JR, Andrianarijaona VM, Furst JE, Gay TJ, Kilcoyne ALD, Landers AL, McLaughlin KW, 'Production of Excited Atomic Hydrogen from Methane', Bulletin of the American Physical Society, 54 (2009) [E3] 2006 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Microstructural changes of starch in cooked wheat grains as affected by cooking temperatures and times', LWT - Food Science and Technology, 39 528-533 (2006) [C1] DOI 10.1016/j.lwt.2005.04.004 Scopus - 21Web of Science - 19 2006 Bozek JD, Furst JE, Gay TJ, Gould H, 
Kilcoyne ALD, Machacek JR, et al., 'Production of excited atomic hydrogen and deuterium from H2 and D2 photodissociation', Journal of Physics B: Atomic, Molecular and Optical Physics, 39 4871-4882 (2006) [C1] DOI 10.1088/0953-4075/39/23/006 Scopus - 24Web of Science - 22 2005 Srikaeo K, Furst JE, Ashton JF, Hosken RW, Sopade PA, 'Wheat grain cooking process as investigated by modulated temperature differential scanning calorimetry', Carbohydrate Polymers, 61 203-210 (2005) [C1] DOI 10.1016/j.carbpol.2005.05.002 Scopus - 8Web of Science - 7 2005 Srikaeo K, Furst JE, Ashton JF, 'Characterization of wheat-based biscuit cooking process by statistical process control techniques', Food Control, 16 309-317 (2005) [C1] DOI 10.1016/j.foodcont.2004.03.010 Scopus - 22Web of Science - 11 2005 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Variability of some physical properties of Australian wheat biscuit breakfast cereals', Food Australia, 57 151-155 (2005) [C1] 2005 Srikaeo K, Furst JE, Hosken RW, Ashton JF, 'Physical properties of cooked wheat grains as affected by cooking temperature and duration', International Journal of Food Properties, 8 469-479 (2005) [C1] DOI 10.1080/10942910500267547 Scopus - 4Web of Science - 3 2004 McNeill CR, Frohne H, Holdsworth JL, Furst JE, King BV, Dastoor PC, 'Direct photocurrent mapping of organic solar cells using a near-field scanning optical microscope', NANO LETTERS, 4 219-223 (2004) [C1] DOI 10.1021/nl0349496 Scopus - 55Web of Science - 52 Paul Dastoor, John Holdsworth, Bruce King 2002 Ellem GK, Furst JE, Zimmerman KD, 'Shell clamping behaviour in the limpet Cellana tramoserica', The Journal of Experimental Biology, 205 539-547 (2002) [C1] Citations Scopus - 17Web of Science - 13 1997 Yu DH, Hayes PA, Williams JF, Furst JE, 'Polarised electron study of angular momentum coupling in the neon 3p states', AUSTRALIAN JOURNAL OF PHYSICS, 50 639-644 (1997) 1997 Yu DH, Hayes PA, Furst JE, Williams JF, 'Spin-orbit and exchange effects in neon 3p 
states', Physical Review Letters, 78 2724-2727 (1997) [C1] Citations Scopus - 10Web of Science - 12 1997 Yu DH, Hayes PA, Williams JF, Furst JE, 'Spin-polarized electron excitation of neon 3p(J=1) states', Journal of Physics B: Atomic Molecular and Optical Physics, 30 1799-1812 (1997) [C1] Citations Scopus - 22Web of Science - 25 1996 Furst JE, Yu DH, Hayes PA, Dsouza CM, Williams JF, 'Liquid crystal variable retarders in atomic scattering', Review of Scientific Instruments, 67 3813-3817 (1996) [C1] Citations Scopus - 11Web of Science - 11 1996 Hayes PA, Yu DH, Furst J, Donath M, Williams JF, 'Excitation of He 3(3)P and Ne 3p states by polarized electrons', JOURNAL OF PHYSICS B-ATOMIC MOLECULAR AND OPTICAL PHYSICS, 29 3989-4000 (1996) DOI 10.1088/0953-4075/29/17/019 Scopus - 24Web of Science - 26 1996 Gay TJ, Furst JE, Trantham KW, Wjayaratna W, 'Optical electron polarimetry with heavy noble gases', Physical Review A: Atomic Molecular and Optical Physics, 53 1623-1629 (1996) [C1] Citations Scopus - 27Web of Science - 27 1995 WANG JB, WILLIAMS JF, STELBOVICS AT, FURST JE, MADISON DH, 'COHERENT EXCITATION OF THE SINGLET-TRIPLET MIXED 1S4F STATE OF HELIUM', PHYSICAL REVIEW A, 52 2885-2900 (1995) DOI 10.1103/PhysRevA.52.2885 Scopus - 9Web of Science - 11 1994 GAY TJ, BRAND JA, FRITTS MC, FURST JE, KHAKOO MA, MELL ER, et al., 'CLEAN ULTRAHIGH-VACUUM SYSTEM WITH SINGLE-STRUCTURE DIFFUSION PUMPS', JOURNAL OF VACUUM SCIENCE & TECHNOLOGY A-VACUUM SURFACES AND FILMS, 12 2903-2910 (1994) DOI 10.1116/1.578963 Web of Science - 1 1993 FURST JE, WIJAYARATNA WMKP, MADISON DH, GAY TJ, 'INVESTIGATION OF SPIN-ORBIT EFFECTS IN THE EXCITATION OF NOBLE-GASES BY SPIN-POLARIZED ELECTRONS', PHYSICAL REVIEW A, 47 3775-3787 (1993) DOI 10.1103/PhysRevA.47.3775 Scopus - 48Web of Science - 48 1992 GAY TJ, KHAKOO MA, BRAND JA, FURST JE, MEYER WV, WIJAYARATNA WMKP, DUNNING FB, 'EXTRAPOLATION PROCEDURES IN MOTT ELECTRON POLARIMETRY', REVIEW OF SCIENTIFIC INSTRUMENTS, 63 114-130 (1992) DOI 
10.1063/1.1143118 Scopus - 38Web of Science - 43 1992 BRAND JA, FURST JE, GAY TJ, SCHEARER LD, 'PRODUCTION OF A HIGH-DENSITY STATE-SELECTED METASTABLE NEON BEAM', REVIEW OF SCIENTIFIC INSTRUMENTS, 63 163-165 (1992) DOI 10.1063/1.1143000 Scopus - 12Web of Science - 9 1992 GAY TJ, FURST JE, GEESMANN H, KHAKOO MA, MADISON DH, WIJAYARATNA WMKP, BARTSCHAT K, 'OPTICAL STUDIES OF POLARIZED-ELECTRON NOBLE-GAS COLLISIONS', INSTITUTE OF PHYSICS CONFERENCE SERIES, 265-274 (1992) 1992 FURST JE, GAY TJ, WIJAYARATNA WMKP, BARTSCHAT K, GEESMANN H, KHAKOO MA, MADISON DH, 'AN ATTEMPT TO OBSERVE MOTT SCATTERING OPTICALLY', JOURNAL OF PHYSICS B-ATOMIC MOLECULAR AND OPTICAL PHYSICS, 25 1089-1096 (1992) DOI 10.1088/0953-4075/25/5/019 Scopus - 15Web of Science - 15 1989 KALIAPERUMAL R, SEARS REJ, NI QW, FURST JE, 'PROTON CHEMICAL-SHIFTS IN SOME HYDROGEN-BONDED SOLIDS AND A CORRELATION WITH BOND LENGTHS', JOURNAL OF CHEMICAL PHYSICS, 91 7387-7391 (1989) DOI 10.1063/1.457262 Web of Science - 24 1989 FURST JE, GOLDEN DE, MAHGEREFTEH M, ZHOU JX, MUELLER D, 'ABSOLUTE LOW-ENERGY E--AR SCATTERING CROSS-SECTIONS', PHYSICAL REVIEW A, 40 5592-5600 (1989) DOI 10.1103/PhysRevA.40.5592 Web of Science - 41 1988 TEUBNER PJO, RILEY JL, BRUNGER MJ, FURST JE, 'ANGULAR-CORRELATIONS IN THE ELECTRON-IMPACT EXCITATION OF SODIUM', PHYSICAL REVIEW A, 37 1476-1478 (1988) DOI 10.1103/PhysRevA.37.1476 Scopus - 3Web of Science - 3 1985 TEUBNER PJO, RILEY JL, FURST JE, BUCKMAN SJ, 'ON THE COHERENCE OF THE EXCITATION OF THE P-32 STATE IN SODIUM BY 100 EV ELECTRONS', JOURNAL OF PHYSICS B-ATOMIC MOLECULAR AND OPTICAL PHYSICS, 18 351-367 (1985) DOI 10.1088/0022-3700/18/2/021 Scopus - 12Web of Science - 11 1985 TEUBNER PJO, RILEY JL, TONKIN MC, FURST JE, BUCKMAN SJ, 'TOTAL CROSS-SECTIONS FOR THE PRODUCTION OF METASTABLE NEON ATOMS BY ELECTRON-IMPACT', JOURNAL OF PHYSICS B-ATOMIC MOLECULAR AND OPTICAL PHYSICS, 18 3641-3652 (1985) DOI 10.1088/0022-3700/18/17/023 Scopus - 13Web of Science - 16 1984 GOLDEN DE, FURST J, 
MAHGEREFTEH M, 'ABSOLUTE ELASTIC ELECTRON-HELIUM SCATTERING CROSS-SECTION MEASUREMENTS FROM 2 TO 19 EV', PHYSICAL REVIEW A, 30 1247-1254 (1984) DOI 10.1103/PhysRevA.30.1247 Web of Science - 17 1984 FURST J, MAHGEREFTEH M, GOLDEN DE, 'ABSOLUTE TOTAL ELECTRONICALLY ELASTIC DIFFERENTIAL EPSILON-(-)H-2 SCATTERING CROSS-SECTION MEASUREMENTS FROM 1 TO 19 EV', PHYSICAL REVIEW A, 30 2256-2260 (1984) DOI 10.1103/PhysRevA.30.2256 Web of Science - 17 1982 TEUBNER PJO, FURST JE, RILEY JL, 'ELECTRON PHOTON COINCIDENCE STUDIES IN SODIUM', AUSTRALIAN JOURNAL OF PHYSICS, 35 501-511 (1982) Citations Web of Science - 3 1981 TEUBNER PJO, FURST JE, TONKIN MC, BUCKMAN SJ, 'ZERO-FIELD QUANTUM BEATS IN THE ELECTRON-IMPACT EXCITATION OF SODIUM', PHYSICAL REVIEW LETTERS, 46 1569-1571 (1981) DOI 10.1103/PhysRevLett.46.1569 Scopus - 10Web of Science - 10 Show 41 more journal articles ### Conference (30 outputs) 2016 Beckett EL, Duesing K, Martin C, Jones P, Furst J, King K, et al., 'Plasma calcidiol and serum folate levels independently predict the methylation status of the vitamin D receptor gene CpG island' (2016) Co-authors Katrina King, Martin Veysey, Zoe Yates, Mark Lucock, Emma Beckett 2016 Beckett EL, Duesing K, Martin C, Jones P, Furst J, King K, et al., 'Plasma 25-hydroxycholecalciferol and serum folate levels are independent predictors of the methylation status of the vitamin D receptor gene' (2016) Co-authors Martin Veysey, Katrina King, Emma Beckett, Mark Lucock, Zoe Yates 2015 Furst J, Holdsworth JL, Gladys M, Dastoor P, King BV, 'First Year Physics at Newcastle: A Longitudinal Study', Proceedings of the Australian Conference on Science and Mathematics Education (2015) [E3] Co-authors Paul Dastoor, Michael Gladys, John Holdsworth, Bruce King 2014 Holdsworth JL, Furst J, Gladys M, King BV, 'OUTCOMES OF ALTERED TEACHING STRATEGY IN FIRST YEAR PHYSICS INSTRUCTION', ACSME Proceedings (2014) [E3] Co-authors Michael Gladys, Bruce King, John Holdsworth 2014 Gladys M, Furst J, 
Holdsworth JL, Dastoor P, King B, 'Do Students Need Face-to-face Teaching?', ACSME Proceedings (2014) [E3] Co-authors Bruce King, Michael Gladys, Paul Dastoor, John Holdsworth 2011 Sharafutdinova G, Cvetanovski M, Walton D, Guest B, Foster J, Piszczuk G, Furst JE, 'Remote laboratory for interactive learning', Technet 2011 National Conference Handbook (2011) [E3] Co-authors Galiya Sharafutdinova 2008 Machacek JR, Bozek JD, Furst JE, Gay TJ, Gould H, Kilcoyne ALD, McLaughlin KW, 'Production of excited atomic hydrogen and deuterium from HD photodissociation', Bulletin of the American Physical Society (2008) [E3] 2007 Furst JE, Gay TJ, Gould H, Kilcoyne ALD, Machacek JR, McLaughlin KW, 'Ionization and dissociation of N2 from 17.5 to 36.5 eV by linearly and circularly polarized light', 38th Annual Meeting of the Division of Atomic, Molecular, and Optical Physics. Abstracts (2007) [E3] 2007 Gay TJ, Bozek JD, Furst JE, Gould H, Kilcoyne ALD, Machacek JR, et al., 'Production of excited atomic hydrogen and deuterium from H2 and D2 photodissociation', 38th Annual Meeting of the Division of Atomic, Molecular, and Optical Physics. 
Abstracts (2007) [E3] 2006 Gay TJ, Machacek JR, Furst JE, Kilcoyne ALD, Bozek JD, Gould H, et al., 'H$\alpha$, H$\beta$, and Ly$\alpha$ fluorescence from the photodissociation of H$_2$ and D$_2$', 37th Meeting of the Division of Atomic, Molecular and Optical Physics (2006) [E3] 2006 Machacek JR, Gay TJ, Furst JE, Kilcoyne ALD, Bozek JD, Gould H, et al., 'N2 fluorescence in photoionization of N2 by 16.3-150 eV photons', 37th Meeting of the Division of Atomic, Molecular and Optical Physics (2006) [E3] 2006 Gay TJ, Bozek JD, Furst JE, Gallup GA, Green AS, Kilcoyne ALD, et al., 'Angular Momentum Partitioning in the Dissociation of Diatomic Molecules', Ionization, Correlation, and Polarization in Atomic Collisions: International Symposium on (e,2e), Double Photoionization, and Related Topics (2006) [E1] 2005 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Flavour Volatiles Generated in Wheat Grain Cooking Process', 55th Australian Cereal Chemistry Conference and Pacific Rim Symposium - Connect 2005 (2005) [E3] 2005 McLaughlin K, Yenen O, Jaecks D, Gay T, Machacek J, Furst JE, 'A Complete Relativistic Determination of Photoelectron Partial Wave Probabilities by Polarization Analysis of the Fluorescence from an Excited Argon Photoion', 2005 36th Meeting of the American Physical Society's Division of Atomic, Molecular and Optical Physics - online abstracts (2005) [E3] 2005 Gay TJ, Furst JE, Jaecks DH, Machacek J, McLaughlin KW, Yenen O, 'Circularly-Polarized Photo-Fragmentation of Molecular Hydrogen', 2005 36th Meeting of the American Physical Society's Division of Atomic, Molecular and Optical Physics - online abstracts (2005) [E3] 2005 Maseberg JW, Furst JE, Gay TJ, 'Polarized Fluorescence from Nitrogen Molecules Excited by Polarized Electron Impact', 2005 36th Meeting of the American Physical Society's Division of Atomic, Molecular and Optical Physics - online abstracts (2005) [E3] 2005 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Investigation of starch gelatinization in wheat 
grain cooking process', 2005 IFT Annual Meeting + Food Expo - online abstracts (2005) [E3] 2005 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Effects of cooking temperatures and times on cooked wheat properties', 2nd International Conference on Innovations in Food Processing Technology and Engineering - online abstracts (2005) [E3] 2005 Machacek JR, Furst JE, Jaecks DH, McLaughlin KW, Yenen O, Gay TJ, 'First observation of angular momentum orientation transfer in photodissociation of H2', XXIV ICPEAC Conference Program (2005) [E3] 2005 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Effects of wheat protein content and processing conditions on physical properties of cooked wheat grains', passion 4 food - skills for success (2005) [E3] 2004 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Application of a risk assessment approach for wheat grain cooking process', First Announcement & Call for Papers (2004) [E3] 2004 Srikaeo K, Furst JE, Ashton JF, Hosken RW, 'Microscopy and image analysis of cooked wheat grains', Program & Abstracts (2004) [E3] 2003 Furst JE, McNeill CR, Clifton-Smith M, Holdsworth J, King BV, Dastoor PC, 'Optical and electrical characteristics of phenyl-capped oligothiopher', Proceedings (2003) [E2] Co-authors Paul Dastoor, Bruce King 2003 Williams AP, Furst JE, Cleary K, Dastoor PC, 'An Integrated Learning Module: Teaching Physics to Engineers', IEE (2003) [E1] Co-authors Paul Dastoor 2002 Furst JE, Dastoor P, Cleary K, Williams A, 'Integrated Learning: a New Strategy for teaching Physics to Scientists and Engineers', Australian Institute of Sydney, Physics 15th Biennial Congress 2002 (2002) [E1] 2000 Ellem GK, Furst JE, Zimmerman KD, 'Limpet behavioural strategies in resisting wave action', Australian Marine Sciences Association Inc: Annual Conference (2000) [E3] 1993 GAY TJ, FURST JE, WIJAYARATNA WMKP, 'SOME NEW DEVELOPMENTS IN POLARIZED ELECTRON SCIENCE AND TECHNOLOGY', PHYSICS OF ELECTRONIC AND ATOMIC COLLISIONS (1993) 1993 MUELLER DW, CHALAMALA BR, 
FURST JE, NEWELL CT, PAK SM, SMITH RS, et al., 'ION ACCELERATORS AND ELECTRON-SCATTERING', NUCLEAR INSTRUMENTS & METHODS IN PHYSICS RESEARCH SECTION B-BEAM INTERACTIONS WITH MATERIALS AND ATOMS (1993) DOI 10.1016/0168-583X(93)95292-D 1992 GAY TJ, FURST JE, GEESMANN H, KHAKOO MA, MADISON DH, WIJAYARATNA WMKP, BARTSCHAT K, 'OPTICAL STUDIES OF POLARIZED-ELECTRON NOBLE-GAS COLLISIONS', CORRELATIONS AND POLARIZATION IN ELECTRONIC AND ATOMIC COLLISIONS AND ( E,2E ) REACTIONS (1992) 1991 ZHOU JX, GOLDEN DE, FURST JE, MUELLER DW, 'A HIGH-ENERGY RESOLUTION MODULATED GAAS PHOTOELECTRON SOURCE', NUCLEAR INSTRUMENTS & METHODS IN PHYSICS RESEARCH SECTION B-BEAM INTERACTIONS WITH MATERIALS AND ATOMS (1991) DOI 10.1016/0168-583X(91)95123-U Web of Science - 3 Show 27 more conferences ### Other (1 outputs) 2014 Furst J, Gladys M, Holdsworth JL, 'The impact of on-line e-learning systems and student engagement', ( pp.1-1). Canberra: ANU (2014) [O1] Edit Number of grants 10 $67,501 Click on a grant title below to expand the full details for that specific grant. 
## 20071 grants /$2,500 ### A new approach to quality control in the macadamia industry.$8,000 Funding body: University of Newcastle Funding body University of Newcastle Doctor John Furst Project Grant Lead 2000 2000 G0178983 Internal INTE Y ### Atomic and Molecular Physics and Quantum Chemistry meeting of the Australian Institute of Physics Congress, Adelaide, 12-15 December 2000$410 Funding body: University of Newcastle Funding body University of Newcastle Doctor John Furst Travel Grant Lead 2000 2000 G0180569 Internal INTE Y ## 19981 grants / $10,000 ### Application of the near infra-red spectroscopy (NIR) as a predictor of marbling in the beef export industry.$10,000 Funding body: University of Newcastle Funding body University of Newcastle Dr Fa'Ale Tumaalii, Doctor John Furst Project Grant Investigator 1998 1998 G0177222 Internal INTE Y ## 19972 grants / $22,400 ### Use of near-infrared relectance spectrometry and optical imaging systems as a rapid method of quality grading macadamia nuts.$12,000 Funding body: ARC (Australian Research Council) Funding body ARC (Australian Research Council) Emeritus Professor Ronald Wills, Doctor John Furst Small Grant Investigator 1997 1997 G0176778 Scheme excluded from IGS EXCL Y ### Medium Energy Ion Scattering from Caesium: A study of the ionisation of caesium by He+\$13,000 Funding body: University of Newcastle Funding body University of Newcastle Doctor John Furst New Staff Grant Lead 1996 1996 G0176115 Internal INTE Y Edit ## Research Supervision Completed2 Current1 PhD0.2 ### Current Supervision Commenced Level of Study Research Title Program Supervisor Type 2013 PhD High Pressure Processing of Barramundi Fish (Lates Calcarifer) PhD (Food Science), Faculty of Science, The University of Newcastle Co-Supervisor ### Past Supervision Year Level of Study Research Title Program Supervisor Type 2006 PhD Variability in Wheat Biscuits and the Influence of Wheat Protein Content, Cooking Temperature and Time on Cooked Wheat 
Properties PhD (Food Science), Faculty of Science, The University of Newcastle Principal Supervisor 2002 PhD On the BIomechanics, ecology and evolution of Limpet Shell Design Marine Science, University of Newcastle Co-Supervisor Edit ## Dr John Furst ### Position Senior Lecturer School of Mathematical and Physical Sciences Faculty of Science Physics ### Contact Details Email john.furst@newcastle.edu.au (02) 4348 4117 (02) 4348 4145 ### Office Room E121 (Ourimbah)/P108 (Callaghan) Sciences Offices. Ourimbah10 Chittaway RoadOurimbah, NSW 2258Australia Edit
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.2977125346660614, "perplexity": 14734.339398573435}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-43/segments/1508187824820.28/warc/CC-MAIN-20171021152723-20171021172723-00276.warc.gz"}
https://xianblog.wordpress.com/tag/summary-statistics/
## not Bayesian enough?! Posted in Books, Statistics, University life with tags , , , , , , , on January 23, 2015 by xi'an Our random forest paper was alas rejected last week. Alas because I think the approach is a significant advance in ABC methodology when implemented for model choice, avoiding the delicate selection of summary statistics and the report of shaky posterior probability approximation. Alas also because the referees somewhat missed the point, apparently perceiving random forests as a way to project a large collection of summary statistics on a limited dimensional vector as in the Read Paper of Paul Fearnhead and Dennis Prangle, while the central point in using random forests is the avoidance of a selection or projection of summary statistics.  They also dismissed our approach based on the argument that the reduction in error rate brought by random forests over LDA or standard (k-nn) ABC is “marginal”, which indicates a degree of misunderstanding of what the classification error stands for in machine learning: the maximum possible gain in supervised learning with a large number of classes cannot be brought arbitrarily close to zero. Last but not least, the referees did not appreciate why we mostly cannot trust posterior probabilities produced by ABC model choice and hence why the posterior error loss is a valuable and almost inevitable machine learning alternative, dismissing the posterior expected loss as being not Bayesian enough (or at all), for “averaging over hypothetical datasets” (which is a replicate of Jeffreys' famous criticism of p-values)! Certainly a first time for me to be rejected based on this argument! ## ABC with emulators Posted in Books, Statistics with tags , , , , , , , on January 9, 2015 by xi'an A paper on the comparison of emulation methods for Approximate Bayesian Computation was recently arXived by Jabot et al. 
The idea is to bypass costly simulations of pseudo-data by running cheaper simulation from a pseudo-model or emulator constructed via a preliminary run of the original and costly model. To borrow from the paper introduction, ABC-Emulation runs as follows: 1. design a small number n of parameter values covering the parameter space; 2. generate n corresponding realisations from the model and store the corresponding summary statistics; 3. build an emulator (model) based on those n values; 4. run ABC using the emulator in lieu of the original model. A first emulator proposed in the paper is to use local regression, as in Beaumont et al. (2002), except that it goes the reverse way: the regression model predicts a summary statistics given the parameter value. The second and last emulator relies on Gaussian processes, as in Richard Wilkinson‘s as well as Ted Meeds’s and Max Welling‘s recent work [also quoted in the paper]. The comparison of the above emulators is based on an ecological community dynamics model. The results are that the stochastic version is superior to the deterministic one, but overall not very useful when implementing the Beaumont et al. (2002) correction. The paper however does not define what deterministic and what stochastic mean… “We therefore recommend the use of local regressions instead of Gaussian processes.” While I find the conclusions of the paper somewhat over-optimistic given the range of the experiment and the limitations of the emulator options (like non-parametric conditional density estimation), it seems to me that this is a direction to be pursued as we need to be able to simulate directly a vector of summary statistics instead of the entire data process, even when considering an approximation to the distribution of those summaries. 
## an ABC experiment Posted in Books, pictures, R, Statistics, University life with tags , , , , , , , , on November 24, 2014 by xi'an In a cross-validated forum exchange, I used the code below to illustrate the working of an ABC algorithm: #normal data with 100 observations n=100 x=rnorm(n) #observed summaries #normal x gamma prior priori=function(N){ return(cbind(rnorm(N,sd=10), 1/sqrt(rgamma(N,shape=2,scale=5)))) } ABC=function(N,alpha=.05){ prior=priori(N) #reference table #pseudo-data summ=matrix(0,N,2) for (i in 1:N){ xi=rnorm(n)*prior[i,2]+prior[i,1] } #normalisation factor for the distance #distance #selection posterior=prior[dist<quantile(dist,alpha),]} Hence I used the median and the mad as my summary statistics. And the outcome is rather surprising, for two reasons: the first one is that the posterior on the mean μ is much wider than when using the mean and the variance as summary statistics. This is not completely surprising in that the latter are sufficient, while the former are not. Still, the (-10,10) range on the mean is way larger… The second reason for surprise is that the true posterior distribution cannot be derived since the joint density of med and mad is unavailable. After thinking about this for a while, I went back to my workbench to check the difference with using mean and variance. To my greater surprise, I found hardly any difference! Using the almost exact ABC with 10⁶ simulations and a 5% subsampling rate returns exactly the same outcome. (The first row above is for the sufficient statistics (mean,standard deviation) while the second row is for the (median,mad) pair.) Playing with the distance does not help. The genuine posterior output is quite different, as exposed on the last row of the above, using a basic Gibbs sampler since the posterior is not truly conjugate. 
## Sequentially Constrained Monte Carlo Posted in Books, Mountains, pictures, Statistics, University life with tags , , , , , , , , , , on November 7, 2014 by xi'an This newly arXived paper by S. Golchi and D. Campbell from Vancouver (hence the above picture) considers the (quite) interesting problem of simulating from a target distribution defined by a constraint. This is a question that has bothered me for a long while as I could not come up with a satisfactory solution all those years… Namely, when considering a hard constraint on a density, how can we find a sequence of targets that end up with the restricted density? This is of course connected with the zero measure case posted a few months ago. For instance, how do we efficiently simulate a sample from a Student’s t distribution with a fixed sample mean and a fixed sample variance? “The key component of SMC is the filtering sequence of distributions through which the particles evolve towards the target distribution.” (p.3) This is indeed the main issue! The paper considers using a sequence of intermediate targets hardening progressively the constraint(s), along with an SMC sampler, but this recommendation remains rather vague and hence I am at loss as to how to make it work when the exact constraint implies a change of measure. The first example is monotone regression where y has mean f(x) and f is monotone. (Everything is unidimensional here.) The sequence is then defined by adding a multiplicative term that is a function of ∂f/∂x, for instance Φ(τ∂f/∂x), with τ growing to infinity to make the constraint moving from soft to hard. An interesting introduction, even though the hard constraint does not imply a change of parameter space or of measure. The second example is about estimating the parameters of an ODE, with the constraint being the ODE being satisfied exactly. Again, not exactly what I was looking for. But with an exotic application to deaths from the 1666 Black (Death) plague. 
And then the third example is about ABC and the choice of summary statistics! The sequence of constraints is designed to keep observed and simulated summary statistics close enough when the dimension of those summaries increases, which means they are considered simultaneously rather than jointly. (In the sense of Ratmann et al., 2009. That is, with a multidimensional distance.) The model used for the application of the SMC is the dynamic model of Wood (2010, Nature). The outcome of this specific implementation is not that clear compared with alternatives… And again sadly does not deal with the/my zero measure issue. ## Relevant statistics for Bayesian model choice [hot off the press!] Posted in Books, Statistics, University life with tags , , , , , , on October 30, 2014 by xi'an Our paper about evaluating statistics used for ABC model choice has just appeared in Series B! It is somewhat paradoxical that it comes out just a few days after we submitted our paper on using random forests for Bayesian model choice, thus bypassing the need for selecting those summary statistics by incorporating all statistics available and letting the trees automatically rank those statistics in terms of their discriminating power. Nonetheless, this paper remains an exciting piece of work (!) as it addresses the more general and pressing question of the validity of running a Bayesian analysis with only part of the information contained in the data. Quite useful in my (biased) opinion when considering the emergence of approximate inference already discussed on this ‘Og… [As a trivial aside, I had first used fresh from the press(es) as the bracketed comment, before I realised the meaning was not necessarily the same in English and in French.] 
## reliable ABC model choice via random forests Posted in pictures, R, Statistics, University life with tags , , , , , , , on October 29, 2014 by xi'an After a somewhat prolonged labour (!), we have at last completed our paper on ABC model choice with random forests and submitted it to PNAS for possible publication. While the paper is entirely methodological, the primary domain of application of ABC model choice methods remains population genetics and the diffusion of this new methodology to the users is thus more likely via a medium like PNAS than via a machine learning or statistics journal. When compared with our recent update of the arXived paper, there is not much difference in contents, as it is mostly an issue of fitting the PNAS publication canons. (Which makes the paper less readable in the posted version [in my opinion!] as it needs to fit the main document within the compulsory six pages, relegating part of the experiments and of the explanations to the Supplementary Information section.) ## insufficient statistics for ABC model choice Posted in Books, Kids, Statistics, University life with tags , , , , , , , , , , , on October 17, 2014 by xi'an [Here is a revised version of my comments on the paper by Julien Stoehr, Pierre Pudlo, and Lionel Cucala, now to appear [both paper and comments] in Statistics and Computing special MCMSki 4 issue.] Approximate Bayesian computation techniques are 2000’s successors of MCMC methods as handling new models where MCMC algorithms are at a loss, in the same way the latter were able in the 1990’s to cover models that regular Monte Carlo approaches could not reach. While they first sounded like “quick-and-dirty” solutions, only to be considered until more elaborate solutions could (not) be found, they have been progressively incorporated within the statistician’s toolbox as a novel form of non-parametric inference handling partly defined models. 
A statistically relevant feature of those ABC methods is that they require replacing the data with smaller dimension summaries or statistics, because of the complexity of the former. In almost every case when calling ABC is the unique solution, those summaries are not sufficient and the method thus implies a loss of statistical information, at least at a formal level since relying on the raw data is out of the question. This forced reduction of statistical information raises many relevant questions, from the choice of summary statistics to the consistency of the ensuing inference. In this paper of the special MCMSki 4 issue of Statistics and Computing, Stoehr et al. attack the recurrent problem of selecting summary statistics for ABC in a hidden Markov random field, since there is no fixed dimension sufficient statistics in that case. The paper provides a very broad overview of the issues and difficulties related with ABC model choice, which has been the focus of some advanced research only for a few years. Most interestingly, the authors define a novel, local, and somewhat Bayesian misclassification rate, an error that is conditional on the observed value and derived from the ABC reference table. It is the posterior predictive error rate $\mathbb{P}^{\text{ABC}}(\hat{m}(y^{\text{obs}})\ne m|S(y^{\text{obs}}))$ integrating in both the model index m and the corresponding random variable Y (and the hidden intermediary parameter) given the observation. Or rather given the transform of the observation by the summary statistic S. The authors even go further to define the error rate of a classification rule based on a first (collection of) statistic, conditional on a second (collection of) statistic (see Definition 1). A notion rather delicate to validate on a fully Bayesian basis. And they advocate the substitution of the unreliable (estimates of the) posterior probabilities by this local error rate, estimated by traditional non-parametric kernel methods. 
Methods that are calibrated by cross-validation. Given a reference summary statistic, this perspective leads (at least in theory) to select the optimal summary statistic as the one leading to the minimal local error rate. Besides its application to hidden Markov random fields, which is of interest per se, this paper thus opens a new vista on calibrating ABC methods and evaluating their true performances conditional on the actual data. (The advocated abandonment of the posterior probabilities could almost justify the denomination of a paradigm shift. This is also the approach advocated in our random forest paper.)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 1, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8730115294456482, "perplexity": 841.3805207601363}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-14/segments/1427131302318.88/warc/CC-MAIN-20150323172142-00083-ip-10-168-14-71.ec2.internal.warc.gz"}
https://www.physicsforums.com/threads/paulis-wave-mechanics-text-h-vs-hbar.280077/
# Pauli's Wave Mechanics text. h vs. hbar 1. Dec 16, 2008 ### Peeter In this little Dover book "Wave mechanics", by Pauli, it appears to use h for hbar, and includes a footnote right on the very first page "1. In these lectures we use the symbol h to denote the quantity 1.05 x 10^-34 joule.sec. In the older literature this quantity was usually denoted by $\hbar$" However, this is what I've seen in the newer literature too. Are the definitions of these constants in a state of flux? 2. Dec 16, 2008 ### f95toli No, $$\hbar$$ should always represent Planck's constant divided by 2 pi in modern text. I don't think I've ever encountered an example where someone has specifically used $$h$$ instead of $$\hbar$$ or vice versa. There are, however, examples of texts where the author is ex. ignoring factors of 2pi etc simply because they are interested in order-of-magnitude estimates and factors of the order of one do not matter. Also, note that formulas where CGS electromagnetic units are used often differ by a factor of 2pi compared to the equivalent formulas in SI (and there are plenty of examples of papers where the authors have added/removed a 2pi too many). 3. Dec 16, 2008 ### dlgoff Last edited by a moderator: Apr 24, 2017 Similar Discussions: Pauli's Wave Mechanics text. h vs. hbar
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9760199189186096, "perplexity": 1341.2213094688511}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-09/segments/1518891813088.82/warc/CC-MAIN-20180220185145-20180220205145-00664.warc.gz"}
http://www.computer.org/csdl/trans/tg/2003/01/v0003-abs.html
Subscribe Issue No.01 - January-March (2003 vol.9) pp: 3-15 ABSTRACT <p><b>Abstract</b>—We advocate the use of point sets to represent shapes. We provide a definition of a smooth manifold surface from a set of points close to the original surface. The definition is based on local maps from differential geometry, which are approximated by the method of moving least squares (MLS). The computation of points on the surface is local, which results in an out-of-core technique that can handle any point set. We show that the approximation error is bounded and present tools to increase or decrease the density of the points, thus allowing an adjustment of the spacing among the points to control the error. To display the point set surface, we introduce a novel point rendering technique. The idea is to evaluate the local maps according to the image resolution. This results in high quality shading effects and smooth silhouettes at interactive frame rates.</p> INDEX TERMS Surface representation and reconstruction, moving least squares, point sample rendering, 3D acquisition. CITATION Marc Alexa, Johannes Behr, Daniel Cohen-Or, Shachar Fleishman, David Levin, Claudio T. Silva, "Computing and Rendering Point Set Surfaces", IEEE Transactions on Visualization & Computer Graphics, vol.9, no. 1, pp. 3-15, January-March 2003, doi:10.1109/TVCG.2003.1175093
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8047665357589722, "perplexity": 1289.0000073305328}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-35/segments/1440645208021.65/warc/CC-MAIN-20150827031328-00333-ip-10-171-96-226.ec2.internal.warc.gz"}
http://www.numdam.org/item/M2AN_2014__48_4_1029_0/
High order semi-lagrangian particle methods for transport equations: numerical analysis and implementation issues ESAIM: Mathematical Modelling and Numerical Analysis - Modélisation Mathématique et Analyse Numérique, Volume 48 (2014) no. 4, p. 1029-1060 This paper is devoted to the definition, analysis and implementation of semi-Lagrangian methods as they result from particle methods combined with remeshing. We give a complete consistency analysis of these methods, based on the regularity and momentum properties of the remeshing kernels, and a stability analysis of a large class of second and fourth order methods. This analysis is supplemented by numerical illustrations. We also describe a general approach to implement these methods in the context of hybrid computing and investigate their performance on GPU processors as a function of their order of accuracy. DOI : https://doi.org/10.1051/m2an/2014009 Classification:  65M12,  65M75,  65Y05,  65Y20 Keywords: advection equations, particle methods, semi-lagrangian methods, GPU computing @article{M2AN_2014__48_4_1029_0, author = {Cottet, G.-H. and Etancelin, J.-M. and Perignon, F. and Picard, C.}, title = {High order semi-lagrangian particle methods for transport equations: numerical analysis and implementation issues}, journal = {ESAIM: Mathematical Modelling and Numerical Analysis - Mod\'elisation Math\'ematique et Analyse Num\'erique}, publisher = {EDP-Sciences}, volume = {48}, number = {4}, year = {2014}, pages = {1029-1060}, doi = {10.1051/m2an/2014009}, mrnumber = {3264345}, language = {en}, url = {http://www.numdam.org/item/M2AN_2014__48_4_1029_0} } Cottet, G.-H.; Etancelin, J.-M.; Perignon, F.; Picard, C. High order semi-lagrangian particle methods for transport equations: numerical analysis and implementation issues. ESAIM: Mathematical Modelling and Numerical Analysis - Modélisation Mathématique et Analyse Numérique, Volume 48 (2014) no. 4, pp. 1029-1060. doi : 10.1051/m2an/2014009. 
http://www.numdam.org/item/M2AN_2014__48_4_1029_0/ [1] M. Bergdorf, G.-H. Cottet and P. Koumoutsakos, Multilevel adaptive particle methods for convection-diffusion equations. SIAM Multiscale Model. Simul. 4 (2005) 328-357. | MR 2164720 | Zbl 1088.76055 [2] M. Bergdorf and P. Koumoutsakos, A lagrangian particle-wavelet method. SIAM Multiscale Model. Simul. 5 (2006) 980-995. | MR 2272307 | Zbl 1122.65085 [3] F. Büyükkeçeci, O. Awile and I. Sbalzarini, A portable opencl implementation of generic particle-mesh and mesh-particle interpolation in 2d and 3d. Parallel Comput. 39 (2013) 94-111. [4] A. Chorin, Numerical study of slightly viscous flow. J. Fluid Mech. 57 (1973) 785-796. | MR 395483 [5] C. Cocle, G. Winckelmans and G. Daeninck, Combining the vortex-in-cell and parallel fast multipole methods for efficient domain decomposition simulations. J. Comput. Phys. 227 (2008) 9091-9120. | MR 2463200 | Zbl pre05355893 [6] C. Cotter, J. Frank and S. Reich, The remapped particle-mesh semi-lagrangian advection scheme. Q. J. Meteorol. Soc. 133 (2007) 251-260. [7] G.-H. Cottet and P. Koumoutsakos, Vortex methods. Cambridge University Press (2000). | MR 1755095 | Zbl 0953.76001 [8] G.-H. Cottet and L. Weynans, Particle methods revisited: a class of high order finite-difference methods. C.R. Math. 343 (2006) 51-56. | MR 2241959 | Zbl 1096.65084 [9] N. Crouseilles, T. Respaud and E. Sonnendrücker, A forward semi-lagrangian method for the numerical solution of the vlasov equation. Comput. Phys. Commun. 180 (2009) 1730-1745. | MR 2678446 | Zbl 1197.82012 [10] R. Hockney and J. Eastwood, Simulation Using Particles. Inst. Phys. Publ. (1988). [11] A. Klöckner, N. Pinto, Y. Lee, B. Catanzaro, P. Ivanov and A. Fasih, PyCUDA and PyOpenCL: A Scripting-Based Approach to GPU Run-Time Code Generation. Parallel Comput. 38 (2012) 157-174. [12] P. Koumoutsakos, Inviscid axisymmetrization of an elliptical vortex. J. Comput. Phys. 138 (1997) 821-857. | MR 1607496 | Zbl 0902.76080 [13] P. 
Koumoutsakos and A. Leonard, High resolution simulation of the flow around an impulsively started cylinder using vortex methods. J. Fluid Mech. 296 (1995) 1-38. | Zbl 0849.76061 [14] S. Labbé, J. Laminie and V. Louvet, Méthodologie et environnement de développement orientés objets: de l'analyse mathématique à la programmation. MATAPLI 70 (2003) 79-92. [15] J.-B. Lagaert, G Balarac, and G.-H. Cottet, Hybrid spectral particle method for turbulent transport of passive scalar. J. Comput. Phys. 260 (2014) 127-142. | MR 3151833 [16] A. Leonard. Computing three-dimensional incompressible flows with vortex elements. Annu. Rev. Fluid Mech. 17 (1985) 523-559. | Zbl 0596.76026 [17] R.J. Leveque, High-resolution conservative algorithms for advection in incompressible flow. SIAM J. Numer. Anal. 33 (1996) 627-665. | MR 1388492 | Zbl 0852.76057 [18] A. Magni and G.-H. Cottet, Accurate, non-oscillatory, remeshing schemes for particle methods. J. Comput. Phys. 231 (2012) 152-172. | MR 2846992 | Zbl pre06044227 [19] J. Monaghan, Extrapolating B splines for interpolation. J. Comput. Phys. 60 (1985) 253-262. | MR 805872 | Zbl 0588.41005 [20] J. Monaghan, An introduction to sph. Comput. Phys. Commun. 48 (1988) 89-96. | Zbl 0673.76089 [21] A. Munshi, The OpenCL Specification. Khronos OpenCL Working Group (2011). [22] M. Ould-Salihi, G.-H. Cottet and M. El Hamraoui, Blending finite-difference and vortex methods for incompressible flow computations. SIAM J. Sci. Comput. 22 (2000) 1655-1674. | MR 1813291 | Zbl 0993.76057 [23] T. Respaud and E. Sonnendruücker, Analysis of a new class of forward semi-lagrangian schemes for the 1d Vlasov-Poisson equations. Numer. Math. 118 (2011) 329-366. | MR 2800712 | Zbl 1284.65145 [24] D. Rossinelli, M. Bergdorf, G.H. Cottet and P. Koumoutsakos, GPU accelerated simulations of bluff body flows using vortex methods. J. Comput. Phys. 229 (2010) 3316-3333. | MR 2601102 | Zbl pre05693261 [25] D. Rossinelli, C. Conti and P. 
Koumoutsakos, Mesh-particle interpolations on graphics processing units and multicorecentral processing units. Philosophical Transactions of the Royal Society A: Mathematical, Phys. Engrg. Sci. 369 (2011) 2164-2175. | MR 2795279 | Zbl 1223.68122 [26] D. Rossinelli and P. Koumoutsakos, Vortex methods for incompressible flow simulations on the GPU. Visual Comput. 24 (2008) 699-708. [27] G. Ruetsch and P. Micikevicius, Optimizing matrix transpose in cuda. NVIDIA CUDA SDK Application Note (2009). [28] I. Sbalzarini, J. Walther, M. Bergdorf, S. Hieber, E. Kotsalis and P. Koumoutsakos, PPM-a highly efficient parallel particle-mesh library for the simulation of continuum systems. J. Comput. Phys. 215 (2006) 566-588. | Zbl 1173.76398 [29] I. Schoenberg, Contribution to the problem of approximation of equidistant data by analytic functions. Q. Appl. Math. 4 (1946) 45-99. | MR 15914 | Zbl 0061.28804 [30] D. Valdez-Balderas, J. Dominguez, B. Rogers and A. Crespo, Towards accelerating smoothed particle hydrodynamics simulations for free-surface flows on multi-gpu clusters. J. Parallel Distrib. Comput. 73 (2012) 1483-1493. [31] F. De Vuyst and F. Salvarani, GPU-accelerated numerical simulations of the knudsen gas on time- dependent domains. Comput. Phys. Commun. 184 (2013) 532-536. | MR 3007037 | Zbl pre06381377 [32] R. Yokota, L. Barba, T. Narumi and K. Yasuoka, Petascale turbulence simulation using a highly parallel fast multipole method. Comput. Phys. Commun. 184 (2013) 445-455. | MR 3007029 [33] Y. Zhang, J. Cohen and J.D. Owens, Fast tridiagonal solvers on the GPU. SIGPLAN Not. 45 (2010) 127-136.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.22240181267261505, "perplexity": 9829.49788020802}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-10/segments/1581875144429.5/warc/CC-MAIN-20200219214816-20200220004816-00523.warc.gz"}
https://inference-review.com/article/the-hydrogen-bond
The general theory of quantum mechanics is now almost complete, the imperfections that still remain being in connection with the exact fitting in of the theory with relativity ideas. These give rise to difficulties only when high-speed particles are involved, and are therefore of no importance in the consideration of atomic and molecular structure and ordinary chemical reactions, in which it is, indeed, usually sufficiently accurate if one neglects relativity variation of mass with velocity and assumes only Coulomb forces between the various electrons and atomic nuclei. The underlying physical laws necessary for the mathematical theory of a large part of physics and the whole of chemistry are thus completely known… Paul Dirac1 The claim that chemistry has been completely explained in terms of quantum theory is now received wisdom among physicists and chemists. Yet quantum physics is able neither to predict nor explain the strong association of water molecules in liquid or ice. Quantum chemistry algorithms either exclude hydrogen bonded (H-bonded) systems, or treat them by modeling a water molecule as an asymmetric tetrahedron having two positive and two negative electrical charges at its vertices. Recent calculations of the potential energy surface of the simple water dimer {H2O}2 yield 30,000 ab initio energies at the CCSD(T) level.2 But free OH-stretches are below experimental values by 30-40cm-1 and their dissociation energy 1.1kJ·mol-1 below benchmark experimental values. To obtain satisfactory agreement with experiment, it is necessary to replace ab initio potentials with spectroscopically accurate measurements. This is hardly a ringing endorsement of the underlying theory. In this essay, I defend the thesis that the hydrogen bond is an emergent property of matter resulting from a non-linear coupling between quantified energy levels of water molecules and a quantified internal electromagnetic field. 
Such a coupling leads to the emission of Nambu–Goldstone gauge bosons yielding Bose–Einstein condensates at relatively high temperatures, thus forming liquid water or ice. These ideas have been developed by physicists at the Universities of Milan and Naples, students in fact, or in spirit, of Hiroomi Umezawa, one of the founders of quantum field theory (QFT).3 ## A Short History of the Hydrogen Bond In June 1611, a strong heat wave enveloped Tuscany; and with the heat proving stifling, Galileo Galilei decamped for the nearby villa of his friend Filippo Salviati.4 Since it was hot, Galileo determined to discuss things that were cold. His interlocutors were two professors from the University of Pisa, Vincenzo di Grazia and Giorgio Coresio. According to Aristotelian physics, ice, since it was obviously colder than water, should comprise water minus a certain amount of fire or heat. This led water to condense into a solid. It is in the nature of air or fire to move upwards, di Grazia and Coresio observed, no doubt persuaded that they were recounting the obvious, but in the nature of solids to move downwards. It follows that ice remained buoyant because of its large flat shape, which prevented water from penetrating its surface and dragging it down where it belonged. Galileo demurred. The correct analysis of ice and water should begin with the assumption that ice is less dense than water. It floats because it is less dense. Galileo considered ice as water with more by way of volume; Aristotelian scholars, as water with less by way of heat. The disagreement came to the ears of the philosopher Lodovico delle Colombe, who, having been mocked by Galileo a few years before, saw in the dispute an occasion for revenge. Delle Colombe set up a spectacular experiment in which he compared the behavior of spheres and wafers made of ebony, a substance known to be denser than water. Delle Colombe demonstrated that the spheres always sank when dropped in water; the wafers did not. 
This was, he supposed, proof that buoyancy was more a matter of shape than density. Stung by these experiments, Galileo began writing a fifteen-page essay in September 1611 dedicated to his mentor, Cosimo II de’ Medici. Galileo’s treatise is the very first book devoted to the subject of hydrogen bonding in water.5 Ice floats in water. True enough. Modern theories affirm that in ice, water molecules are associated by linear hydrogen bonds. On melting, these bonds begin, spaghetti-like, to bend. Bending causes the liquid to become denser than the solid between 0°C and 4°C. But above 4°C, radial expansion wins over angular bending; as with most other liquids, the higher the temperature, the lower the density. For all that, Galileo was still not able clearly to explain why ebony wafers float. The collapse of the Aristotelian conception of nature at the end of the eighteenth century marked a new era in the study of water. A clear distinction emerged between atoms, on the one hand, and molecules, on the other. In 1796, the French chemist Joseph Louis Proust, in affirming the law of definite proportions, suggested that compounds combine by weight and in simple proportions.6 Proust found that 1g of hydrogen would combine with 8g of oxygen, or 2g with 16g, or 4g with 32g, but never in fractional proportions. This observation was refined in 1804 by the English chemist John Dalton. Water, Dalton argued, is HO, where H designates a hydrogen atom, and O, an atom of oxygen.7 This was simple, clear-cut, and wrong. It was the Italian chemist Amedeo Avogadro, who in 1811 deduced that water is, in fact, H2O, and not HO. 
In order to obtain two volumes of water, Avogadro observed, it is necessary to mix two volumes of hydrogen with one volume of oxygen: 2H2 + O2 = 2H2O.8 Avogadro’s masterful analysis gave force to the idea that, among other things, atoms can form bonds.9 For all the success of the atomic theory of matter, it soon became obvious that something very strange was occurring when hydrogen combined with elements such as oxygen, nitrogen, or fluorine. Studies involving the liquefaction of gases indicated that the higher their molecular weight, the higher their boiling or melting point, and the higher their latent heat of vaporization or melting. This was clearly not the case for HF with respect to HCl-HBr-HI, or H2O with respect to H2S-H2Se-H2Te, or NH3 with respect to PH3-AsH3-SbH3.10 Various anomalies were noted with respect to congelation points and the vapor density curves of several liquid mixtures.11 Nor was it possible to explain on rational grounds the base constants of ammonia and its substituted amines.12 Very soon after the discovery of X-ray diffraction, it became obvious that in ice, water molecules were strongly associated in a clear tetrahedral crystalline structure, one characterized by large hexagonal channels in which various gases were trapped.13 The discovery of the HF2- ion in 1923,14 and the observation in 1925 of strong variations in the stretching frequency of the O-H bond15 (so-called stretch marks), provided clear evidence that chemical bonding was a more complex phenomenon than first thought. It was far more complex. During the first decades of the twentieth century, Wendell M. Latimer, Gilbert Newton Lewis, and Worth H. Rodebush, at the University of California, attempted to understand weak bonds in strong theoretical terms. They were, to a certain extent, forced to fumble. In May 1919, a graduate student at the University of California named Maurice L. 
Huggins conjectured that the hydrogen nucleus might be held in suspension between the octets of two other atoms. His teacher, William C. Bray, addressed him in words that were as assured as they were incorrect: Huggins, there are several interesting ideas in this paper, but there is one you’ll never get chemists to believe: the idea that a hydrogen atom can be bonded to two other atoms at the same time.16 The British chemist Nevil Sidgwick was apparently one of the believing chemists, arguing strongly in 1924 for the existence of intramolecular hydrogen bonds.17 He seems to have had as much luck as Huggins, the English chemist Henry Edward Armstrong, rejecting, in somewhat florid terms, the very idea of what he called a bigamous hydrogen atom.18 The association between water molecules, he insisted, was a matter of oxygen-oxygen bonds. Chaste English chemists referred to the hydrogen bond with great, but understandable, reluctance during the 1920s. It was Linus Pauling who, in the end, brought something like chemical respectability to the hydrogen bond.19 Pauling was well trained in crystallography; and fascinated by quantum mechanics. Pauling thought at first, that if the hydrogen bond really exists, it must be purely electrostatic.20 In 1934, Pauling and Lawrence Brockway experimentally confirmed Latimer and Rodebush’s conjecture that carboxylic acids could indeed form hydrogen bonds;21 and it is in this important paper that Pauling began to doubt their electrostatic character. A resonance between ionic and covalent forms of carboxylic groups, Pauling suggested, served to establish the stability of the hydrogen bond. Twelve months later, Pauling published a paper in which he argued that the structure and residual entropy of hexagonal ice is linked to the intrinsic asymmetry of the hydrogen bond itself.22 Pauling used only neutral molecular formulas for water. He had discarded for good the idea of electrostatic bonding between hydrogen atoms. 
It was left to William H. Zachariasen, in his study of the structure of liquid methyl alcohol, to invoke for the first time the dipolar nature of the hydrogen bond. “Every hydrogen atom,” he wrote, “is thus linked to two oxygen atoms.”23 The crucial point now follows: it is “undoubtedly … linked more strongly to one of the oxygen atoms than to the other …”24 “Naturally,” he added serenely, “if we wish to characterize the nature of these hydrogen bonds, we should employ the term dipole bonding.”25 The hydrogen bond gained official recognition during a meeting of the Faraday Society held in Edinburgh in 1936. In his keynote lecture, Joel Hildebrand offered a benediction: It is becoming evident, again, that the term “association” under which we have lumped all departures from normal behavior, must be subdivided into association arising from the interaction of dipoles, and that due to the formation of definite chemical bonds. Of these, perhaps the most interesting are the hydrogen bonds or “bridges” between oxygen, nitrogen, or fluorine atoms, a species of chemical interaction.26 French, German, and Japanese scientists embraced the hydrogen bond in short order, one by one.27 ## An Official Definition In 2011, the International Union of Pure and Applied Chemistry (IUPAC) embraced the hydrogen bond within the folds of an official definition: The hydrogen bond is an attractive interaction between a hydrogen atom from a molecule or a molecular fragment X–H in which X is more electronegative than H, and an atom or a group of atoms in the same or a different molecule, in which there is evidence of bond formation. A typical hydrogen bond may be depicted as X–H•••Y–Z, where the three dots denote the bond. … The evidence for hydrogen bond formation may be experimental or theoretical, or ideally, a combination of both.28 This definition is useless as it stands, an observation not wasted on the IUPAC. 
In order further to clarify their ideas, they appended to their definition a list of twelve emendations:29 (E1) The forces involved in the formation of a hydrogen bond include those of an electrostatic origin, those arising from charge transfer between the donor and acceptor leading to partial covalent bond formation between H and Y, and those originating from dispersion. (E2) The atoms X and H are covalently bonded to one another and the X–H bond is polarized, the H•••Y bond strength increasing with the increase in electronegativity of X. (E3) The X–H•••Y angle is usually linear (180°) and the closer the angle is to 180°, the stronger is the hydrogen bond and the shorter is the H•••Y distance. (E4) The length of the X–H bond usually increases on hydrogen bond formation leading to a red shift in the infrared X–H stretching frequency and an increase in the infrared absorption cross-section for the X–H stretching vibration. The greater the lengthening of the X–H bond in X–H•••Y, the stronger is the H•••Y bond. Simultaneously, new vibrational modes associated with the formation of the H•••Y bond are generated. (E5) The X–H•••Y–Z hydrogen bond leads to characteristic NMR signatures that typically include pronounced proton deshielding for H in X–H, through hydrogen bond spin–spin couplings between X and Y, and nuclear Overhauser enhancements. (E6) The Gibbs energy of formation for the hydrogen bond should be greater than the thermal energy of the system for the hydrogen bond to be detected experimentally. (C1) The pKa of X–H and pKb of Y–Z in a given solvent correlate strongly with the energy of the hydrogen bond formed between them. (C2) Hydrogen bonds are involved in proton-transfer reactions (X–H•••Y → X•••H–Y) and may be considered the partially activated precursors to such reactions. (C3) Networks of hydrogen bonds can show the phenomenon of cooperativity, leading to deviations from pairwise additivity in hydrogen bond properties. 
(C4) Hydrogen bonds show directional preferences and influence packing modes in crystal structures. (C5) Estimates of charge transfer in hydrogen bonds show that the interaction energy correlates well with the extent of charge transfer between the donor and the acceptor. (C6) Analysis of the electron density topology of hydrogen-bonded systems usually shows a bond path connecting H and Y and a (3,–1) bond critical point between H and Y.30 It is worth noting that (E3)-(E6) and (C1)-(C6) are purely empirical, and say nothing about the physical origin of H-bonding. (E1) says only that whatever the nature of the hydrogen bond, it is not entirely covalent, a point never at issue; while (E2) refers to a putative bond strength instead of referring to its more physical and measurable stabilization energy. According to classical physics, water is a diamagnetic substance that should display a very low sensitivity to permanent magnetic fields. At the same time, because of its high electric dipolar moment p0 = 1.85498D in the vapor state, it should react strongly with static electric fields. But in this regard, theory and experiment are in conflict. Maxwell equations predict a static relative dielectric constant of εr ≈ 13, far from the experimental value of εr ≈ 80. What is more, Maxwell equations suggest dipolar interaction energies of 0.05eV for direct dipole-dipole Keesom interactions; 0.03eV for Debye interactions between permanent and induced dipoles; and 0.12eV for London dispersive interactions between two induced dipoles. These calculations assume an oxygen-oxygen distance of 3.65Å, corresponding to the O-H covalent bond length (0.95Å), augmented by the sum of van der Waals radii of hydrogen (1.2Å) and oxygen (1.5Å). Such values cannot explain the abnormally high boiling point of liquid water, or the hydrogen bond energy of about 0.22eV. Having been invited to every party for more than a century, the hydrogen bond remains a guest without a face. 
We still do not understand why, with its ridiculous molecular weight, water is not a gas and why ice should float on water. ## Quantum of Action In classical physics the energy, E, of a system may become larger or smaller, but whether larger or smaller, it becomes larger or smaller continuously. There are no jumps. At the end of the nineteenth century, it became obvious that however plausible the principle, it was not true. In blackbody radiation, continuity lapses. The relationship between the energy E emitted by a black box, and its associated electromagnetic wave, is mediated by Planck’s constant h. The Planck–Einstein equation E = hν draws a connection between the energy emitted by a black box and the frequency of its atomic oscillations. The connection is counterintuitive because E must be expressed as an integral multiple of hν, so that, in effect, E = nhν, where n is a non-negative integer. In 1905, Einstein argued persuasively that light, which is an electromagnetic phenomenon, and thus wave-like in nature, makes its appearance in the world as a particle. The frequency and wavelength of a photon are governed by the equation λν = c, where c is the speed of light. The equation E = hν now reappears as E = hc / λ, having achieved a new incarnation as the description of the energy possessed by a photon and so by a particle. Some years later, Louis de Broglie, considering these relationships, concluded that if waves could be particles, particles could be waves. If p is a particle’s linear momentum, then the equation λ = h/p describes its associated wavelength λ. By the third decade of the twentieth century, physicists were in possession of a far flung series of correspondences: energy and frequency, linear momentum and wavelength, angular momentum and wave angular orientation, position and momentum. These pairs are conjugate variables, each the Fourier transform of the other. 
In 1926, Werner Heisenberg demonstrated that conjugate variables within quantum systems are bounded by an ineliminable form of uncertainty: Δx Δp ≥ ħ, where in Dirac’s notation, ħ = h/2π ≈ 10⁻³⁴ J·s. Twenty years after Planck and Einstein introduced physicists to the imperative of quantum action, Werner Heisenberg, Erwin Schrödinger, and Paul Dirac provided the theory that made sense of the facts. Quantum mechanical affairs are conducted within the confines of a complete infinite dimensional vector space equipped with an inner product—what is now known as a Hilbert space. It is this space that comprises all possible states of a given quantum system. Observables of the system are represented by linear operators, and each eigenstate of an observable corresponds to an eigenvector of an operator. The associated eigenvalue is the value of the observable. These facts make for the furniture of a quantum system. What remains to be determined is the evolution of its states. Both Werner Heisenberg and Erwin Schrödinger addressed and then solved this problem, although in quite different ways; and shortly thereafter physicists came to understand that their schemes were, deep down, the same. Since particles and waves shared in quantum theory an inner, if latent, identity, it seemed perfectly natural to Schrödinger to model the evolution of a quantum system in terms of a function ψ(x,t) that determines the amplitude of a wave. The result is a partial differential equation $i\hbar \frac{\partial }{\partial t}|\psi \rangle =\mathrm{H}|\psi \rangle$ where ψ is the wave function, and H its quantum Hamiltonian. Thereafter, quantum mechanics is governed by two assumptions. The first specifies the probability of finding a particle in a particular place and at a particular time in terms of the squared absolute value of ψ(x,t): $P\left(x,t\right)=|\psi \left(x,t\right){|}^{2}.$ The second establishes that quantum mechanics conforms to the principles of probability: ${\int }_{-\infty }^{\infty }|\psi \left(x,t\right){|}^{2}\phantom{\rule{0.2em}{0ex}}dx=1$ for all times t. 
What lends to quantum theory its very great strangeness is just that the wave function encodes a superposition of states, a point brought out vividly by Paul Dirac in his treatise: The general principle of superposition of quantum mechanics applies to the states [undisturbed motions] … of any one dynamical system. It requires us to assume that between these states there exist peculiar relationships such that whenever the system is definitely in one state we can consider it as being partly in each of two or more other states. The original state must be regarded as the result of a kind of superposition of the two or more new states, in a way that cannot be conceived on classical ideas. Any state may be considered as the result of a superposition of two or more other states, and indeed in an infinite number of ways. Conversely any two or more states may be superposed to give a new state.31 The classical distinction between matter and radiation (or matter and fields) now is seen to disappear. The evolution of any system on the atomic scale is limited to integral multiples of Planck’s constant. The first incarnation of quantum mechanics was expressed in terms of Planck’s quantum of action; and it was entirely a matter of a quantized constraint placed on a particle or on its associated wave. Quantization is performed only for particles of matter and not for the fields in which they are embedded. Such fields are treated classically through Newton’s law and Maxwell’s equations for the electromagnetic field. For all of its revolutionary implications, quantum mechanics cannot be reconciled with special relativity. Quantum mechanics is not covariant in quite the sense demanded by relativity; and far more to the point, quantum mechanics takes its particles neat, one at a time. Special relativity, as Paul Dirac realized at once, allows for the creation and annihilation of particles, something unaccounted for and so left unexplained by first quantization methods. 
## Molecular Orbital Approximations Nevertheless, one may apply first quantization within the frame of molecular orbital approximations, a technique widely employed since the late 1920s. Molecular orbital theory studies molecular bonding by approximating the positions of bonded electrons via a linear combination of their atomic orbitals. In the end, there is a natural return to quantum mechanics, achieved, for example, by applying the Hartree–Fock model to Schrödinger’s equation. Consider thus the basic C2v-symmetry of the water molecule H2O. Ten electrons must be distributed among five energy levels according to the following electronic configuration: $\left(1{\mathrm{a}}_{1}{\right)}^{2}\left(2{\mathrm{a}}_{1}{\right)}^{2}\left(1{\mathrm{b}}_{2}{\right)}^{2}\left(3{\mathrm{a}}_{1}{\right)}^{2}\left(1{\mathrm{b}}_{1}{\right)}^{2}\left(4{\mathrm{a}}_{1}{\right)}^{0}\left(2{\mathrm{b}}_{2}{\right)}^{0}$ This does not allow the establishment of partial covalence involving the Highest Occupied Molecular Orbital (HOMO) energy level, displaying b1-symmetry, and the Lowest Unoccupied Molecular Orbital (LUMO) energy level, displaying a1-symmetry. Any HOMO-LUMO interaction is doomed to fail because their integral overlap is null. One might, of course, argue that upon hydrogen bonding, symmetry is lowered, thus leading to a possible non-zero overlap. This is unsatisfying. Before hydrogen bonding, both partners display their full C2v-symmetry with zero overlap. From experiment, we know that the final symmetry of water dimers, or higher polymers, is Cs. At what distance does the symmetry change from C2v to Cs? The assumption that Cs-symmetry holds at every distance is of no help, or, at best, little help. HOMO would then represent one symmetry, LUMO another. The overlapping integral would again be zero. 
Overlap may occur through other molecular orbitals, but at both 2.75Å and 2.98Å, the overlap between the acceptor oxygen and the hydrogen-bonding proton is negative, pointing to a net anti-bonding covalent interaction.32 X-ray emission spectroscopy is evidence for the fact that the 1b1 HOMO-level is not affected by hydrogen bonding.33 Instead, a strong perturbation of the 3a1 (HOMO-1) level is observed, evidence of a rather unconventional HOMO-1/LUMO interaction. Compton scattering experiments in hexagonal ice show evidence of a neat anti-bonding, repulsive interaction between neighboring water molecules despite the quantum-mechanical, multicenter character of the wave functions.34 Topological analysis of electronic density demonstrates that it is not possible to differentiate between hydrogen bonds and mere van der Waals interactions.35 In fact, nothing in the standard quantum mechanical treatment of the water molecule points to the tetrahedral character of the water monomer. Obviously, owing to the linear character of quantum theory, it is easy to build localized wave functions, showing two lone pairs on the oxygen atom, from delocalized molecular orbitals. There is no theoretical reason why this view is better than the fully delocalized one. Given the experimental photoelectron spectrum of the water molecule, its most faithful representation should display three kinds of orbitals (two σ-bonds, one 2s-type lone pair and one 2p-type lone pair), and not two (two σ-bonds and two equivalent lone pairs), as suggested by molecular orbital theory.36 The only way to retrieve a physical picture involving two lone pairs and two σ-bonds approximately oriented towards the vertices of a tetrahedron, is to look at the positions of the largest eigenvalues and corresponding eigenvectors of the Hessian minima in their molecular electrostatic potential.37 This means reverting to a purely electrostatic view of hydrogen bonding. 
The situation is so confusing that the scientific community is today divided into two opposing camps, one camp promoting water as a random tetrahedral network with flickering hydrogen bonds,38 and the other promoting water in terms of a two-state model, one tetrahedral, the other not.39 ## One to Many Picture liquid water as a flickering network of hydrogen bonds. Neutron scattering experiments, as well as molecular dynamic simulations, have shown that the average residence time of the hydrogen atom around a water molecule is close to 1ps at T = 300K, and increases to 20ps at T = 250K.40 The five-site transferable intermolecular potential (TIP5P) water model has allowed computational chemists to determine a density maximum near 4°C at 1atm by fixing the electrical charge on each hydrogen atom at +0.241e.41 This model was also able to reproduce the density of water between -37.5°C and 62.5°C at 1atm with an average error of 0.006g cm-3, and the density of liquid water at 25°C over the range 1-10,000atm with an average error of 2%. Electromagnetic laws suggest that such a charge, moving on the picosecond timescale, may be expected to generate an electromagnetic field with a frequency of about 1012 Hz. But electromagnetic fields are treated classically when quantization is extended only to particles. A richer theoretical treatment is required. It is certainly possible to quantize a field characterized by infinitely many degrees of freedom. In first quantization, separate Hilbert spaces are required for systems with differing numbers of particles. In second quantization, the familiar Hilbert space framework is expanded to a Fock space. An arbitrary Fock space is a linear combination of n-particle Hilbert spaces. An infinite number of harmonic oscillators is assigned to each point in a Fock space. These oscillators describe every possible field-excitation mode. Two operators permit the creation of new quantum waves or their annihilation. 
We are now in the domain of quantum field theory. For all that, it is important to stress that the principles of quantum field theory are not new. They are entirely consistent with the fundamental principles of quantum mechanics. When a quantum field is unexcited, the total linear momentum of the field is zero. For each mode having a wave vector p, there exists a similar mode with wave vector –p. But the total energy of the field cannot be zero, if only because each harmonic oscillator is found in its ground state with zero point energy. This ground state is populated by an infinite number of virtual field excitations. From its vacuum state, a quantum field is able to produce any number of quanta. The vacuum is thus filled with unobservable virtual particles, and, yet, virtual or not, these particles are responsible for a wide range of real physical phenomena. The idea is curious enough to merit discussion. It is hardly self-evident. • Virtual particles prevent negatively charged electrons from collapsing onto positively charged atomic nuclei. In classical physics this is unfathomable; in quantum mechanics, the explanation is assigned to Heisenberg’s uncertainty principle. In QFT, a far richer explanation is possible. At ħ·c ≈ 200 MeV·fm, an electron falling onto a nucleus whose size is about 1fm (10-15m) encounters an ocean of virtual photons with a maximum energy of 200MeV. This value allows for the materialization of an electron/positron pair with a rest mass of about 1MeV/c2 and a maximum kinetic energy of 199MeV. Attracted by the positron’s positive charge, the falling electron disintegrates, generating 1MeV of kinetic energy that must be added to the kinetic energy of the other electron popping out of the vacuum. One falling electron disappears from the world, replaced by another electron created from the void. Since they are quanta of the same field, it is impossible to distinguish between the falling electron and the expulsed electron. 
Electrons appear for this reason to be stationary. • If virtual particles explain why atoms are stable against implosion, they also explain Faraday’s line forces, which emanate from any static electrical charge or permanent magnet.42 A virtual photon covering a distance x carries with it a certain amount of energy ∆E: ∆E·∆x ≈ ħ·c or, what amounts to the same thing, f = ∆E/∆x ≈ ħ·c/(∆x)2, where f designates a force. The result is Coulomb’s law. • The non-zero impedance of the vacuum and the finite value of the speed of light are also illustrations of the existence of virtual particles.43 Virtual particles may also be invoked in order to explain the spontaneous relaxations of atoms between excited states or towards a ground state. • Radioactive decay offers additional evidence of the existence of the vacuum’s virtual particles. Moreover, if an atom remains in an excited state, interaction with the vacuum’s virtual particles will affect the energy value at this level, a phenomenon known as the Lamb shift.44 When an atom absorbs virtual photons emitted by another atom close by, an attraction is expected by physicists, and named the London interaction by chemists. • The ratio ħ/e corresponds to an electric potential times a duration, or to a magnetic vector potential times a distance. Any quantum phase gradient generates a magnetic vector potential, whereas any variation in time generates an electric potential and vice versa. There thus exist effects of potentials on charged particles, even in the region where all the fields, and therefore the force on the particles, vanish. Such is the Aharonov–Bohm effect.45 • The existence of creation/annihilation operators implies that one may also find quantum states characterized by large fluctuations in the number of quanta having well-defined quantum phases. For large fluctuations, this implies the existence of non-orthogonal coherent states. 
These coherent states represent a bridge of sorts to classical physics inasmuch as the dynamics of a quantum harmonic oscillator resemble closely the yin and yang of a classical harmonic oscillator. A coherence domain may also be viewed as a macroscopic condensate of Nambu–Goldstone bosons, which emerge from the spontaneous symmetry breaking of the vacuum by the dipolar field of water molecules.46 • Virtual particles are also responsible for the static and dynamic Casimir effect.47 ## Water Works In first quantization, the physical and chemical properties of molecules are mainly dependent upon their HOMO and LUMO states; other excited states play a quite minor role, except in spectroscopy. This is not the case in QFT. Electrons of a water molecule are confined within a sphere having a diameter of about 0.3nm. The physical vacuum inside a water molecule is filled with virtual photons. Their energy is not enough to create electron/positron pairs, but it is, nevertheless, enough to self-excite the water molecule over the whole of its energy spectrum. Obviously, self-excitation is only possible for a very short time.48 It may well happen that during relaxation towards their ground state, virtual photons initially absorbed by a water molecule fail to return to the vacuum, but are used to excite another water molecule. This process is dependent on the density, N/V, for a given volume V containing N water molecules. Suppose that ∆E = ħω is the energy of a virtual photon emerging from the vacuum and propagating along x during t. If the average distance between two water molecules is d ≈ (V/N)1/3, two cases may be encountered, and must be distinguished. 1. d = (V/N)1/3 > λ/2π. Virtual excitations concern isolated water molecules and the virtual electromagnetic field around each water molecule fluctuates with a zero time-average. 2. d = (V/N)1/3 < λ/2π. Virtual excitations may be shared between several molecules, thus making possible condensation towards a coherent state. 
The N molecules form a coherence domain. ## Coherence Domains A coherence domain may be idealized as a sphere surrounding an internal electromagnetic field in which the field’s maximum amplitude is at its center. If such coherence domains are compactly packed, they will be at a certain inter-domain distance from each other. This means that the evanescent parts of internal fields are overlapping, and a detailed analysis of this situation leads to the conclusion that the internal field should reach its minimal value for r0 = 3π/4ωq.49 For an excitation at ωq = 12.07eV, the diameter of one coherence domain is L ≈ 2r0 ≈ 75nm, leading to a volume VDC = πL3/6 = 220,893nm3. Assuming 100% of coherence for a liquid phase at the lowest possible temperature (TS ≈ 228K), with a density very close to that of ice (ρ ≈ 0.92 g·cm-3), yields NDC = 30.8·VDC(nm3) ≈ 6.8 million water molecules. A coherence domain is definitively not a water cluster. If temperatures increase, thermal fluctuations may expel a certain fraction of the water molecules from a coherence domain, thickening the intersurface zone and decreasing the radius of the coherence domain. At x = 0, boiling occurs. At room temperature, an incoherent water film separates domains.50 It is this separation that prevents water from being a good electronic conductor. The existence of coherent and incoherent water molecules is supported by a substantial, but controversial,51 body of experimental data.52 ## Conditions for a Coherence Domain In order to determine the conditions under which a coherence domain may be formed, it is necessary to consider either transitions between ground states and excited discrete levels, or virtual transitions above the ionization threshold. Experimental data suggests the existence of a coherent coupling constant between the electromagnetic field and the photons bouncing back and forth between water molecules. 
The principle of least action leads to three coherence equations for each discrete excitation.53 The first and the second describe the absorption and emission of a virtual photon between the ground state and a given excited state, when both are mediated by the coupling constant.54 The third describes the state of the electromagnetic field when perturbed by the presence of water molecules in their ground state. This equation may have either three real roots or one real and two complex roots. In the first case, the field amplitude will have a sinusoidal variation describing a stable perturbed state, where the internal field fluctuates around its initial null value. In the second, the amplitude may grow exponentially, as in a laser, but with a flood of virtual photons reaching a non-zero macroscopic value oscillating in phase with the water matter field. This effect involves virtual photons, which means that they remain trapped within a coherence domain, reflected back and forth between water molecules. Hydrogen bonding is nothing less than the bonding energy welding water molecules together. ## Notable Results There are three coherence equations for each accessible discrete excited level of the water monomer. The input to each equation is the experimental excitation spectrum; the output, a set of four coherence parameters. The first parameter is the mixing angle between the ground state and a given excited level. The other two represent the amplitude reached by the trapped internal electromagnetic field, and variations in the quantum phase governing the reduced frequency oscillations of internal fields. A coherence domain characterized by a mixing angle of sin2(β) = 0.1 indicates that electrons in water molecules spend 10% of their time on a very diffuse 5d Rydberg state of oxygen. Coherent water molecules are a little fatter than incoherent water molecules. 
Moreover, among the five d-states, one pair (z2, x2-y2) transforms as the totally symmetric a1-representation of the C2v group, and could thus be mixed with the two molecular orbitals (2a1, 3a1). This leads to a set of four a1-levels arranged in a more or less tetrahedral configuration to minimize electronic repulsions.55 What is this if not an explanation of the basic tetrahedral structure of ice or liquid water? A coherence gap protects the coherence domain from incoherent thermal fluctuations. It may be expressed as a sum of three terms: Ecoh = Eem + Emat + Ecm, where Eem is the positive energy borrowed from the vacuum and stored in the internal electromagnetic field; Emat is the positive energy borrowed from the vacuum and used for exciting water molecules; and Ecm denotes the negative coupling energy between the electromagnetic field and the electronic currents circulating among excited water molecules. If the first term increases linearly with matter density (N/V), the last is proportional to (N/V)3/2. Consequently, there exists a critical density ρ* for which Ecm cancels Eem + Emat. When this critical density is reached, condensation into a coherence domain occurs spontaneously. The increase in matter density (N/V) and in field amplitude A0 stops as soon as the electronic clouds of the pulsating water molecules begin to overlap. At this point, repulsive energy dominates attractive energy. The coherence domain has now reached an equilibrium configuration. This level is particularly interesting for two reasons: 1. It leads to a critical density very close to the experimental critical density of water vapor. 2. A coherence domain considered as a condensate of bosons leads to a critical temperature close to the freezing point of ice. These are notable results. ## Dissolved Gases, Ionic Species Quantum field theory has deep consequences with respect to the status of dissolved gases or ionic species. 
Given the phase coherence within a coherence domain, it follows that anything that is not water, or that is unable to resonate in phase with its electromagnetic field, should be rejected outside its coherence domain. Dissolved gases accumulate at the interstices generated by coherence domain packings, and this in a highly unstable configuration favoring coalescence between bubbles. For exactly the same reason, dissolved electrolytes accumulate in competition or synergy with dissolved gases. Highly complex behavior is thus expected for the coalescence properties of nanobubbles as a function of the kind of electrolytes added to the water, a fact fully confirmed by experiments.56 A detailed analysis shows that electrolytes do not behave according to the Debye-Hückel model derived from purely classical considerations. Instead of forming a diffuse layer characterized by a Debye–Hückel length, they form a coherent plasma oscillating in resonance with the electromagnetic field trapped inside the coherence domain.57 The classical view of osmosis is not adequate for describing the status of electrolytes in aqueous solutions. This might explain the fact that very low electromagnetic fields oscillating with frequencies of a few Hz could induce molecules or ions of mesoscopic size to acquire coherent motion.58 The Hofmeister series in biology might well reflect this quantum behavior.59 The existence of coherent water stabilized by biopolymers (protein, DNA, lipidic membrane) may also explain why sodium ions that move in solution with a tightly held water shell60 are excluded from the intracellular medium, in contrast with potassium, which binds water molecules rather weakly. ## Proof of Concept The great challenge facing quantum chemists is to accumulate experimental evidence for the existence of coherence domains in liquid water. Direct observations are apt to be very difficult. Nevertheless, indirect experimental evidence already exists. 
It is well known that water masers exist in intergalactic space. They justify a clear distinction between the two nuclear spin isomers of water—ortho and para water. The coherent librations of these isomers, if observed at room temperature and in a liquid state, would be strong evidence for the existence of quantum coherence. Consider four-photon Rayleigh-wing spectroscopy of coherent librations in the range 0–50 cm−1 in Milli-Q liquid water. The observation that these coincide perfectly with the rotational spectrum of gaesous H2O is thus very encouraging.61 As the ortho ↔ para conversion is rather fast in small water clusters,62 this shows that such clusters do not exist in liquid and behave quite differently from coherent domains embodying millions of water molecules. From a structural viewpoint, it has recently become possible to encapsulate large water assemblies in giant polyoxomolybdate-based nanocapsules and characterize their inner cavity through single-crystal X-ray diffraction. In some nanocapsules, perfectly ordered structures based on Platonic and Archimedean polyhedrals, and displaying a full tetrahedral structure, are observed.63 The {H2O}100 assembly with an average density ρ ≈ 0.69g·cm-3 may help in visualizing what could be the inner core of a water coherence domain. On the other hand, there exist other nanocapsules displaying non-ordered chain-like and non-tetrahedral water assemblies,64 such as {H2O}59 with an average density ρ ≈ 0.36g·cm-3, that may correspond to fragments of incoherent water. Similarly, in some molecular dynamics simulations, large vortex-like coherent patterns appear, although the orientational memory of individual molecules is quickly lost.65 Finally, topological analysis using Voronoi polyhedra has revealed the existence of tetrahedral and non-tetrahedral patches with isosbestic points.66 ## Conclusion More than four hundred years ago, Galileo argued that ice floated on water because it had more vacuum rather than less heat. 
In this, he was correct. The vacuum plays an important role in hydrogen bonding. Condensates of such bosons or coherence domains are responsible for the amazing properties of liquid water. From theory, we know that close to its maximum density temperature of 4°C, liquid water must have an overall coherence of 50%. Just for fun, let’s associate the number 1 to each ordered coherence domain, and 0 to each patch of incoherent water. Consider a 1cm3 water droplet. As the volume of one coherence domain is about 105nm3, we should have approximately 1021/105 = 1016 domains for each cm3 of liquid water. This suggests an analogy to memory based on ferromagnetic domains. Encoding all the books of all the libraries in the world would require approximately 0.2l of liquid water. There is plenty of room at the bottom. If the vacuum is empty, as in classical physics and in old-fashioned quantum mechanics, water will reveal only a random flickering network of hydrogen bonds. Any quantum interaction is doomed to be repulsive, leaving only weakly attractive electrostatic forces. This is why the hydrogen bond was first rejected by the scientific community, for reasons that have been forgotten or overlooked. The difference between a covalent bond, a dispersive interaction, and a hydrogen bond is now very clear. Acknowledging the role played by the quantum vacuum in chemical bonding should have implications in biology. A living cell is 70% water by weight and 99% by mole. An elementary calculation shows that owing to the nanometric scale of cell biopolymers, this amount of water corresponds to, at most, four water layers around each component. It is much easier to keep coherence within such layers than in bulk liquid, even at a temperature close to 37°C. Coherence is a marked feature of any living system. 
Many years ago, Albert Szent-Györgyi wrote that: One of my difficulties with protein chemistry was that I could not imagine how such a protein molecule can “live.” Even the most involved protein structural formula looks “stupid,” if I may say so. … It looks as if some basic fact about life were still missing, without which any real understanding is impossible.67 The missing basic fact is just that the quantum vacuum uses water as a mediator of phase coherence between electromagnetic and matter fields. That ħc ≈ 200eV·nm indicates that within a protein cavity having a size of about 1nm, virtual photons with energy as high as 200eV are available for a very short time. Still, even if the probability of excitation by virtual photons is low, their amplification could be an explanation for the extraordinary catalytic power of various enzymes. Coherent water has a low ionization threshold, and is thus able to generate a tension close to –100mV when associated with incoherent water. Membrane potentials could then arise from water and not necessarily from ions. Water is life. Why should anyone be surprised? 1. Paul Dirac, “Quantum Mechanics of Many-Electron Systems,” Proceedings of the Royal Society of London. Series A, Containing Papers of a Mathematical and Physical Character 123 (1929): 714. 2. Alex Shank et al., “Accurate ab initio and ‘Hybrid’ Potential Energy Surfaces, Intramolecular Vibrational Energies, and Classical ir Spectrum of the Water Dimer,” Journal of Chemical Physics 130, no. 14 (2009): 130. Coupled clustering techniques are useful in computational chemistry, and especially in the analysis of many body systems. CCS analysis incorporates single molecular orbitals, CCSD, single and double orbitals, and CCSD(T), single and double orbitals, with an anzatz, or educated guess, for tertiary orbitals. See Christopher Cramer, Essentials of Computational Chemistry (Chichester: John Wiley & Sons, Ltd., 2002), 191–232. 3. 
Emilio Del Giudice, Giuliano Preparata, and Giuseppe Vitiello, “Water as a Free Electric Dipole Laser,” Physical Review Letters 61 (1988): 1,085–88. 4. Marie-Christine de La Souchère, “La bataille des corps flottants (The Battle of the Floating Bodies),” La Recherche 457 (2011): 106. 5. Galileo Galilei, Discorso intorno alle cose che stanno in su l'acqua o che in quella si muovono (Discourse on Floating Bodies) (Florence, Italy: Cosimo Giunti, 1612). 6. Joseph Louis Proust, “Recherches sur le bleu de Prusse (Research on Prussian Blue),” Journal de Physique 45 (1794): 334–41. 7. John Dalton, A New System of Chemical Philosophy. 3 vols. (Manchester, UK: 1808, 1810, 1827). 8. Amedeo Avogadro, “Essai d'une manière de déterminer les masses relatives des molécules élémentaires des corps, et les proportions selon lesquelles elles entrent dans ces combinaisons (Essay on Determining the Relative Masses of the Elementary Molecules of Bodies and the Proportions by Which They Enter These Combinations),” Journal de Physique 73 (1811): 58–76. 9. Jean Perrin, “Grandeur des molécules et charge de l’électron (Size of Molecules and Electron Charge),” Comptes Rendus de l'Académie des Sciences CXLVII (1908): 594–96. 10. J. H. Simons, “Hydrogen Fluoride and Its Solution,” Chemical Reviews 8 (1931): 213–35. 11. Walther Nernst, “Verteilung eines Stoffes Zwischen zwei Lösungsmitteln und zwischen Lösungmittel und Dampfraum (Distribution of a Substance between Two Solutions, and between Solution and Vapor Space),” Zeitschrift für Physikalische Chemie 8 (1891): 110–39. 12. T. S. Moore and T. F. Winmill, “The States of Amines in Aqueous Solutions,” Journal of the Chemical Society 101 (1912): 1,635–76. 13. William Bragg, “The Crystal Structure of Ice,” Proceedings of the Physical Society of London 34 (1922): 98–103. 14. Richard Bozorth, “The Crystal Structure of Potassium Hydrogen Fluoride,” Journal of the American Chemical Society 45 (1923): 2,128–32. 15. J. R. 
Collins, “Change in the Infra-Red Absorption Spectrum of Water with Temperature,” Physical Review 26 (1925): 771–779. 16. Denis Quane, “The Reception of Hydrogen Bonding by the Chemical Community: 1920-1937,” Bulletin for the History of Chemistry 7 (1990): 4. 17. Nevil Sidgwick and Robert Callow, “Abnormal Benzene Derivatives,” Journal of the Chemical Society 125 (1924): 527–38. 18. Henry Armstrong, “Bigamous Hydrogen—a Protest,” Nature 117 (1926): 553–54. 19. Linus Pauling, “The Shared-Electron Chemical Bond,” Proceedings of the National Academy of Sciences of the United States of America 14, no. 4 (1928): 359–62. 20. Linus Pauling, “The Nature of the Chemical Bond. Application of Results Obtained from the Quantum Mechanics and from a Theory of Paramagnetic Susceptibility to the Structure of Molecules,” Journal of the American Chemical Society 53, no. 4 (1931): 1,367–1,400. 21. Linus Pauling and Lawrence Brockway, “The Structure of the Carboxyl Group. I. The Investigation of Formic Acid by the Diffraction of Electrons,” Proceedings of the National Academy of Sciences of the United States of America 20, no. 6 (1934): 336–40. 22. Linus Pauling, “The Structure and Entropy of Ice and of Other Crystals with Some Randomness of Atomic Arrangement,” Journal of the American Chemical Society 57, no. 12 (1935): 2,680–84. 23. Denis Quane, “The Reception of Hydrogen Bonding by the Chemical Community: 1920-1937,” Bulletin for the History of Chemistry 7 (1990): 9. 24. Denis Quane, “The Reception of Hydrogen Bonding by the Chemical Community: 1920-1937,” Bulletin for the History of Chemistry 7 (1990): 9. 25. Denis Quane, “The Reception of Hydrogen Bonding by the Chemical Community: 1920-1937,” Bulletin for the History of Chemistry 7 (1990): 9. 26. Denis Quane, “The Reception of Hydrogen Bonding by the Chemical Community: 1920-1937,” Bulletin for the History of Chemistry 7 (1990): 10. 27. 
See San-ichiro Mizushima, Tutomu Kubota, and Yonezo Morino, “The OH-Vibration Spectrum in the Photographic Infrared,” Bulletin of the Chemical Society of Japan 12 (1937): 132–35; Fritz Kohlrausch, Der Smekal-Raman-Effekt: Ergänzungsband, 1931-37 (Berlin, J. Springer, 1938), 117. 28. Elangannan Arunan et al., “Definition of the Hydrogen Bond (IUPAC Recommendations 2011),” Pure and Applied Chemistry 83, no. 8 (2011): 1,638. 29. The (E) and (C) labels are explained as follows: The evidence for hydrogen bond formation may be experimental or theoretical, or ideally, a combination of both. Some criteria useful as evidence and some typical characteristics for hydrogen bonding, not necessarily exclusive, are listed below, numbered E# and C#, respectively. The greater the number of criteria satisfied, the more reliable is the characterization as a hydrogen bond. Elangannan Arunan et al., “Definition of the Hydrogen Bond (IUPAC Recommendations 2011),” Pure and Applied Chemistry 83, no. 8 (2011): 1,638. 30. Elangannan Arunan et al., “Definition of the Hydrogen Bond (IUPAC Recommendations 2011),” Pure and Applied Chemistry 83, no. 8 (2011): 1,638–39. 31. Paul Dirac, The Principles of Quantum Mechanics (Oxford: Clarendon Press, 1930), 12. 32. Tapan Ghanty et al., “Is the Hydrogen Bond in Water Dimer and Ice Covalent?” Journal of the American Chemical Society 122, no. 6 (2000): 1,210–14. 33. Anders Nilsson et al., “The Hydrogen Bond in Ice Probed by Soft X-ray Spectroscopy and Density Functional Theory,” Journal of Chemical Physics 122 (2005), doi:10.1063/1.1879752. 34. Aldo Romero, Pier Luigi Silvestrelli, and Michele Parrinello, “Compton Scattering and the Character of the Hydrogen Bond in Ice I-h,” Journal of Chemical Physics 115, vol. 1 (2001): 115–23. 35. Richard Bader, “Atoms in Molecules: A Quantum Theory,” International Series of Monographs on Chemistry 22 (1990): 293–99. 36. 
Axel Becke and Kenneth Edgecombe, “A Simple Measure of Electron Localization in Atomic and Molecular Systems,” Journal of Chemical Physics 92 (1990): 5,397–403. 37. Anmol Kumar et al., “Lone Pairs: An Electrostatic Viewpoint”, Journal of Physical Chemistry A 118, no. 2 (2014): 526–32. 38. Robert Bukowski et al., “Predictions of the Properties of Water from First Principles,” Science 315, no. 5,816 (2007): 1,249–52. 39. Philippe Wernet et al., “The Structure of the First Coordination Shell in Liquid Water,” Science 304, no. 5,673 (2004): 995–99. 40. José Teixeira et al., “Experimental Determination of the Nature of Diffusion Motions of Water Molecules at Low Temperatures,” Physical Review A 31, no. 3 (1985): 1,913–17. 41. Michael Mahoney and William Jorgensen, “A Five-Site Model for Liquid Water and the Reproduction of the Density Anomaly by Rigid, Nonpolarizable Potential Functions,” Journal of Chemical Physics 112 (2000): 8,910–22. 42. Michael Faraday, “Experimental Researches in Electricity,” Philosophical Transactions of the Royal Society of London 122 (1832): 155. 43. Marcel Urban et al., “The Quantum Vacuum as the Origin of the Speed of Light,” European Physical Journal D 67, no. 3 (2013), doi:10.1140/epjd/e2013-30578-7. 44. Willis Lamb and Robert Retherford, “Fine Structure of the Hydrogen Atom by a Microwave Method,” Physical Review 72, no. 3 (1947): 241–43. 45. See Yakr Aharonov and David Bohm, “Significance of Electromagnetic Potentials in Quantum Theory,” Physical Review 115 (1959): 485–91; Akira Tonomura et al., “Evidence for Aharonov-Bohm Effect with Magnetic Field Completely Shielded from Electron Wave,” Physical Review Letters 56 (1986): 792–95. 46. Hiroomi Umezawa, “Development in Concepts in Quantum Field Theory in Half Century,” Mathematica Japonica 41 (1995): 109–24. 47. 
See Hendrik Casimir, “On the Attraction Between Two Perfectly Conducting Plates,” Proceedings of the Koninklijke Nederlandse Akademie van Wetenschappen 51 (1948): 793–95; Christopher Wilson et al, “Observation of the Dynamical Casimir Effect in a Superconducting Circuit,” Nature 479 (2011): 376–79. 48. See Peter Gürtler, Volker Saile, and E. E. Koch, “Rydberg Series in the Absorption Spectrum of Water and Deuterium Oxide in the Vacuum Ultraviolet,” Chemical Physics Letters 51 (1977): 386–91. Wing Fat Chan, Gyn Cooper, and C. E. Brion, “The Electronic Spectrum of Water in the Discrete and Continuum Regions. Absolute Optical Oscillator Strengths for Photoabsorption (6–200 eV),” Chemical Physics 178 (1993): 387–400. 49. Raffaella Arani et al., “QED Coherence and the Thermodynamics of Water,” International Journal of Modern Physics B 9, no. 15 (1995): 1,813–42. 50. Raffaella Arani et al., “QED Coherence and the Thermodynamics of Water,” International Journal of Modern Physics B 9, no. 15 (1995): 1,813–42. 51. Jared Smith et al., “Unified Description of Temperature-Dependent Hydrogen-Bond Rearrangements in Liquid Water,” Proceedings of the National Academy of Sciences of the United States of America 102, no. 40 (2005): 14,171–74. 52. Anders Nilsson and Lars Pettersson, “Perspective on the Structure of Liquid Water,” Chemical Physics 389 (2011): 1–34. 53. Giuliano Preparata, An Introduction to Realistic Quantum Physics (River Edge, NJ: World Scientific, 2002). 54. For computational details, see Raffaella Arani et al., “QED Coherence and the Thermodynamics of Water,” International Journal of Modern Physics B 9, no. 15 (1995): 1,813–42; Ivan Bono et al., “Emergence of the Coherence Structure of Liquid Water,” Water 4, no. 3 (2012): 510–32. 55. Emilio Del Giudice et al., “Electrodynamical Coherence in Water: A Possible Origin of the Tetrahedral Coordination,” Modern Physics Letters B 9, no. 15 (1995): 953–61. 56. 
Vincent Craig, Barry Ninham, and Richard Pashley, “The Effect of Electrolytes on Bubble Coalescence in Water,” Journal of Physical Chemistry 97, no. 39 (1993): 10,192–97. 57. Emilio del Giudice, Giuliano Preparata, and Martin Fleishmann, “QED Coherence and Electrolyte Solutions,” Journal of Electroanalytical Chemistry 482, no. 2 (2000): 110–16. 58. Emilio del Giudice et al., “On the ‘Unreasonable’ Effects of ELF Magnetic Fields upon a System of Ions,” Bioelectromagnetics 23, no. 7 (2002): 522–30. 59. Pierandrea Lo Nostro and Barry Ninham, “Hofmeister Phenomena: An Update in Ion Specificity in Biology,” Chemical Reviews 112, no. 4 (2012): 2,286–322. 60. Kim Collins, “Sticky Ions in Biological Systems,” Proceedings of the National Academy of Sciences of the United States of America 92, no. 12 (1995): 5,553–57. 61. Aleksei Bunkin, Serguei Pershin, and Alisher Nurmatov, “Four-Photon Spectroscopy of Ortho/Para Spin-Isomer H2O Molecule in Liquid Water in Sub-Millimeter Range,” Laser Physics Letters 3, no. 6 (2006): 275–77. 62. Russell Sliter, Melissa Gish, and Andrey Vilesov, “Fast Nuclear Spin Conversion in Water Clusters and Ices: A Matrix Isolation Study,” Journal of Physical Chemistry A 115, no. 34 (2011): 9,682–88. 63. Achim Müller and Marc Henry, “Nanocapsule Water-based Chemistry,” Comptes Rendus Chimie 6, no. 8–10 (2003): 1,201–208. 64. Marc Henry et al., “Chameleon Water: Assemblies Confined in Nanocapsules,” Journal of Molecular Liquids 118, no. 1–3 (2005): 155–62. 65. Junichi Higo et al., “Large Vortex-Like Structure of Dipole Field in Computer Models of Liquid Water and Dipole-Bridge between Biomolecules,” Proceedings of the National Academy of Sciences of the United States of America 98, no. 11 (2001): 5,961–64. 66. Jing-Ping Shih, Shey-Yi Shen, and Chung-Yuan Mou, “A Voronoi Polyhedra Analysis of Structures of Liquid Water,” Journal of Chemical Physics 100, no. 3 (1994): 2,202–12. 67. Albert Szent-Györgyi, “Towards a New Biochemistry,” Science 93, no. 
2,426 (1941): 610–11. ## More From This Author • ### Super-Saturated Chemistry On the complicated relationship between chemistry and physics. ( Chemistry / Critical Essay / Vol. 2, No. 4 )
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 3, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.757087767124176, "perplexity": 1637.8566298144283}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-51/segments/1544376828697.80/warc/CC-MAIN-20181217161704-20181217183704-00443.warc.gz"}
http://forums.wolfram.com/mathgroup/archive/1998/Jan/msg00172.html
Services & ResourcesWolfram Forums MathGroup Archive 1998 January February March April May June July August September October November December # RE: Re: Rotate3D bug solution I followed with great interest all the discussions about Eulerian angles, I have not got much to say about them, (the name is the same in French litterature) but I remember having spent some time on the following code, which uses a generalization of Eulerian angles in unspecified dimension. The code uses Eulerian angles, and mostly shows the way in which these angles are defined: (* this module generates a random point on a sphere of center "center" and The algorithm uses generalized polar angles as random numbers and transforms the polar coordinates in cartesian co ordinates. *) {dim,rp,rc,x}, dim=Length[center]; x[i_Integer,pc_]:=pc[[1]] Switch[i, 1,Product[Sin[pc[[j]] ],{j,2,dim}], _,Product[Sin[pc[[k]] ],{k,i+1,dim}] Cos[pc[[i]]] ]; rp=Flatten[ Pi//N}],Table[Random[Real,{0,Pi//N}],{dim-2}]}]; rc=Table[x[i,rp],{i,dim}]; Return[rc+center] ] (* end *) Eulerian angles are here called polar angles. I'm not a mathematician, so the terminology might be wrong. Anyway this code returns random points on a sphere, random in the sense of uniform density on the surface of the sphere. 
Hope this helps, ----------------------------------------------- Jean-Marie THOMAS Conseil et Audit en Ingenierie de Calcul jmthomas@cybercable.tm.fr +33 (0)3 88 32 93 64 www.cybercable.tm.fr/~jmthomas ======================= -----Message d'origine----- De: John Sidles [SMTP:sidles@u.washington.edu] Date: lundi 12 janvier 1998 10:10 A: mathgroup@smc.vnet.net Objet: [mg10382] Re: Rotate3D bug solution In article <68csvb$aa4@smc.vnet.net>, Selwyn Hollis <shollis@peachnet.campus.mci.net> wrote: >Mark Evans wrote: > >> Paul Abbott wrote: >> > >> > The use of Eulerian angles for specifying rotations in 3D IS >> standard >> > (at least in maths and physics -- especially in quantum mechanics, >> > crystallography, and angular momemntum theory). Note that the >> eulerian >> > angle parametrization avoids the singularities that arise in other >> > parametrizations. >> > >> >> In the same sense, you could say that sea shells are standard legal >> tender if you live in a certain part of the world. >> >> Paul is right that there is nothing technically wrong with this kind >> of >> rotation. My point was that Mathematica packages should be written >> for >> a wider audience. It seems intuitive that the most common >> understanding of a rotation matrix is one that rotates sequentially >> about each of the three coordinate axes. The fact that Mathematica >> does not offer this rotation by default is a slip-up in my mind. > >Right on, Mark! > >I've never heard of Eulerian angles before encountering them in >Mathematica. Maybe they're the usual tricks-of-the-trade to a few >quantum physicists, but you'll be hard-pressed to find a reference to >them in any but the most esoteric mathematics literature. > Well, the latitude and longitude coordinates on the globe are (essentially) Eulerian coordinates -- so they're not *too* esoteric. 
Here's a very important and useful, yet simple, theorem which anyone working with rotations needs to know -- it explains why seemingly inequivalent conventions are actually precisely equivalent. Let$R(v)$be a function which computes the three-by-three matrix associated with a three-vector$v$, where the direction of$v$gives the axis of rotation, and the magnitude of$v$gives the angle of rotation. (Exercise: program this useful utility function in Mathematica, together with its inverse function v(R) -- answer given at end!). Let$U$be an arbitrary rotation matrix, with$U^{t}$the matrix transpose of$U$. Since$U$is a rotation,$U^{t}$is the matrix inverse of$U$, i.e.,$U^{t} U = I$. Then for any$U$and$v$, here's the key theorem! U R(v) U^{t} = R(U v) To see how this clarifies the literature on rotations, suppose Textbook A defines Euler matrices in terms of rotation angles${\theta, \phi, \psi}$about fixed unit axes${\hat{n}_1,\hat{n}_2,\hat{n}_3}\$ as follows R(\theta, \phi, \psi) \edef R(\theta \hat{n}_1) R(\phi \hat{n}_2) R(\psi \hat{n}_3) \edef R_1 R_2 R_3 Now we use the above theorem to rewrite this in terms of *moving* axes. R(\theta,\phi,\psi) = R(\psi R1 R2 \hat{n_3}) R(\phi R1 \hat{n_2}) R(\theta \hat{n_1}) Cool! Its the same three angles, but now applied in the *opposite* order, and about moving instead of fixed axes. Yet the final matrix is the same. And it is perfectly reasonable for Textbook B to adopt this moving-axis convention to define Euler angles. Given all this ambiguity, I no longer use Euler angles when doing calculations involving rotations. It is just too easy to confuse the various signs and conventions! A much safer strategy, which I recommend, is to simply code the function R(v) and its inverse v(R) as utility routines in whatever language you prefer. You will find that these two functions, plus ordinary matrix multiplication, suffice for *any* calculation involving rotations. No more Euler angle torture! 
No more sine and cosine functions with obscure arguments! Here's some Mathematica code for R(v): rotationMatrix[x_] := Block[ {angle,xHat,pPar,pPerp,pOrthog}, angle = Sqrt[x.x]//N; If[angle<10^-6,Return[id]]; xHat = x/angle; pPar = Outer[Times,xHat,xHat]; pPerp =DiagonalMatrix[{1.0,1.0,1.0}] - pPar; pOrthog = { {0.0,xHat[[3]],-xHat[[2]]}, {-xHat[[3]],0.0,xHat[[1]]}, {xHat[[2]],-xHat[[1]],0.0} }; pPar + Cos[angle]* pPerp + Sin[angle]*pOrthog ] It is left as an exercise to (a) figure out how the above works, and (b) code the inverse function! Coding the inverse function is quite a nontrivial exercise. Hint: (a) determine the cosine() from the trace of R, then determine the sine() and the the axis of rotation from the antisymmetric part of R. This works for all rotations *except* for angles near Pi, for which the axis of rotation should be set to the (unique) eigenvector of the symmetric part of R that has unit eigenvalue (this is because sin(Pi) = 0). A final refinement: the above algorithm assumes that R is orthogonal. But what if R is just *close* to orthogonal, but has some accumulated numerical imprecision? Even worse, you can bet that some future user of your v(R) routine will hand it an R matrix that is grossly non-orthonormal! The right thing to do, therefore, is to condition R before calculating v, by (a) calculating the singular value decomposition for R, then (b) adjusting all the singular values to have unit magnitude. This will yield a cleaned-up exactly orthogonal R which (formally) is the orthogonal matrix that is closest to the input matrix in the least-mean squares sense. Better issue an error message if the singular values are far from unity -- because this indicates abuse of the inverse routine! The Mathematica routines SingularValue[] and Eigensystem[] are well suited to the above tasks -- it's tougher in "C". 
I wrote this up at length because I have a student who is generating animations in Mathematica and POV-Ray -- he might as well learn to do things the easy way! Happy rotating ... JAS • Prev by Date: Re: Why doesn't this work ? • Next by Date: Re: Numeric overflow • Prev by thread: Re: Rotate3D bug solution
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6732439398765564, "perplexity": 5132.887526754228}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2013-48/segments/1386163051509/warc/CC-MAIN-20131204131731-00046-ip-10-33-133-15.ec2.internal.warc.gz"}
https://docs.nvidia.com/clara-holoscan/archive/clara-deploy-0.8.1-ea/sdk/applications/operators/fastio_variable_passthrough/app_fastio_to_npz/public/docs/readme.html
# 10.23. Clara FastIO Variable Passthrough - Fastio to Npz Converter Operator This application is NOT for medical use. ## 10.23.1. Overview This application reads FastIO variables from the appropriate FastIO inputs, and outputs one of them as a compressed Numpy file. Specifically, the operator 1. reads the fastio variables named segmentation and segmentation_shape, 2. verifies that the shape of segmentation corresponds to the entries in segmentation_shape (ie. segmentation.shape == segmentation_shape), 3. and outputs the array in segmentation as a numpy array. ## 10.23.2. Inputs Two FastIO variables named: • segmentation holding a one-channel 3D volume • segmentation_shape holding a 1D 4-entry array corresponding to the shape of segmentation ## 10.23.3. Outputs Compressed Numpy file (.npz) in the /output folder. ## 10.23.4. Directory Structure The directories in the container are shown below. Copy Copied! /app_fastio_to_npz ├── Dockerfile ├── main.py └── requirements.txt The license to this container is available and can be pulled as part of the procedure described above or obtained from the Clara Deploy SDK. By pulling and using the container, you accept the terms and conditions of these licenses. Release Notes, the Getting Started Guide, and the SDK itself are available at the NVIDIA Developer forum.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.15337271988391876, "perplexity": 14758.916586587371}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-14/segments/1679296950363.89/warc/CC-MAIN-20230401221921-20230402011921-00573.warc.gz"}
https://www.physicsforums.com/threads/jordan-can-form-of-frobenius-map.307507/
# Jordan Can. Form of Frobenius map 1. Apr 15, 2009 ### geor Hello all, I am trying to solve this exercise here: Let \phi denote the Frobenius map x |-> x^p on the finite field F_{p^n}. Determine the Jordan canonical form (over a field containing all the eigenvalues) for \phi considered as an F_p-linear transformation of the n-dimensional F_p-vector space F_{p^n}. So, this is how I start: Suppose that F_{p^n}=F_p(a1,a2,a3, ..., an) (those n elements will be powers of one element, but it doesn't matter). Now, since the Frobenius map is an isomorphism of F_{p^n} to itself, then \phi permutes a1, a2, ..., an. Since a1, a2, ..., a3 form a basis of the n-dimensional F_p-vector space F_{p^n}, then the matrix of \phi in respect with that basis will be just a permutation matrix. So the problem becomes equivalent with: "find the jordan canonical form of a permutation matrix". Am I doing some obvious mistake here? Would the latter be something straightforward? I admit I can't see it... Any help would be greatly appreciated. 2. Apr 15, 2009 ### geor Okay, so this is a similar way that seems to work for me: Suppose F_{p^n}=F_p(a), where a is a root of some irreducible polynomial over F_p of degree n. Then, a^(p^n-1), ..., a^{p^2}, a^p, a (= a^{p^n}) is a basis of the F_p-vector space F_p(a) Then we notice that \phi(a^{p^i}) = a^{p^i+1} So, in respect to the basis above, the matrix of \phi becomes: 0 1 0 0 0 .... 0 0 0 0 1 0 0 .... 0 0 0 0 0 1 0 .... 0 0 .................. .................. 0 0 0 0 0 .... 1 0 0 0 0 0 0 .... 0 1 1 0 0 0 0 .... 0 0 With some little effort, one can see that the characteristic polynomial of this matrix is (t^n)-1. That is, we have n distinct eigenvalues (all the nth roots of unity). Thus, we will have n Jordan blocks, meaning that the Jordan canonical form will be the diagonal matrix with the roots of unity in the diagonal Could somebody please tell me if my arguments are correct or I miss something? 
Or, if the above are correct, is there a simpler way to obtain this result? Thanks a lot.. Last edited: Apr 15, 2009
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9719711542129517, "perplexity": 868.9263752172195}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-34/segments/1502886105195.16/warc/CC-MAIN-20170818233221-20170819013221-00378.warc.gz"}
https://portlandpress.com/biochemj/article-abstract/285/3/805/27901/Structure-of-a-heparan-sulphate-oligosaccharide?redirectedFrom=fulltext
Binding of basic fibroblast growth factor (bFGF) to the extracellular matrix of cultured bovine aorta smooth muscle cells is likely to be mediated via heparan sulphate, since not only exogenous addition of heparan sulphate to the culture medium but also pretreatment of the cells with heparitinase (but not chondroitinase ABC) resulted in loss of binding. Comparison of the affinity of bFGF to various glycosaminoglycan-conjugated gels showed a direct and specific binding of bFGF to heparan sulphate. Heparan sulphate also bound to a bFGF affinity gel. However, the proportion of heparan sulphate bound varied depending on the source of the HS (more than 90% and 45% with pig aorta heparan sulphate and mouse EHS tumour heparan sulphate respectively). The bound heparan sulphate had the ability to protect bFGF from proteolytic digestion, but the unbound heparan sulphate did not. The results suggest the presence in the bound heparan sulphate of a specific structure involved in binding. Limited digestion with heparitinase I of porcine aorta heparan sulphate yielded 13% oligosaccharides bound to the gel, of which the smallest were octasaccharides. Analysis of a hexadecasaccharide fraction which was obtained at the highest yield among the bound oligosaccharides was performed by h.p.l.c. of the deamination products obtained with nitrous acid and the unsaturated disaccharide products formed by heparitinase digestion. Comparison of the disaccharide unit compositions exhibited a marked difference in IdoA(2SO4)GlcNSO3 and IdoA(2SO4)GlcNSO3(6SO4) units between the bound and unbound hexadecasaccharides. The amounts measured were 3 mol and 1 mol per mol of the former and 0.4 mol and 0.6 mol per mol of the latter. It is likely that the binding of bFGF to heparan sulphate may require the domain structure of the heparan sulphate to be composed of clustering IdoA(2SO4)-GlcNSO3 units. This content is only available as a PDF.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.830326497554779, "perplexity": 9708.726615857391}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-16/segments/1585370494349.3/warc/CC-MAIN-20200329140021-20200329170021-00470.warc.gz"}
https://sk.sagepub.com/Reference/sage-encyclopedia-of-educational-research-measurement-evaluation/i12553.xml
• Entry • Entries A-Z • Subject index ### Levene’s Homogeneity of Variance Test Homogeneity of variance (HOV) is one of the assumptions of some frequently used statistical procedures for group mean comparisons, such as a one-way analysis of variance (ANOVA) or an independent-samples t test. Under the HOV assumption, population variances of all groups are assumed to be equal. That is, the null hypothesis (H0) being tested for verifying the HOV assumption is that the population variances across groups are equal; that is, $H_0: \sigma_1^2 = \sigma_2^2 = \cdots = \sigma_k^2$, where k denotes the number of groups compared in a study. The examination of the HOV assumption is always an essential step before conducting a comparison of group means using ANOVAs or t tests. Violations of the HOV assumption may result in misleading results in terms of the test for differences in group means. Levene’s ...
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9662325978279114, "perplexity": 1353.2154703688227}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-27/segments/1656104209449.64/warc/CC-MAIN-20220703013155-20220703043155-00273.warc.gz"}
https://www.gamasutra.com/blogs/JeanSimonet/20160310/267441/Beyond_the_State_Machine.php
March 24, 2019 Press Releases March 24, 2019 Games Press If you enjoy reading this site, you might also want to check out these UBM Tech sites: # Beyond the State Machine by Jean Simonet on 03/10/16 03:14:00 pm The following blog post, unless otherwise noted, was written by a member of Gamasutra’s community. The thoughts and opinions expressed are those of the writer and not Gamasutra or its parent company. This post is a follow up to the article Logic over Time in which I described why new language features, such as coroutines, can help game programmers write more readable and robust state machines. In this article, I’ll talk in more details about the specifics of my implementation of coroutines and its advantages and uses beyond state machines themselves. Specifically, I'll talk about concurrency and synchronization. I have posted the sources on GitHub, under an MIT license, feel free to use this framework in your own projects. The code is written in .NET 3.5 (the version supported by Unity). https://github.com/jeansimonet/Coroutines I will start by showing you an example, and highlight how these coroutines are nicely composable. After that I will dive deeper into the implementation of the framework, and answer some of the more common questions related to using coroutines in game code. # A Simple Turret Picking up where I left off in the first article, I went ahead and wrote a simple Turret behavior for a mock game (sources of which are also available on the repository). To summarize, the turret does the following: it looks for a target (the player) within a given radius. Once it finds a target, it does two things at once. It shoots projectiles, and it tracks the target. If it loses lock on the target (player moving too far away), it returns to its original orientation and starts over. Here is the relevant piece of code, let’s dissect it right away! Let’s start by looking at the IEnumerable<Instruction> type of the Main() coroutine. 
As I mentioned in my previous article, this function generates an iterator block, that yields intermediate results of type Coroutines.Instruction. These intermediate instructions tell the framework how to proceed. By default the framework enumerates the iterator block, which is what executes the actual coroutine code. It does this until a value is yield-ed by the coroutine code. Depending on what that value is, it will do one thing or another. ControlFlow.ExecuteWhileRunning() and ControlFlow.ExecuteWhile() are of course special instructions that tell the framework what to do: mainly to execute other (sub)coroutines under certain conditions. Instructions like ExecuteWhile() or Call() are how we can compose coroutines, let’s look at them more closely. # Coroutine Instructions To understand how instructions works, let’s start by looking at a simpler example: This is the utility coroutine that waits for a specific amount of time. It just sits in a loop checking the time since it was first called, and yields null. Once the time has elapsed, it simply terminates. null is interpreted by the framework to mean ‘sleep until next frame’. (Note: it is the exact same meaning as it is for Unity’s coroutines. How convenient!) A coroutine can also yield a Call instruction, passing in another coroutine. This is in fact what the FireProjectiles() coroutine of our turret does to wait between firing projectiles. ControlFlow.Call(...) is a utility method that returns a derived class of Instruction, and has a special meaning that the framework understands. In this case it means ‘start executing the coroutine I am passing in (stored in the instruction), and resume me once it has terminated’. As you would expect, there are other control flow methods that return different Instructions, which in turn have different meanings. ControlFlow.ExecuteWhile(...) is an example of that. 
The ExecuteWhile instruction passes a number of (sub)coroutines and a predicate, and the framework understands it to mean ‘Run all the coroutines in parallel for as long as the predicate is true’. But before diving into the details of the ExecuteWhile(...) code, we need to take a step back and explain how the runtime works. # It’s always Graphs Behind the scenes, the framework is building a graph structure. The runtime executes the user code, until the user code yields an instruction, and then the runtime interprets that instruction accordingly. Depending on the yielded instruction, the runtime can create different kinds of sub nodes. The most common coroutine node is the one that executes an IEnumerator<Instruction> iterator. The graph is stored by the user however, by manually instantiating a root coroutine node. In our turret example, the root of the graph was declared when we added _Main to the Turret class. There is no global coroutine manager or anything like that in the framework. If you want to use a coroutine, you instantiate it yourself, and then ‘tick’ it yourself as well. After this, the graph structure is built on-demand, based on the instructions yield-ed by the user code. If the user indicates it wants to ‘Call’ a subroutine for instance, the runtime creates a new coroutine, sets it as the child of the current coroutine, and passes execution to it. Which brings us to the interface that coroutine nodes need to implement: ICoroutine. A basic node of the coroutine graph needs to be able to perform the following: • Be updated, of course, to do some actual work! • Indicate whether it is running or finished. That value is used, among other things, to determine when to return execution to a parent coroutine node. • Be reset, that is: restart whatever it is doing from the beginning. • Be disposed. This is crucial so that nodes can make sure they clean up after themselves in a predictable fashion. 
It also has the nice advantage that we can easily build a node pooling system once we know for a fact that disposed nodes are no longer used. # The Coroutine Node The Coroutine node is the main workhorse of the framework. It is the node where user code is executed. The coroutine node is the one that understands the ‘Instructions’ I mentioned earlier. It can be represented like this: And in practice, it stores the following data: The coroutine needs to know the original IEnumerable so it can restart enumeration from the beginning when reset. Of course it stores the IEnumerator to keep track of where it is in the coroutine (for all intents and purposes, the IEnumerator is the auto-generated state machine). After that, it has two extra members: a state variable and a subroutine. The subroutine is null until a control-flow instruction is yield-ed and tells the coroutine node how to create the child node. In the case of a CallInstruction as seen earlier, the Coroutine.Update() method sets a flag indicating that instead of iterating its iterator (the user code), it should instead create and then execute a child node. Once that child node completes, it can reset the flag and continue iterating its iterator (the user code). In most cases, the subroutine it creates is itself an iterator-based Coroutine, but in other cases, such as with ExecuteWhile(...), it is a node of a different type. # The While Node The While node stores two things: • A predicate (or in other words a function returning a boolean) that it will use to determine if it needs to continue executing its child. • A child node that it will execute normally, but interrupt as soon as the predicate becomes false. There is also a state variable, but it isn’t strictly necessary. We’re using it to avoid having to check the master condition again and again when asked what the Running state of this node is. 
Of course the Update() method for the While node is very straightforward: the While node checks the predicate every update, and if the returned value is true, executes the subroutine. Otherwise, it interrupts the subroutine (calling Dispose() on it so it gets a chance to clean up), and considers itself finished. Note that the predicate isn’t evaluated at the time of the call, but instead at every update of the While node. Going back to the Turret example, you can see that the Condition passed to the While node is in fact a lambda expression, also called an Anonymous Closure. But how does ExecuteWhile() take more than one node? And what happens to those nodes? # The Concurrent Node ExecuteWhile() is a utility method that takes a variable number of parameters (variadic function). Let’s look at it! ExecuteWhile() does two things. First it creates a Concurrent node, passing it the subroutines, and then it creates the While node, with the predicate and the Concurrent node as a child. Finally it returns a CallInstruction indicating that the calling coroutine node should wait until the While node is finished to continue its own execution. In essence our call to ExecuteWhile() in the turret example builds the following graph: If things are starting to look like Behavior Trees at this point, that’s normal, this is pretty much what we are building here. In fact, it is a fairly simple affair to implement more of the behavior graph nodes, such as priority nodes. But let’s look at a the concurrent node in more detail first, there is more than meets the eye there. Indeed, what does this Any() mean? Well, as I hinted at the end of my previous article, as soon as you start talking about concurrence, you need to think about arbitration. And the question we need to answer is this: what is the ‘Running’ state of the concurrent node itself? Is it ‘Running’ as long as all its child nodes are ‘Running’? Or does it terminate as soon as one of its child nodes does? 
The answer is that it depends, of course! It depends on what the user wants. So we need a way for the user to be able to specify how to arbitrate the ‘Running’ state. This is what Any() is in this case. It is telling the concurrent node that it should be running if any of the child nodes are running. IsRunning_Arbitration() takes an array of boolean and returns a boolean, and the Any() method that ExecuteWhile() is passing in simply performs a logical OR. We could of course implement any behavior we want, for instance indicating that the concurrent node should be running only as long as all its children are running. The ExecuteWhileRunning(masterCoroutine, slaveCoroutine) call seen at the beginning of the Turret example is yet another specialized version of the While node that treats the running state of the master node as the condition whether or not to continue executing the slave node. # Disposing Coroutines So now that we understand how the ExecuteWhile(...) coroutine works, what exactly happens to the slave node(s) when we decide to stop updating? What if a coroutine was holding onto some sort of resource, like a particle effect. Does it get a chance to turn it off, or does the coroutine stay in some sort of limbo state until garbage collection? This is where the IDisposable interface comes into play, and more specifically, the fact that C# enumerators implement it. In fact they implement the IDisposable interface for this specific reason. If you provide an IEnumerator that reads from a file, you would want to make sure that the file gets closed when your user code stops enumerating, regardless of whether they reached the end of the file. So when it came to C# iterators (the building block of our coroutines, the auto-generated state machines), the designers of the language had the great idea to come up with the following convention (from an MSDN article) If you have a try ... 
finally block in your iterator, the language executes the finally block under the following conditions: • After the last statement of the try block is executed. (No surprise here.) • When an exception propagates out of the try block. (No surprise here either.) • When execution leaves the try block via yield break. • When the iterator is Disposed and the iterator body was trapped inside a try block at the time. That last case can occur if somebody decides to abandon the enumerator before it is finished. The language guarantees that if we dispose the iterator, then the finally block will be executed! And so, the coroutine framework makes sure to do just that whenever a node is disposed or reset, and this is how it can make sure that user code gets a chance to clean up when interrupted. Looking at the Turret example, this is what the TrackTarget() coroutine looks like: Even though the meat of the coroutine is an infinite while loop, we specify that once we’ve enabled the tracking light, we want to make sure we turn it off again, if the coroutine ever gets interrupted (disposed). Note: Don’t let the finally keyword scare you into thinking we’re triggering exceptions here, we’re not. The finally block will get executed if an exception occurs, of course, but for the regular case (coroutines completing or being interrupted) we are not incurring the heavy cost of exception stack unwinding. You can have as many try/finally blocks as you want, and they can also be nested, so you can make sure that only the things that need to be cleaned up are. # Returning values to caller The last big thing to look at is how Coroutines return data to their caller. And unfortunately, that’s something that they just can’t do (at least not in .NET 3.5 where we don’t have support for async/await). It’s easy to understand how that’s not a trivial feature though, since Coroutines are meant to return intermediate values, they don’t have a notion of final return value. 
So in order to return values to parent coroutines, we rely on Closures again (or lambda expressions). Looking at the turret example one last time, and the FindTargetInRadius() coroutine specifically, we can see how that works. FindTargetInRadius() takes a regular parameter (the radius), and an action (a function that returns void). When it finds a target, it invokes the action with the found target. From the Main() coroutine, we use it like this: You can see the local variable target, and the trivial Lambda Expression being passed to FindTargetInRadius() which simply assigns the local variable. It’s a little roundabout way of doing things, but thanks to the shorthand that the compiler allows, it is not too bad. # More Nodes Types and Applications Of course the real value of a framework such as this one is in whether it is easy to extend and modify to suit the needs of your game. In fact, these coroutines are a great starting point to build more complex graph-based AI systems. They give us an elegant way to compose and synchronize procedural behaviors. This is what I’d like to investigate in more detail in the next article. Coroutines with concurrency and synchronization are really close in concept to Behaviour Graphs, which are extremely popular for AI these days. I'd also like to try out writing more traditional Hierarchical State Machines, as well as other AI structures such as the Voting and Subsumption architectures; all constructs that can be best represented by a graph of (mostly) asynchronous code. ### Related Jobs Pixel Pool — Portland, Oregon, United States [03.22.19] Software Developer (Unreal Engine 4, Blueprint, C++) Crystal Dynamics — Redwood City, California, United States [03.22.19] Senior Tools Engineer Sucker Punch Productions — Bellevue, Washington, United States [03.22.19] Open World Content Designer Phosphor Studios — Chicago, Illinois, United States [03.22.19] Senior Gameplay Programmer
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.24926163256168365, "perplexity": 1399.112147861084}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-13/segments/1552912203409.36/warc/CC-MAIN-20190324083551-20190324105551-00028.warc.gz"}
https://www.nature.com/articles/s41586-021-03984-4?error=cookies_not_supported&code=47bf7bc9-1573-4787-97a2-53f003c39adb
Thank you for visiting nature.com. You are using a browser version with limited support for CSS. To obtain the best experience, we recommend you use a more up to date browser (or turn off compatibility mode in Internet Explorer). In the meantime, to ensure continued support, we are displaying the site without styles and JavaScript. # Globally resolved surface temperatures since the Last Glacial Maximum ## Abstract Climate changes across the past 24,000 years provide key insights into Earth system responses to external forcing. Climate model simulations1,2 and proxy data3,4,5,6,7,8 have independently allowed for study of this crucial interval; however, they have at times yielded disparate conclusions. Here, we leverage both types of information using paleoclimate data assimilation9,10 to produce the first proxy-constrained, full-field reanalysis of surface temperature change spanning the Last Glacial Maximum to present at 200-year resolution. We demonstrate that temperature variability across the past 24 thousand years was linked to two primary climatic mechanisms: radiative forcing from ice sheets and greenhouse gases; and a superposition of changes in the ocean overturning circulation and seasonal insolation. In contrast with previous proxy-based reconstructions6,7 our results show that global mean temperature has slightly but steadily warmed, by ~0.5 °C, since the early Holocene (around 9 thousand years ago). When compared with recent temperature changes11, our reanalysis indicates that both the rate and magnitude of modern warming are unusual relative to the changes of the past 24 thousand years. This is a preview of subscription content ## Access options from\$8.99 All prices are NET prices. ## Data availability All LGMR and associated proxy data are publicly available via the National Oceanic and Atmospheric Administration (NOAA) Paleoclimatology Data Archive (https://www.ncdc.noaa.gov/paleo/study/33112). Source data are provided with this paper. 
## Code availability The MATLAB code used for the reconstruction (DASH) are publicly available (https://github.com/JonKing93/DASH), as are all accompanying Bayesian proxy forward models (BAYSPAR, BAYSPLINE, BAYFOX, and BAYMAG) used in this study (https://github.com/jesstierney). The iCESM1.2 model code is available at https://github.com/NCAR/iCESM1.2. ## References 1. 1. Liu, Z. et al. Transient simulation of last deglaciation with a new mechanism for Bølling-Allerød warming. Science 325, 310–314 (2009). 2. 2. Liu, Z. et al. The Holocene temperature conundrum. Proc. Natl Acad. Sci. USA 111, E3501–E3505 (2014). 3. 3. Shakun, J. D. et al. Global warming preceded by increasing carbon dioxide concentrations during the last deglaciation. Nature 484, 49–54 (2012). 4. 4. Snyder, C. W. Evolution of global temperature over the past two million years. Nature 538, 226–228 (2016). 5. 5. Bereiter, B., Shackleton, S., Baggenstos, D., Kawamura, K. & Severinghaus, J. Mean global ocean temperatures during the last glacial transition. Nature 553, 39–44 (2018). 6. 6. Marcott, S. A., Shakun, J. D., Clark, P. U. & Mix, A. C. A reconstruction of regional and global temperature for the past 11,300 years. Science 339, 1198–1201 (2013). 7. 7. Kaufman, D. et al. Holocene global mean surface temperature, a multi-method reconstruction approach. Sci. Data 7, 201 (2020). 8. 8. Bova, S., Rosenthal, Y., Liu, Z., Godad, S. P. & Yan, M. Seasonal origin of the thermal maxim at the Holocene and the last interglacial. Nature 589, 548–553 (2021). 9. 9. Hakim, G. J. et al. The last millennium climate reanalysis project: framework and first results. J. Geophys. Res. Atmos. 121, 6745–6764 (2016). 10. 10. Tierney, J. E. et al. Glacial cooling and climate sensitivity revisited. Nature 584, 569–573 (2020). 11. 11. Morice, C. P. et al. An updated assessment of near-surface temperature change from 1850: the HadCRUT5 dataset. J. Geophys. Res. Atmos. 126, e2019JD032361 (2020). 12. 12. Marsicek, J., Shuman, B. 
N., Bartlein, P. J., Shafer, S. L. & Brewer, S. Reconciling divergent trends and millennial variations in Holocene temperatures. Nature 554, 92–96 (2018). 13. 13. Baggenstos, D. et al. Earth’s radiative imbalance from the Last Glacial Maximum to the present. Proc. Natl Acad. Sci. USA 116, 14881–14886 (2019). 14. 14. Kageyama, M. et al. The PMIP4-CMIP6 Last Glacial Maximum experiments: preliminary results and comparison with the PMIP3-CMIP5 simulations. Clim. Past 17, 1065–1089 (2021). 15. 15. Brierley, C. M. et al. Large-scale features and evaluation of the PMIP4-CMIP6 mid Holocene simulations. Clim. Past 16, 1847–1872 (2020). 16. 16. Park, H.-S., Kim, S.-J., Stewart, A. L., Son, S.-W. & Seo, K.-H. Mid-Holocene Northern Hemisphere warming driven by Arctic amplification. Sci. Adv. 5, eaax8203 (2019). 17. 17. Tardif, R. et al. Last Millennium Reanalysis with an expanded proxy database and seasonal proxy modeling. Clim. Past 15, 1251–1273 (2019). 18. 18. Tierney, J. E. & Tingley, M. P. A Bayesian, spatially-varying calibration model for the TEX86 proxy. Geochim. Cosmochim. Acta 127, 83–106 (2014). 19. 19. Tierney, J. E. & Tingley, M. P. BAYSPLINE: a new calibration for the alkenone paleothermometer. Paleoceanogr. Paleoclimatol. 33, 281–301 (2018). 20. 20. Malevich, S. B., Vetter, L. & Tierney, J. E. Global core top calibration of δ18o in planktic foraminifera to sea surface temperature. Paleoceanogr. Paleoclimatol. 34, 1292–1315 (2019). 21. 21. Tierney, J. E., Malevich, S. B., Gray, W., Vetter, L. & Thirumalai, K. Bayesian calibration of the Mg/Ca paleothermometer in planktic foraminifera. Paleoceanogr. Paleoclimatol. 34, 2005–2030 (2019). 22. 22. Brady, E. et al. The connected isotopic water cycle in the community Earth system model version 1. J. Adv. Model. Earth Sys. 11, 2547–2566 (2019). 23. 23. Amrhein, D. E., Hakim, G. J. & Parsons, L. A. Quantifying structural uncertainty in paleoclimate data assimilation with an application to the last millennium. Geophys. 
Res. Lett. 47, e2020GL090485 (2020). 24. 24. Köhler, P., Nehrbass-Ahles, C., Schmitt, J., Stocker, T. F. & Fischer, H. A 156 kyr smoothed history of the atmospheric greenhouse gases CO2, CH4, and N2O and their radiative forcing. Earth Syst. Sci. Data 9, 363–387 (2017). 25. 25. Braconnot, P. & Kageyama, M. Shortwave forcing and feedbacks in Last Glacial Maximum and Mid-Holocene PMIP3 simulations. Philos. Trans. R. Soc. A 373, 20140424 (2015). 26. 26. Berger, A. Long-term variations of daily insolation and quaternary climatic changes. J. Atmos. Sci. 35, 2362–2367 (1978). 27. 27. Huybers, P. & Denton, G. Antarctic temperature at orbital timescales controlled by local summer duration. Nat. Geosci. 1, 787–792 (2008). 28. 28. Imbrie, J. et al. On the structure and origin of major glaciation cycles 1. Linear responses to Milankovitch forcing. Paleoceanography 7, 701–738 (1992). 29. 29. McManus, J. F., Francois, R., Gherardi, J.-M., Keigwin, L. D. & Brown-Leger, S. Collapse and rapid resumption of Atlantic meridional circulation linked to deglacial climate changes. Nature 428, 834–837 (2004). 30. 30. Böhm, E. et al. Strong and deep Atlantic meridional overturning circulation during the last glacial cycle. Nature 517, 73–76 (2015). 31. 31. Lippold, J. et al. Constraining the variability of the Atlantic meridional overturning circulation during the Holocene. Geophys. Res. Lett. 46, 11338–11346 (2019). 32. 32. Shakun, J. D. & Carlson, A. E. A global perspective on Last Glacial Maximum to Holocene climate change. Quat. Sci. Rev. 29, 1801–1816 (2010). 33. 33. Clark, P. U. et al. Global climate evolution during the last deglaciation. Proc. Natl Acad. Sci. USA 109, E1134–E1142 (2012). 34. 34. Pedro, J. B. et al. Beyond the bipolar seesaw: toward a process understanding of interhemispheric coupling. Quat. Sci. Rev. 192, 27–46 (2018). 35. 35. He, F. et al. Northern Hemisphere forcing of Southern Hemisphere climate during the last deglaciation. Nature 494, 81–85 (2013). 36. 36. 
Ritz, S. P., Stocker, T. F., Grimalt, J. O., Menviel, L. & Timmermann, A. Estimated strength of the Atlantic overturning circulation during the last deglaciation. Nat. Geosci. 6, 208–212 (2013). 37. 37. Praetorius, S. K. et al. The role of Northeast Pacific meltwater events in deglacial climate change. Sci. Adv. 6, eaay2915 (2020). 38. 38. Gray, W. R. et al. Wind-driven evolution of the North Pacific subpolar gyre over the last deglaciation. Geophys. Res. Lett.47, e2019GL086328 (2020). 39. 39. Reimer, P. J. et al. IntCal13 and Marine13 radiocarbon age calibration curves 0–50,000 years cal BP. Radiocarbon 55, 1869–1887 (2013). 40. 40. Blaauw, M. & Christen, J. A. Flexible paleoclimate age-depth models using an autoregressive gamma process. Bayesian Anal. 6, 457–474 (2011). 41. 41. Locarnini, R. A. et al. World Ocean Atlas 2013. Volume 1, Temperature (NOAA, 2013). 42. 42. Wang, K. J. et al. Group 2i Isochrysidales produce characteristic alkenones reflecting sea ice distribution. Nat. Commun. 12, 15 (2021). 43. 43. Sachs, J. P. Cooling of Northwest Atlantic slope waters during the Holocene. Geophys. Res. Lett. 34, L03609 (2007). 44. 44. Tierney, J. E., Haywood, A. M., Feng, R., Bhattacharya, T. & Otto-Bliesner, B. L. Pliocene warmth consistent with greenhouse gas forcing. Geophys. Res. Lett. 46, 9136–9144 (2019). 45. 45. Rayner, N. A. et al. Global analyses of sea surface temperature, sea ice, and night marine air temperature since the late nineteenth century. J. Geophys. Res. 108, 4407 (2003). 46. 46. Gray, W. R. & Evans, D. Nonthermal influences on Mg/Ca in planktonic foraminifera: a review of culture studies and application to the Last Glacial Maximum. Paleoceanogr. Paleoclimatol. 34, 306–315 (2019). 47. 47. Lambeck, K., Rouby, H., Purcell, A., Sun, Y. & Sambridge, M. Sea level and global ice volumes from the Last Glacial Maximum to the Holocene. Proc. Natl Acad. Sci. USA 111, 15296–15303 (2014). 48. 48. Monnin, E. et al. 
Atmospheric CO2 concentrations over the Last Glacial Termination. Science 291, 112–114 (2001). 49. 49. MacFarling Meure, C. et al. Law Dome CO2, CH4 and N2O ice core records extended to 2000 years BP. Geophys. Res. Lett. 33, L14810 (2006). 50. 50. Rubino, M. et al. A revised 1000-year atmospheric 13C-CO2 record from Law Dome and South Pole, Antarctica. J. Geophys. Res. Atmos. 118, 8482–8499 (2013). 51. 51. Marcott, S. A. et al. Centennial-scale changes in the global carbon cycle during the last deglaciation. Nature 514, 616–619 (2014). 52. 52. Ahn, J. & Brook, E. J. Siple Dome ice reveals two modes of millennial CO2 change during the last ice age. Nat. Commun. 5, 3723 (2014). 53. 53. Bereiter, B. et al. Revision of the EPICA Dome C CO2 record from 800 to 600 kyr before present. Geophys. Res. Lett. 42, 542–549 (2015). 54. 54. Olsen, A. et al. The Global Ocean Data Analysis Project version 2 (GLODAPv2) – an internally consistent data product for the world ocean. Earth Syst. Sci. Data 8, 297–323 (2016). 55. 55. Lisiecki, L. E. & Raymo, M. E. A Pliocene-Pleistocene stack of 57 globally distributed benthic δ18O records. Paleoceanography 20, PA1003 (2005). 56. 56. Schrag, D. P., Hampt, G. & Murray, D. W. Pore fluid constraints on the temperature and oxygen isotopic composition of the glacial ocean. Science 272, 1930–1932 (1996). 57. 57. LeGrande, A. N. & Schmidt, G. A. Global gridded data set of the oxygen isotopic composition in seawater. Geophys. Res. Lett. 33, L12604 (2006). 58. 58. Zhu, J., Poulsen, C. J. & Tierney, J. E. Simulation of Eocene extreme warmth and high climate sensitivity through cloud feedbacks. Sci. Adv. 5, eaax1874 (2019). 59. 59. Hurrell, J. W. et al. The community Earth system model: a framework for collaborative research. Bull. Am. Meteorol. Soc. 94, 1339–1360 (2013). 60. 60. Meehl, G. A. et al. Effects of model resolution, physics, and coupling on Southern Hemisphere storm tracks in CESM1.3. Geophys. Res. Lett. 46, 12408–12416 (2019). 61. 61. 
Zhu, J. et al. Reduced ENSO variability at the LGM revealed by an isotope-enabled Earth system model. Geophys. Res. Lett. 44, 6984–6992 (2017). 62. 62. Stevenson, S. et al. Volcanic eruption signatures in the isotope-enabled last millennium ensemble. Paleoceanogr. Paleoclimatol. 34, 1534–1552 (2019). 63. 63. Lüthi, D. et al. High-resolution carbon dioxide concentration record 650,000–800,000 years before present. Nature 453, 379–382 (2008). 64. 64. Loulergue, L. et al. Orbital and millennial-scale features of atmospheric CH4 over the past 800,000 years. Nature 453, 383–386 (2008). 65. 65. Schilt, A. et al. Atmospheric nitrous oxide during the last 140,000 years. Earth Planet. Sci. Lett. 300, 33–43 (2010). 66. 66. Peltier, W. R., Argus, D. F. & Drummond, R. Space geodesy constrains ice age terminal deglaciation: The global ICE-6G_C (VM5a) model. J. Geophys. Res. Solid Earth 120, 450–487 (2015). 67. 67. DiNezio, P. N. et al. Glacial changes in tropical climate amplified by the Indian Ocean. Sci. Adv. 4, eaat9658 (2018). 68. 68. Duplessy, J. C., Labeyrie, L. & Waelbroeck, C. Constraints on the ocean oxygen isotopic enrichment between the Last Glacial Maximum and the Holocene: Paleoceanographic implications. Quat. Sci. Rev. 21, 315–330 (2002). 69. 69. Kageyama, M. et al. The PMIP4 contribution to CMIP6–part 4: scientific objectives and experimental design of the PMIP4-CMIP6 Last Glacial Maximum experiments and PMIP4 sensitivity experiments. Geosci. Model Dev. 10, 4035–4055 (2017). 70. 70. Otto-Bliesner, B. L. et al. The PMIP4 contribution to CMIP6–part 2: two interglacials, scientific objective and experimental design for Holocene and Last Interglacial simulations. Geosci. Model Dev. 10, 3979–4003 (2017). 71. 71. Lawrence, D. M. et al. Parameterization improvements and functional and structural advances in Version 4 of the Community Land Model. J. Adv. Model. Earth Sys. 3, M03001 (2011). 72. 72. Bartlein, P. J. & Shafer, S. L. 
Paleo calendar-effect adjustments in time-slice and transient climatemodel simulations (PaleoCalAdjust v1.0): impact and strategies for data analysis. Geosci. Model Dev. 12, 3889–3913 (2019). 73. 73. Whitaker, J. S. & Hamill, T. M. Ensemble data assimilation without perturbed observations. Mon. Weather Rev. 130, 1913–1924 (2002). 74. 74. Hamill, T. M. Interpretation of rank histograms for verifying ensemble forecasts. Mon. Weather Rev. 129, 550–560 (2001). 75. 75. Jones, T. R. et al. Water isotope diffusion in the WAIS Divide ice core during the Holocene and last glacial. J. Geophys. Res. Earth Surface 122, 290–309 (2017). 76. 76. Sokratov, S. A. & Golubev, V. N. Snow isotopic content change by sublimation. J. Glaciol. 55, 823–828 (2009). 77. 77. Comas-Bru, L. et al. Evaluating model outputs using integrated global speleothem records of climate change since the last glacial. Clim. Past 15, 1557–1579 (2019). 78. 78. Atsawawaranunt, K. et al. The SISAL database: a global resource to document oxygen and carbon isotope records from speleothems. Earth Syst. Sci. Data 10, 1687–1713 (2018). 79. 79. Blunier, T. & Brook, E. J. Timing of millennial-scale climate change in Antarctica and Greenland during the Last Glacial Period. Science 291, 109–112 (2001). 80. 80. Stenni, B. et al. Expression of the bipolar see-saw in Antarctic climate records during the last deglaciation. Nat. Geosci. 4, 46–49 (2011). 81. 81. Watanabe, O. et al. Homogeneous climate variability across East Antarctica over the past three glacial cycles. Nature 422, 509–512 (2003). 82. 82. Brook, E. J. et al. Timing of millennial-scale climate change at Siple Dome, West Antarctica, during the last glacial period. Quat. Sci. Rev. 24, 1333–1343 (2005). 83. 83. Stenni, B. et al. The deuterium excess records of EPICA Dome C and Dronning Maud Land ice cores (East Antarctica). Quat. Sci. Rev. 29, 146–159 (2010). 84. 84. Steig, E. J. et al. Synchronous climate changes in Antarctica and the North Atlantic. 
Science 282, 92–95 (1998). 85. 85. Petit, J. R. et al. Climate and atmospheric history of the past 420,000 years from the Vostok ice core, Antarctica. Nature 399, 429–436 (1999). 86. 86. Markle, B. R. et al. Global atmospheric teleconnections during Dansgaard–Oeschger events. Nat. Geosci. 10, 36–40 (2017). 87. 87. Vinther, B. M. et al. Synchronizing ice cores from the Renland and Agassiz ice caps to the Greenland Ice Core Chronology. J. Geophys. Res. Atmos. 113, D08115 (2008). 88. 88. Stuiver, M. & Grootes, P. M. GISP2 oxygen isotope ratios. Quat. Res. 53, 277–284 (2000). 89. 89. Johnsen, S. J. et al. The δ18O record along the Greenland Ice Core Project deep ice core and the problem of possible Eemian climatic instability. J. Geophys. Res. Oceans 102, 26397–26410 (1997). 90. 90. Andersen, K. K. et al. High-resolution record of Northern Hemisphere climate extending into the last interglacial period. Nature 431, 147–151 (2004). 91. 91. Holmgren, K. et al. Persistent millennial-scale climatic variability over the past 25,000 years in Southern Africa. Quat. Sci. Rev. 22, 2311–2326 (2003). 92. 92. Novello, V. F. et al. A high-resolution history of the South American Monsoon from Last Glacial Maximum to the Holocene. Sci. Rep. 7, 44267 (2017). 93. 93. Cheng, H. et al. The climate variability in northern Levant over the past 20,000 years. Geophys. Res. Lett. 42, 8641–8650 (2015). 94. 94. Dutt, S. et al. Abrupt changes in Indian summer monsoon strength during 33,800 to 5500 years B.P. Geophys. Res. Lett. 42, 5526–5532 (2015). 95. 95. Ayliffe, L. K. et al. Rapid interhemispheric climate links via the Australasian monsoon during the last deglaciation. Nat. Commun. 4, 2908 (2013). 96. 96. Partin, J. W., Cobb, K. M., Adkins, J. F., Clark, B. & Fernandez, D. P. Millennial-scale trends in west Pacific warm pool hydrology since the Last Glacial Maximum. Nature 449, 452–455 (2007). 97. 97. Cai, Y. et al. 
Variability of stalagmite-inferred Indian monsoon precipitation over the past 252,000 y. Proc. Natl Acad. Sci. USA 112, 2954–2959 (2015). 98. 98. Fleitmann, D. et al. Timing and climatic impact of Greenland interstadials recorded in stalagmites from northern Turkey. Geophys. Res. Lett. 36, L19707 (2009). 99. 99. Cruz, F. W. et al. Insolation-driven changes in atmospheric circulation over the past 116,000 years in subtropical Brazil. Nature 434, 63–66 (2005). 100. 100. Hellstrom, J., McCulloch, M. & Stone, J. A detailed 31,000-year record of climate and vegetation change, from the isotope geochemistry of two New Zealand speleothems. Quat. Res. 50, 167–178 (1998). 101. 101. Grant, K. M. et al. Rapid coupling between ice volume and polar temperature over the past 150,000 years. Nature 491, 744–747 (2012). 102. 102. Cheng, H. et al. Climate change patterns in Amazonia and biodiversity. Nat. Commun. 4, 1411 (2013). ## Acknowledgements We thank B. Malevich for early discussions and explorations on LGM-to-present data assimilation, and M. Fox and N. Rapp for help in compiling the proxy data. We thank P. DiNezio for providing initial and boundary condition files for the CESM simulations, and B. Markle for assistance in compiling and sharing the ice core water isotope data. This study was supported by National Science Foundation (NSF) grant numbers AGS-1602301 and AGS-1602223, and Heising-Simons Foundation grant numbers 2016-012, 2016-014 and 2016-015. The CESM project is supported primarily by the NSF. This material is based on work supported by the National Center for Atmospheric Research, which is a major facility sponsored by the NSF under Cooperative Agreement No. 1852977. Computing and data storage resources, including the Cheyenne supercomputer (https://doi.org/10.5065/D6RX99HX), were provided by the Computational and Information Systems Laboratory (CISL) at NCAR. ## Author information Authors ### Contributions M.B.O. 
conducted the data assimilation, led the analysis and interpretation of the results, and designed the figures. M.B.O. and J.E.T. led the writing of this paper. J.E.T. led the proxy data compilation. J.K. wrote the DASH code, based on methods and input by R.T. and G.J.H. J.Z. and C.J.P. planned and conducted the iCESM simulations. All authors contributed to the design of the study and the writing of this manuscript. ### Corresponding author Correspondence to Matthew B. Osman. ## Ethics declarations ### Competing interests The authors declare no competing interests. Peer review information Nature thanks William Gray and the other, anonymous, reviewer(s) for their contribution to the peer review of this work. Peer reviewer reports are available. Publisher’s note Springer Nature remains neutral with regard to jurisdictional claims in published maps and institutional affiliations. ## Extended data figures and tables ### Extended Data Fig. 1 Time resolution and temporal coverage of the SST proxy data compilation. a, Histogram of record resolution (denoting the median sample resolution for each record), computed for each proxy type. b, Histogram of record length for each proxy type. ### Extended Data Fig. 2 Statistical validation of randomly withheld marine geochemical proxies. a, From left: observed versus forward-modelled δ18Oc mean values for each site using the posterior data assimilation estimates. Shown at right are the associated median R2validation scores (each based on n = ~100 LGMR ensemble members), computed on a per-site basis (see Methods section “Internal and external validation testing”). bd, As in a, but for $${\text{U}}_{37}^{\text{K'}}$$ (b), Mg/Ca (c) and TEX86 (d), respectively. ### Extended Data Fig. 3 Validation using independent δ18Op ice core and speleothem records. a, 3 ka–preindustrial (PI; 0 ka) posterior ∆δ18Op field; overlying markers show the observed 3 ka–PI ∆δ18Op values from speleothems and ice cores. 
Only records spanning at least 18 of the past 24 kyr are shown. ∆R2 and ∆RMSEP values denote the change in observed versus posterior assimilated ∆δ18Op values relative to the prior (that is, iCESM) estimated values. bh, As in a, but for values differenced at 6, 9, 12, 14, 16, 18 and 21 ka versus the PI, respectively. I, All observed ∆δ18Op versus model prior values; dashed line indicates the 1:1 relationship. j, All observed ∆δ18Op versus posterior values, which show a strong improvement in ∆R2 and ∆RMSEP over the prior. Note that each scatter point shown in panels i, j corresponds to an external validation site shown in panels ah. ### Extended Data Fig. 4 Time-comparison of posterior LGMR δ18Op with selected δ18Op ice core and speleothem records. Uncertainty ranges denote the ±1σ level (dark) and 95% confidence range (light) from the LGMR ensemble. Also shown for comparison are the full range (shaded grey) and median iCESM time slice prior values (50-year means) for each site. See also Extended Data Table 2. ### Extended Data Fig. 5 Influences on global surface temperature evolution during the past 24 kyr. ac, Spatial LGM-to-present correlations between surface air temperature (SAT) and combined greenhouse gas24 and global albedo radiative forcing13 (a); summer length at 65°S;27 (b); and the –1 × 231Pa/230Th AMOC proxy index from Bermuda Rise29,30,31 (c; shown such that SAT correlations are positive with AMOC strength). ### Extended Data Fig. 6 Proxy-specific GMST reconstructions and comparison of Holocene GMST trends. a, δ18Oc, $${\text{U}}_{37}^{\text{K'}}$$, and Mg/Ca-derived GMST reconstructions, derived using both the proxy-only (PO) and data assimilation (DA) approaches. In a, the shaded regions show the ±1σ range across n = 50 ensemble members for the DA-based GMST estimates, and n = 10,000 realizations for the PO-based GMST estimates (note uncertainty ranges are not shown for the dotted-dashed curves). 
b, Sensitivity of the Holocene GMST evolution to the removal of proxies situated in contiguous 15° latitudinal bands, both for the PO and DA approaches. c, Sensitivity of the DA-based Holocene GMST evolution to proxy seasonality (computed by fixing foraminifera growth seasonality to either preindustrial (PI) or LGM monthly SSTs for Mg/Ca and δ18Oc, or by removing records with seasonal alkenone production for $${\text{U}}_{37}^{\text{K'}}$$), and to the ‘pooled’ foraminifera species SST calibrations of refs. 20,21 (see Supplementary Information). All ∆GMST time series denote deviations relative to the past 2 kyr. ### Extended Data Fig. 7 Hemispheric variability during the past 24 kyr. Ensemble distribution (n = 500) of LGMR-estimated Northern Hemisphere (NH; red) and Southern Hemisphere (SH; blue) mean hemispheric temperatures during the past 24 kyr. Shown at top is the surface temperature spatial difference for the Bølling–Allerød (BA) and Younger Dryas (YD) intervals. Range of hemispheric last deglacial and interglacial onset timings are shown as histograms at bottom. The LGMR is plotted alongside reconstructed decadal hemispheric temperatures from the last millennium reanalysis v2.117 and HadCRUT5 observational product11. ## Supplementary information ### Supplementary Information This file contains Supplementary Information sections 1–-5. ## Rights and permissions Reprints and Permissions Osman, M.B., Tierney, J.E., Zhu, J. et al. Globally resolved surface temperatures since the Last Glacial Maximum. Nature 599, 239–244 (2021). https://doi.org/10.1038/s41586-021-03984-4 • Accepted: • Published: • Issue Date:
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8148507475852966, "perplexity": 17635.230166974852}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-49/segments/1637964358953.29/warc/CC-MAIN-20211130050047-20211130080047-00633.warc.gz"}
https://cs.stackexchange.com/questions/53105/matrix-permanent-p-hard-problems-and-np
# Matrix permanent, #P-hard problems and NP First of all, I'm not a computer scientist so I apologize if this is a stupid question. I know that the problem of computing the permanent of a matrix is #P-hard, which as I understand it implies that if you can solve an arbitrary instance of the problem in polynomial time then you can in principle solve any problem in #P in polynomial time. What I'm wondering is if it also implies being able to solve NP problems in polynomial time (I'm guessing not). • Thank you. That's very interesting, since it is possible to engineer quantum mechanical systems the measurement probabilities of which are described by the permanent of the system's matrix. That's why I doubted it, it seemed like it would make it theoretically possible to efficiently solve NP-Complete problems on a quantum computer. I suppose the big challenge there is doing it in the general case and not just for one specific instance. Is your statement related to the fact that $NP \subseteq P^{\#P}$? – fulis Feb 15 '16 at 7:42 • Shouldn't #2-sum be #P-hard if $P=NP$? – Travis Wells Dec 9 '19 at 22:47
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7500302195549011, "perplexity": 221.04947567421416}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-45/segments/1603107880878.30/warc/CC-MAIN-20201023073305-20201023103305-00207.warc.gz"}
https://www.arxiv-vanity.com/papers/gr-qc/0610018/
The Asymptotic Safety Scenario in Quantum Gravity [6mm] – An Introduction – M. Niedermaier***Membre du CNRS. [4mm] Laboratoire de Mathematiques et Physique Theorique CNRS/UMR 6083, Université de Tours Parc de Grandmont, 37200 Tours, France Abstract The asymptotic safety scenario in quantum gravity is reviewed, according to which a renormalizable quantum theory of the gravitational field is feasible which reconciles asymptotically safe couplings with unitarity. All presently known evidence is surveyed: (a) from the $2+\epsilon$ expansion, (b) from the perturbation theory of higher derivative gravity theories and a ‘large $N$’ expansion in the number of matter fields, (c) from the 2-Killing vector reduction, and (d) from truncated flow equations for the effective average action. Special emphasis is given to the role of perturbation theory as a guide to ‘asymptotic safety’. Further it is argued that as a consequence of the scenario the selfinteractions appear two-dimensional in the extreme ultraviolet. Two appendices discuss the distinct roles of the ultraviolet renormalization in perturbation theory and in the flow equation formalism. 1. Survey of the scenario and evidence for it The quest for a physically viable theory of quantized gravitation is ongoing; in part because the physics it ought to describe is unknown, and in part because different approaches may not ‘approach’ the same physics. The most prominent contenders are string theory and loop quantum gravity, with ample literature available on either side. For book-sized expositions see for example [68, 125, 132]. The present review surveys a circle of ideas which differ in several important ways from these approaches; we refer to [110] for a more detailed account with a slightly different emphasis.
1.1 Survey of the scenario In brief, the scenario delineates conditions under which a functional integral based quantum theory of gravity can be viable beyond the level of an effective field theory: first a physics premise (“antiscreening”) is made about the selfinteraction of the quantum degrees of freedom in the ultraviolet. Second, the effective diminution of the relevant degrees of freedom in the ultraviolet (on which morally speaking all approaches agree) is interpreted as universality in the statistical physics sense in the vicinity of an ultraviolet renormalization group fixed point. Third, the resulting picture of microscopic geometry is fractal-like with a local dimensionality of two. The concrete implementation of these ideas has begun only recently and led to a number of surprising results to be reviewed here. Part of the physics intuition, on the other hand, dates back to an 1979 article by S. Weinberg [152], see also [66]. Motivated by the analogy to the asymptotic freedom property of nonabelian gauge theories, the term “asymptotic safety” was suggested in [152], indicating that physically motivated running couplings should be “safe” from divergencies at all scales. Following this suggestion we shall refer to the above circle of ideas as the “asymptotic safety scenario” for quantum gravity. For convenient orientation we display the main features in overview: Asymptotic safety scenario – main ideas: • The gravitational field itself is taken as the prime carrier of the relevant classical and quantum degrees of freedom; its macro- and micro-physics are related through a renormalization flow. • As the basic physics premise stipulate that the physical degrees of freedom in the ultraviolet interact predominantly antiscreening. • Based on this premise benign renormalization properties in the ultraviolet are plausible. The resulting “Quantum Gravidynamics” can then be viewed as a peculiar quasi-renormalizable field theory based on a non-Gaussian fixed point. 
• In the extreme ultraviolet the residual interactions appear two-dimensional. The first point is shared by the effective field theory framework for quantum gravity, the others are needed to go beyond it in a functional integral based approach. The rationale for trying to do so is twofold: first, the effective field framework gives rise only to very few universal corrections which are quantitatively too small to be phenomenologically interesting. Second, once the physics premise underlying a regularized functional integral for gravity has been made a “UV completion” based simply on removal of the regulator, if feasible, is physically well-motivated and computationally seamless. A strategy centered around a functional integral picture was indeed adopted early on [101] but is now mostly abandoned. A functional integral over geometries of course has to differ in several crucial ways from one for fields on a fixed geometry. This led to the development of several formulations (canonical, covariant [44, 45, 46], proper time [142, 143] and covariant Euclidean [74, 64]). As is well-known the functional integral picture is also beset by severe technical problems [140, 42, 64, 105]. Nevertheless this should not distract attention from the fact that a functional integral has a physics content which differs from the physics content of other approaches. For want of a better formulation we shall refer to this fact by saying that a functional integral picture “takes the degrees of freedom of the gravitational field seriously also in the quantum regime”. Let us briefly elaborate on that. Arguably the cleanest intuition to ‘what quantizing gravity might mean’ comes from the functional integral picture. Transition or scattering amplitudes for nongravitational processes should be affected not only by one geometry solving the gravitational field equations, but by a ‘weighted superposition’ of ‘nearby possible’ off-shell geometries.
The rationale behind this intuition is that all known (microscopic) matter is quantized that way, and using an off-shell matter configuration as the source of the Einstein field equations is in general inconsistent, unless the geometry is likewise off-shell. Moreover, relativistic quantum field theory suggests that the matter-geometry coupling is effected not only through averaged or large scale properties of matter. For example nonvanishing connected correlators of a matter energy momentum tensor should be a legitimate source of gravitational radiation as well, as should be the Casimir energy, see [55, 58]. Of course this doesn’t tell in which sense the geometry is off-shell, nor which class of possible geometries ought to be considered and be weighed with respect to which measure. Rapid decoherence, a counterpart of spontaneous symmetry breaking, and other unknown mechanisms may in addition mask the effects of the superposition principle. Nevertheless the argument suggests that the degrees of freedom of the gravitational field should be taken seriously also in the quantum regime, roughly along the lines of a functional integral. Doing so one readily arrives at the effective field theory description of quantum gravity, see [33] for a recent review. It is a commonly accepted criterion that a theory of quantum gravity, even one evading a functional integral over geometries, should match whatever universal results are obtained from an effective field theory framework. The issue at stake thus is the extent to which different “UV completions” of the effective field theory description have a physics content different from the latter and from each other. Of course in the absence of empirical guidance the ‘true’ physics of quantum gravity is unknown; so for the time being it will be important to try to isolate differences in the physics content of the various “UV completions”. 
By physics content we mean here qualitative or quantitative results for the values of “quantum gravity corrections” to generic physical quantities in the approach considered. Generic physical quantities should be such that they in principle capture the entire invariant content of a theory. In a conventional field theory S-matrix elements by and large have this property, in canonical general relativity Dirac observables play this role [7, 147, 48]. In quantum gravity, in contrast, no agreement has been reached on the nature of such generic physical quantities. The present scenario proposes a UV completion proper, one which is based on the very same physics principles that make the effective field theory description so credible and which renders the crucial interpolating regime computationally accessible. We share the viewpoint expressed by Wilczek in [153]: “Whether the next big step will require a sharp break from the principles of quantum field theory, or, like the previous ones, a better appreciation of its potentialities, remains to be seen”. Here we center the discussion around the above four main ideas, and, for short, call a quantum theory of gravity based on them Quantum Gravidynamics. For the remainder of Section 1.1 we now discuss a number of key issues that arise. In any functional integral picture one has to face the crucial renormalizability problem. Throughout we shall be concerned exclusively with (non-)renormalizability in the ultraviolet. The perspective on the nature of the impasse entailed by the perturbative nonrenormalizability of the Einstein–Hilbert action (see Bern [21] for a review), however, has changed significantly since the time it was discovered by ’t Hooft and Veltmann [140]. 
First, the effective field theory framework applied to quantum gravity provides unambiguous answers for some lowest order corrections despite the perturbative nonrenormalizability of the ‘fundamental’ action, as stressed by Donoghue (see [27, 33] and references therein). The role of an a-priori microscopic action is moreover strongly deemphasized when a Kadanoff–Wilson view on renormalization is adopted. We shall give a quick reminder on this framework in Appendix A. Applied to gravity it means that the Einstein–Hilbert action should not be considered as the microscopic (high energy) action, rather the renormalization flow itself will dictate to a certain extent which microscopic action to use, and whether or not there is a useful description of the extreme ultraviolet regime in terms of ‘fundamental’ (perhaps nonmetric) degrees of freedom. The extent to which this is true hinges on the existence of a fixed point with a renormalized trajectory emanating from it. The fixed point guarantees universality in the statistical physics sense. If there is a fixed point any action on a renormalized trajectory describes identically the same physics on all energy scales lower than the one where it is defined. Following the trajectory back (almost) into the fixed point one can in principle extract unambiguous answers for physical quantities on all energy scales. Compared to the effective field theory framework the main advantage of genuine renormalizability lies not primarily in the gained energy range in which reliable computations can be made, but rather that one has a chance to properly identify ‘large’ quantum gravity effects at low energies (assuming they exist). The effective field theory framework rests on a decoupling assumption: there exists a potentially process-dependent scale such that the low energy degrees of freedom relevant for the process obey an approximately autonomous dynamics.
Based on this assumption some unambiguously defined low energy effects of quantized gravity can be identified, but are found to be suppressed by the powers of energy scale/Planck mass expected on dimensional grounds. However in the presence of massless degrees of freedom the decoupling assumption may fail (mediated e.g. by anomalies [10] or by spontaneous symmetry breaking) and the extent to which it is valid in ‘quantized’ gravity is a dynamical problem. In a theory of quantum gravidynamics this dynamical problem can be investigated: the effect of high energy (Planck scale) processes can in principle be computationally propagated through many orders of magnitude down to accessible energies, where they may leave a detectable low energy imprint. Note that the nature of the ‘fundamental’ degrees of freedom is of secondary importance in this context. From the viewpoint of renormalization theory it is the universality class that matters not the particular choice of dynamical variables. Once a functional integral picture has been adopted even nonlocally and nonlinearly related sets of fields or other variables may describe the same universality class – and hence the same physics. Generally, the arena on which the renormalization group acts is a space of actions or, equivalently, a space of (regularized) measures. A typical action has the form $S = \sum_\alpha u_\alpha P_\alpha$, where the $P_\alpha$ are interaction monomials (including kinetic terms) and the $u_\alpha$ are scale dependent coefficients. The subset $g_i$ which cannot be removed by field redefinitions are called essential parameters, or couplings. Usually one makes them dimensionless by taking out a suitable power of the scale parameter $\mu$, $g_i(\mu) = \mu^{-d_i} u_i(\mu)$. In the following the term essential coupling will always refer to these dimensionless variants. We also presuppose the principles according to which a (Wilson-Kadanoff) renormalization flow is defined on this arena. For the convenience of the reader a brief glossary is included in Section 1.4.
In the context of Quantum Gravidynamics some key notions (coarse graining operation, unstable manifold and continuum limit) have a somewhat different status which we outline below. Initially all concepts in a Wilson-Kadanoff renormalization procedure refer to a choice of coarse graining operation. It is part of the physics premise of a functional integral approach that there is a physically relevant distinction between coarse grained and fine grained geometries. On a classical level this amounts to the distinction, for example, between a perfect fluid solution of the field equations and one generated by its $10^{30}$ or so molecular constituents. A sufficiently large set of Dirac observables would be able to discriminate two such spacetimes. One can also envisage a vacuum counterpart of this distinction and view the coarse graining scale as analogous to an ‘intrinsic clock’ variable in general relativity. Whenever we shall refer later on to “coarse grained” versus “fine grained” geometries we have a similar picture in mind for the ensembles of off-shell geometries entering a (regularized) functional integral. For example, the value of integrated curvature invariants (like $\int\! d^4x \sqrt{g}\, R^2$) may provide a rough measure for the coarseness. Tested proposals for an intrinsic coarse graining scale for geometries are however presently not available. As a substitute one can define the coarse graining with respect to a state-dependent dynamically adjusted background metric. Let $\bar{g}_{\alpha\beta}$ be an initially prescribed background metric and suppose that it has been used to define a “background covariant” notion of coarse graining, e.g. by referring to the spectrum of a covariant differential operator built from $\bar{g}$. The coarse graining can then be used to construct the functional integral averages subject to suitable boundary conditions that encode information about the state vector.
Eventually one obtains a functional (“a state”, roughly in the algebraic sense) which depends parametrically on the background via a functional $F(\bar{g})$. In a second step one then selfconsistently adjusts the background metric to one solving $\langle q_{\alpha\beta}\rangle_{F(\bar{g}_*)} \overset{!}{=} \bar{g}^*_{\alpha\beta}$.   (1.1) Here $\bar{g}_*$ is defined via a stationarity condition referring to the full quantum dynamics and hence implicitly to the underlying state, see section 1.2. Equation (1.1) can thus be viewed as selecting a class of state dependent backgrounds such that the average of the quantum metric in the state co-determined by $\bar{g}_*$ coincides with $\bar{g}_*$. For definiteness we formulated (1.1) in terms of the metric, assuming in particular that $\langle q_{\alpha\beta}\rangle$ is well-defined. This assumption is dispensable, however, as one could rephrase the above construction with whatever (nonlocal) composites or observables one decides to work with: given a family of composites containing the information about the background metric deemed relevant, one can for any initially prescribed set of their values define a coarse graining operation relative to it and use the coarse graining to construct the functional averages, depending parametrically on the values. In a second step one can stipulate the counterpart of (1.1), which dynamically adjusts the values to the selfconsistent ones. In formulating (1.1) we assumed that the ‘infrared problem’ has been solved, in particular that the full averages used for the adjustment contain information also about the infrared degrees of freedom and are well-defined. The same adjustment could, however, be done using scale dependent Wilsonian averages at some infrared cutoff scale, see Section 1.2. With respect to a given coarse graining operation one can ask whether the flow of actions or couplings has a fixed point. The existence of a fixed point is the raison d’être for the universality properties (in the statistical field theory sense) which eventually are ‘handed down’ to the physics in the low energy regime.
By analogy with other field theoretical systems one should probably not expect that the existence (or nonexistence) of a (non-Gaussian) fixed point will be proven with mathematical rigor in the near future. From a physics viewpoint, however, it is the high degree of universality ensued by a fixed point that matters, rather than the existence in the mathematical sense. For example nonabelian gauge theories appear to have a (Gaussian) fixed point ‘for all practical purposes’, while their rigorous construction as the continuum limit of a lattice theory is still deemed a ‘millennium problem’. In the case of quantum gravity we shall survey in Section 1.3 various pieces of evidence for the existence of a (non-Gaussian) fixed point. Accepting the existence of a (non-Gaussian) fixed point as a working hypothesis one is led to determine the structure of its unstable manifold. Given a coarse graining operation and a fixed point of it, the stable (unstable) manifold is the set of all points connected to the fixed point by a coarse graining trajectory terminating at it (emanating from it). It is not guaranteed though that the space of actions can in the vicinity of the fixed point be divided into a stable and an unstable manifold; there may be trajectories which develop singularities or enter a region of coupling space deemed unphysical for other reasons and thus remain unconnected to the fixed point. The stable manifold is the innocuous part of the problem, it is the unstable manifold which is crucial for the construction of a continuum limit. By definition it is swept out by flow lines emanating from the fixed point, the so-called renormalized trajectories. Points on such a flow line correspond to actions or measures which are called perfect in that they can be used to compute continuum answers for physical quantities even in the presence of an ultraviolet (UV) cutoff, like one which discretizes the base manifold [73]. 
In practice the unstable manifold is not known and renormalized trajectories have to be identified approximately by a tuning process. What is easy to determine is whether in a given expansion “sum over coupling times interaction monomial” a coupling will be driven away from the value the corresponding coordinate has at the fixed point after a sufficient number of coarse graining steps (in which case it is called relevant) or will move towards this fixed point value (in which case it is called irrelevant). Note that this question can be asked even for trajectories which are not connected to the fixed point. The dimension of the unstable manifold equals the number of independent relevant interaction monomials that are ‘connected’ to the fixed point by a (renormalized) trajectory. In quantum gravity traditionally the Einstein-Hilbert action is taken as the microscopic action. Perturbatively this action is not connected to a fixed point, not even to the perturbative Gaussian one. The question whether or not the situation improves in a nonperturbative formulation has been mostly addressed in discretized formulations, see [70, 5] and references therein. The discretized action used then may no longer have a naive (classical) continuum limit reproducing the Einstein-Hilbert action, but it is still labelled by two bare parameters. Conceptually one can assign to the discretized two-parametric measure a microscopic action in the above sense by requiring that combined with the regularized continuum measure [22] it reproduces approximately the same correlation functions. The microscopic action defined that way would presumably be different from the Einstein-Hilbert action but it would still contain only two tunable parameters. Presupposing again the existence of a fixed point, this type of construction relies on the hope that the non-naive discretization procedure adopted gets all but two coordinates of the unstable manifold automatically right. 
We refer to [70, 5] for the numerical evidence. In the present context a counterpart of these constructions starting from a perturbatively (weakly or strictly) microscopic renormalizable action (see Section 2) would seem more promising. The tuning to the unstable manifold then is more complicated, but perturbation theory (or other expansion techniques) can be used as a guideline, both analytically and for the extrapolation of numerical results. Typically the unstable manifold is indeed locally a manifold, though it may have cusps. Although ultimately it is only the unstable manifold that matters for the construction of a continuum limit, relevant couplings which blow up somewhere inbetween may make it very difficult to successfully identify the unstable manifold. In practice, if the basis of interaction monomials in which this happens is deemed natural and a change of basis in which the pathological directions could simply be omitted from the space of actions is very complicated, the problems caused by such a blow up may be severe. An important issue in practice is therefore whether in a natural basis of interaction monomials the couplings are ‘safe’ from such pathologies and the space of actions decomposes in the vicinity of the fixed point neatly into a stable and an unstable manifold. This regularity property is one aspect of “asymptotic safety”, as we shall see below. A second limitation appears in infinite dimensional situations. Whenever the coarse graining operates on an infinite set of potentially relevant interaction monomials convergence issues in the infinite sums formed from them may render formally equivalent bases inequivalent. In this case the geometric picture of a (coordinate independent) manifold breaks down or has to be replaced by a more refined functional analytic framework. 
An example of a field theory with an infinite set of relevant interaction monomials is QCD in a lightfront formulation [123] where manifest Lorentz and gauge invariance is given up in exchange for other advantages. In this case it is thought that there are hidden dependencies among the associated couplings so that the number of independent relevant couplings is finite and the theory is eventually equivalent to conventional QCD. Such a reduction of couplings is nontrivial because a relation among couplings has to be preserved under the renormalization flow. In quantum gravity related issues arise to which we turn later. As an interlude let us mention the special role of Newton’s constant in a diffeomorphism invariant theory with a dynamical metric. Let $S[q,\text{matter}]$ be any local action, where $q_{\alpha\beta}$ is the (‘quantum’) metric entering the (regularized) functional integral and the “matter” fields are not scaled when the metric is. Constant rescalings of the metric then give rise to a variation of the Lagrangian which vanishes on shell: $\frac{d}{d\omega^2}S[\omega^2 q,\text{matter}]\Big|_{\omega=1}=\int dx\,\sqrt{q}\;q_{\alpha\beta}\frac{\delta S[q,\text{matter}]}{\delta q_{\alpha\beta}}$.   (1.2) As a consequence one of the coupling parameters which in the absence of gravity would be essential (i.e. a genuine coupling) becomes inessential (i.e. can be changed at will by a redefinition of the fields). The running of this parameter, like that of a wave function renormalization constant, has no direct significance. If the pure gravity part contains the usual Ricci scalar term, $\sqrt{q}\,R(q)$, the parameter that becomes inessential may be taken as its prefactor $Z_N$. Up to a dimension dependent coefficient it can be identified with the inverse of Newton’s constant $G_{\rm Newton}$. It is also easy to see that in a background field formalism $Z_N$ sets the overall normalization of the spectral/momentum values.
Hence in a theory with a dynamical metric the three (conceptually distinct) inessential parameters: overall scale of the metric, the inverse of Newton’s constant, and the overall normalization of the spectral/momentum values are in one-to-one correspondence; see section 2.1 for details. For definiteness let us consider the running of Newton’s constant here. Being inessential, the quantum field theoretical running of $G_{\rm Newton}$ has significance only relative to the running coefficient of some reference operator. The most commonly used choice is a cosmological constant term $\Lambda$. Indeed $G_{\rm Newton}\,\Lambda^{\frac{d-2}{d}}=:{\rm const}\;\tau(\mu)^{2/d}$,   (1.3) is dimensionless and invariant under constant rescalings of the metric [83]. The associated essential coupling $\tau(\mu)$ is in the present context assumed to be asymptotically safe, i.e. $\lim_{\mu\to\infty}\tau(\mu)=\tau^*<\infty$. Factorizing it into the dimensionless Newton constant and $\Lambda$, there are two possibilities: One is that the scheme choices are such that both behave like asymptotically safe couplings, i.e. satisfy (1.5) below. This is advantageous for most purposes. The second possibility is realized when a singular solution for the flow equation for one is inserted into the flow equation for the other. This naturally occurs when Newton’s constant, viewed as an inessential parameter, is frozen at a prescribed value, which amounts to working with Planck units. Then its flow is trivial, but the flow equation for the other coupling carries an explicit $\mu$-dependence [119]. By and large both formulations are mathematically equivalent, see section 2.1. For definiteness we considered here the cosmological constant term as a reference operator, but many other choices are possible. In summary, the dimensionless Newton constant can be treated either as an inessential parameter (and then frozen to a constant value) or as a quasi-essential coupling (in which case it runs and assumes a finite positive asymptotic value). The unstable manifold of a fixed point is crucial for the construction of a continuum limit.
The fixed point itself describes a strictly scale invariant situation. More precisely the situation at the fixed point is by definition invariant under the chosen coarse graining (i.e. scale changing) operation. In particular any dependence on an ultraviolet cutoff must drop out ‘at’ the fixed point, which is why fixed points are believed to be indispensable for the construction of a scaling limit. If one now uses a different coarse graining operation the location of the fixed point will change in the given coordinate system provided by the essential couplings. One aspect of universality is that all field theories based on the fixed points referring to different coarse graining operations have the same long distance behavior. This suggests to introduce the notion of a continuum limit as an ‘equivalence class’ of scaling limits in which the physical quantities become strictly independent of the UV cutoff, largely independent of the choice of the coarse graining operation, strictly independent of the choice of gauge slice and, ideally, invariant under local reparameterizations of the fields. In the framework of statistical field theories one distinguishes between two construction principles, a massless scaling limit and a massive scaling limit. In the first case all the actions/measures on a trajectory emanating from the fixed point describe a scale invariant system, in the second case this is true only for the action/measure at the fixed point. In either case the unstable manifold of the given fixed point has to be at least one dimensional. Here we shall exclusively be interested in the second construction principle. Given a coarse graining operation and a fixed point of it with a nontrivial unstable manifold a scaling limit is then constructed by ‘backtracing’ a renormalized trajectory emanating from the fixed point. 
The number of parameters needed to specify a point on the unstable manifold gives the number of possible scaling limits – not all of which must be physically distinct, however. In this context it should be emphasized that the number of relevant directions in a chosen basis is not directly related to the predictive power of the theory. A number of authors have argued in the effective field theory framework that even theories with an infinite number of relevant parameters can be predictive [92, 13, 27]. This applies all the more if the theory under consideration is based on a fixed point, and thus not merely effective. One reason lies in the fact that the number of independent relevant directions connected to the fixed point might not be known. Hidden dependencies would then allow for a (genuine or effective) reduction of couplings [159, 113, 123, 9, 13]. For quantum gravity the situation is further complicated by the fact that generic physical quantities are likely to be related only nonlocally and nonlinearly to the metric. What matters for the predictive power is not the total number of relevant parameters but how the observables depend on them. To illustrate the point imagine a (hypothetical) case where $n^2$ observables are injective functions of $n$ relevant couplings each: $O_i(g_1,\ldots,g_n),\quad i=1,\ldots,n^2$.   (1.4) Then $n$ measurements will determine the couplings, leaving $n^2-n$ predictions. This gives plenty of predictions, for any $n$, and it remains true in the limit $n\to\infty$, despite the fact that one then has infinitely many relevant couplings. This example may be seen as a mathematical abstraction of the reason why effective field theories (or renormalizable ones with a UV cutoff kept) are predictive. The $O_i$’s may depend on additional couplings, but if this dependence is quantitatively sufficiently suppressed the situation is qualitatively as in the example. Initially infinitely many essential couplings arise when a perturbative treatment of Quantum Gravidynamics is based on a $1/p^2$ type propagator.
Perturbation theory can be seen as a degenerate special case of the general framework described before. Depending on the structure of the coupling flow the associated perturbative Gaussian fixed point does or does not reflect a Gaussian fixed point proper. In the case of gravity, as first advocated by Gomis and Weinberg [66], the use of a type graviton propagator in combination with higher derivative terms avoids the problems with unitarity that occur in other treatments of higher derivative theories. Consistency requires that quadratic counterterms (those which contribute to the propagator) can be absorbed by field redefinitions. This can be seen to be the case [8] either in the absence of a cosmological constant term or when the background spacetime admits a metric with constant curvature. The price to pay for the type propagator is that all nonquadratic counterterms have to be included in the bare action, so that independence of the UV cutoff can only be achieved with infinitely many essential couplings, but it can be [66]. In order to distinguish this from the familiar notion of perturbative renormalizability with finitely many couplings we shall call such theories (perturbatively) weakly renormalizable. The above results then show the existence of a “weakly renormalizable” but “propagator unitary” Quantum Gravidynamics based on a perturbative Gaussian fixed point. The beta functions for this infinite set of couplings are presently unknown. If they were known, expectations are that at least a subset of the couplings would blow up at some finite momentum scale and would be unphysical for . In this case the computed results for physical quantities (“reaction rates”) are likely to blow up likewise at some (high) energy scale . This illustrates Weinberg’s concept of asymptotic safety. 
To quote from [152]: “A theory is said to be asymptotically safe if the essential coupling parameters approach a fixed point as the momentum scale of their renormalization point goes to infinity”. Here ‘the’ essential couplings are those which are useful for the absorption of cutoff dependencies, i.e. not irrelevant ones. The momentum scale is the above $\mu$, so that the condition amounts to having nonterminating trajectories for the $g_i$’s with a finite limit: $\sup_{\mu_0\le\mu\le\infty} g_i(\mu)<\infty,\qquad \lim_{\mu\to\infty} g_i(\mu)=g_i^*<\infty$,   (1.5) for some $\mu$-independent $g_i^*$. In other words in an asymptotically safe theory the above blow up in the couplings and hence in physical observables does not occur. We suggest to call couplings satisfying (1.5) asymptotically safe. As a specification one should add [152]: “Of course the question whether or not an infinity in coupling constants betokens a singularity in reaction rates depends on how the coupling constants are parameterized. We could always adopt a perverse definition such that reaction rates are finite even at an infinity of the coupling parameters. This problem can be avoided if we define the coupling constants as coefficients in a power series expansion of the reaction rates themselves around some physical renormalization point”. A similar remark applies to the signs of coupling constants. When defined through physical quantities certain couplings or coupling combinations will be constrained to be positive. For example in a (nongravitational) effective field theory this constrains the couplings of a set of leading power counting irrelevant operators to be positive [2, 134]. In an asymptotically safe theory similar constraints are expected to arise and are crucial for its physics viability.
Note that whenever the criterion for asymptotic safety is met all the relevant couplings lie in the unstable manifold of the fixed point (which is called the “UV critical surface” in [152], p.802, a term now usually reserved for the surface of infinite correlation length). The regularity property described earlier is then satisfied and the space of actions decomposes in the vicinity of the fixed point into a stable and an unstable manifold. Comparing the two perturbative treatments of Quantum Gravidynamics described earlier one sees that they have complementary advantages and disadvantages: higher derivative theories based on a propagator are strictly renormalizable with couplings that are presumed to be asymptotically safe; however unphysical propagating modes are present. Defining higher derivative gravity perturbatively with respect to a propagator has the advantage that all propagating modes are physical, but initially infinitely many essential couplings are needed, a subset of which is presumed to be not asymptotically safe. From a technical viewpoint the challenge of Quantum Gravidynamics lies therefore not so much in achieving renormalizability but to reconcile asymptotically safe couplings with the absence of unphysical propagating modes. Even in the above perturbative formulations one can see heuristically how this might be feasible: both descriptions should be related through a reduction of couplings, i.e. the infinite set of couplings in the formulation should be thought of as having hidden dependencies such that a nonredundant set corresponds to the finitely many safe couplings in the formulation. The proper computational implementation presumably requires new (perturbative or nonperturbative) techniques. Assuming that this can be achieved certain qualitative features such a gravitational functional integral must have can be inferred without actually evaluating it. 
One is the presence of anti-screening configurations, the other is a dimensional reduction phenomenon in the ultraviolet. In nonabelian gauge theories the anti-screening phenomenon can be viewed as the physics mechanism underlying their benign high energy behavior (as opposed to abelian gauge theories, say), see e.g. [124] for an intuitive discussion. It is important not to identify “anti-screening” with its most widely known manifestation, the sign of the dominant contribution to the one-loop beta function. In an exact continuum formulation of a pure Yang-Mills theory, say, the correlation functions do not even depend on the gauge coupling. Nevertheless they indirectly do know about “asymptotic freedom” through their characteristic high energy behavior. The phenomenon is also state-dependent: it is the Yang-Mills vacuum that acts like a specific polarizable medium. In the functional integral measure this in principle comes about through the dominance of certain configurations/histories which one might also call “anti-screening”. By analogy one would expect that in a gravitational functional integral that allows for a continuum limit a similar mechanism is responsible for its benign ultraviolet behavior (as opposed to the one expected by power counting considerations with respect to a propagator, say). As in the Yang-Mills case a certain class of states will act like a polarizable, predominantly “antiscreening” medium. Importantly, since a preferred ground state is unlikely to exist in quantum gravity, one can take advantage of the ensued ambiguity to select the class of states appropriately. In a functional integral the state dependence can be encoded in boundary terms for the microscopic action, so that a corresponding ambiguity in the definition of the functional integral will result. 
Some insight into the nature of the gravitational antiscreening mechanism can be gained from a Hamiltonian formulation of the functional integral but a proper understanding of the interplay between the class of states, the dominant geometries/histories, and the renormalization properties in the ultraviolet remains to be found. Nevertheless it is clearly legitimate to utilize the aforementioned ambiguities so as to facilitate the construction of a continuum limit. For simplicity we shall refer to such an adjustment as the implementation of an “anti-screening constraint”. In a discretized functional integral the dominance of antiscreening configurations/histories would by definition be responsible for the benign ultraviolet properties associated with a non-Gaussian fixed point. Conversely understanding the nature of these antiscreening geometries/histories might help to design good discretizations. A discretization of the gravitational functional integral which allows for a continuum limit might also turn out to exclude or dynamically disfavor configurations that are taken into account in other, off-hand equally plausible, discretizations. Compared to such a naive discretization it will look as if a constraint on the allowed configurations/histories has been imposed. A useful analogy is the inclusion of a causality constraint in the definition of the (formal Euclidean) functional integral originally proposed by Teitelboim [142, 143], and recently put to good use in the framework of dynamical triangulations [4]. Just as the inclusion of a good causality constraint is justified retroactively, so would be the inclusion of a suitable “antiscreening” constraint. A second qualitative property of a gravitational functional integral where the continuum limit is based on a non-Gaussian fixed point is a dimensional reduction of the residual interactions in the UV. There are several arguments for this phenomenon which we describe in section 3.
Perhaps the simplest one is based on the large anomalous dimensions at a non-Gaussian fixed point and runs as follows: (We present here a model-independent variant [106] of the argument used in [94]). Suppose that the unknown microscopic action is local and reparameterization invariant. The only term containing second derivatives then is the familiar Einstein-Hilbert term $\sqrt{g}\,R(g)$ of mass dimension $2-d$ in $d$ dimensions, if the metric is taken dimensionless. As explained before the dimensionful running prefactor $Z_N$ ($N$ for “Newton”) multiplying it plays a double role, once as a wave function renormalization constant and once as a quasi-essential coupling $g_N$. Both aspects are related as outlined before; in particular $Z_N(\mu)=\mu^{d-2}/g_N(\mu)$.   (1.6) Here $g_N$ is a dimensionless coupling which is treated as “quasi-essential” and whose running may also depend on all the other couplings (gravitational and non-gravitational) made dimensionless by taking out a suitable power of $\mu$. The short distance behavior of the propagator will now be governed by the “anomalous dimension” $\eta_N=-\mu\partial_\mu\ln Z_N(\mu)$, by the usual field theoretical arguments, say, via the Callan-Symanzik equation for the effective action. On the other hand the flow equation for $g_N$ can be expressed in terms of $\eta_N$ as $\mu\partial_\mu g_N=[d-2+\eta_N(g_N,\text{other})]\,g_N$,   (1.7) where we schematically indicated the dependence on the other dimensionless couplings. If this flow equation now has a nontrivial fixed point $0<g_N^*<\infty$, the only way the right hand side can vanish is for $\eta_N^*=2-d$, irrespective of the detailed behavior of the other couplings as long as no blow-up occurs. This is a huge anomalous dimension. For a graviton “test propagator” (see below) the key property of $\eta_N^*=2-d$ is that it gives rise to a high momentum behavior of the form $(p^2)^{-d/2}$ modulo logarithms, or a short distance behavior of the form $|x|^{-(d-2+\eta_N^*)}$ modulo logarithms. Keeping only the leading part the vanishing power at $\eta_N^*=2-d$ translates into a logarithmic behavior, $\ln x^2$, formally the same as for massless (scalar) propagators in a two-dimensional field theory.
We shall comment on potential pitfalls of such an argument below. In accordance with this argument a type propagator goes hand in hand with a non-Gaussian fixed point for in two other computational settings: in strictly renormalizable higher derivative theores (see section 2.2) and in the expansion [145, 146, 135]. In the latter case a nontrivial fixed point goes hand in hand with a graviton propagator whose high momentum behavior is of the form , in four dimensions, and formally in dimensions. The fact that a large anomalous dimension occurs at a non-Gaussian fixed point was first observed in in the context of the expansion [83, 84] and then noticed in computations based on truncated flow equations [94]. The above variant of the argument shows that no specific computational information enters. It highlights what is special about the Einstein–Hilbert term (within the class of local gravitational actions): it is the kinetic (second derivative) term itself which carries a dimensionful coupling. Of course one could assign to the metric a mass dimension , in which case Newton’s constant would be dimensionless. However one readily checks that then the wave function renormalization constant of a standard matter kinetic term acquires a mass dimension for bosons and for fermions, respectively. Assuming that the dimensionless parameter associated with them remains nonzero as one can repeat the above argument and finds now that all matter propagators have a type high momentum behavior, or a logarithmic short distance behavior. It is this universality which justifies to attribute the modification in the short distance behavior of the fields to a modification of the underlying (random) geometry. This may be viewed as a specific variant of the old expectation that gravity acts as a short distance regulator. 
Let us stress that while the anomalous dimension always governs the UV behavior in the vicinity of a (UV) fixed point, it is in general not related to the geometry of field propagation, see [91] for a discussion in QCD. What is special about gravity is ultimately that the propagating field itself determines distances. In the context of the above argument this is used in the reshuffling of the soft UV behavior to matter propagators. The propagators used here should be viewed as “test propagators”, not as physical ones. One transplants the information in derived from the gravitational functional integral into a conventional propagator on a (flat or curved) background spacetime. The reduced dimension two should be viewed as an “interaction dimension” specifying roughly the (normalized) number of independent degrees of freedom a randomly picked one interacts with. The same conclusion ( propagators or interaction dimension ) can be reached in a number of other ways as well, which are described in section 3. A more detailed understanding of the microstructure of the random geometries occuring in an asymptotically safe functional integral remains to be found. Accepting this dimensional reduction as a working hypothesis it is natural to ask whether there exists a two-dimensional field theory which provides a quantitatively accurate (‘effective’) description of this extreme UV regime. Indeed, one can identify a number of characteristics such a field theory should have, using only the main ideas of the scenario, see the end of Section 3. The asymptotic safety of such a field theory would then strongly support the corresponding property of the full theory and the selfconsistency of the scenario. 
In summary, we have argued that the qualitative properties of the gravitational functional integral in the extreme ultraviolet follow directly from the previously highlighted principles: the existence of a nontrivial UV fixed point, asymptotic safety of the couplings, and antiscreening. Moreover these UV properties can be probed for selfconsistency. 1.2 Coarse graining and dynamically adjusted background data Since renormalization implicitly (in perturbation theory) or explicitly (in the Kadanoff-Wilson framework) depends on the choice of a coarse graining operation one is in a quantum gravity context lead to address the question “with respect to what” field configurations are coarsely or finely grained. The piecemeal evaluation of the functional integral (decomposition of the presumed ‘critical’ problem into ‘subcritical’ ones) requires a physically motivated notion of the slicing. For statistical field theories on a non-dynamical background the spectrum of a covariant differential operator (generalized momenta) can be used. In quantum gravity the determination of an averaged geometry is part of the dynamical problem, and one has to proceed differently. The retroactive dynamical adjustment of initially prescribed background data provides a natural generalization. The principle has already been outlined in the discussion around Eq. (1.1). Here we describe the construction in somewhat more detail using the background effective action formalism, see [78, 1, 30] for the latter. In this formalism the effective action of a scalar field theory becomes a (highly nonlocal) functional of two fields, . The second field is the initially prescribed background field , the first can be interpreted as the source dependent average of the quantum field shifted by , where the source is given by . Switching off the source, , correlates both fields and one may assume that locally (in function space) one can be expressed as a functional of the other. 
We write the relation as , so that corresponds to , and assume that it can be solved locally for , i.e. . Then , obeys ([106, 110]) , where the derivative of the action is taken with respect to the explicit background dependence, if any. From the viewpoint of the underlying functional integral a dynamically adjusted background is optimal with regard to a small field expansion around it, where “small”, however, now means “selfconsistently small with respect to the full quantum dynamics”. This construction can be transferred to gravity, where the dynamically adjusted background metric can in addition be used to define an intrinsic coarse graining scale. As remarked before the use of a background metric as opposed to other, more specific, background data is dispensible, for concreteness we use here the metric itself, both as a dynamical variable, , in the functional integral and to specify the background data needed. This leads to an effective action which is a reparameterization invariant functional of two symmetric second rank tensors. The second, , is an initially independently prescribed “background metric”. The first, , is interpreted as an initially source dependent average of the dynamical metric shifted by , where the source is given by . The dots in indicate other fields, dual to ghost sources, which are inessential to the discussion. Switching off the source, , now correlates with and as before we may assume that a functional relation with inverse holds, at least locally in the space of metrics. Then , obeys [106] δ¯Γ[g]δgαβ=⟨δSδ¯gγδ⟩J∗=0δFγδδgαβ, (1.8) where the derivative of the action is taken with respect to the explicit background dependence. Starting with a reparameterization invariant microscopic action , the gauge fixing and ghost terms will introduce such an explicit dependence on ; schematically . 
The solutions of (1.8) contain information about: the source-free condition , about the state vector underlying the functional integral through the choice of boundary terms, and about the choice of gauge-slice. We now comment on each of these dependencies successively: The construction of the extremizing sources entering the definition of (as the Legendre transform of , the generating functional of connected correlation functions) is usually done within a formal power series ansatz. This gives a solution , where the ’s can be expressed in terms of the moments of , ‘amputated’ with the exact . Clearly iff within a formal power series ansatz. This amounts to and the dynamically adjusted background coincides with the prescribed one. Conversely, in order to get a genuine dynamical adjustment, one has to go beyond a formal power series ansatz. Assuming one gets , The functional here contains the dynamical information inherited from the full via . The right hand side of (1.1) can now be viewed as a new background and the parameteric dependence of the state can be relabelled to . The equation characterizing the class of dynamically adjusted backgrounds then becomes ⟨qαβ⟩F(¯g∗)=¯g∗αβ, (1.9) as anticipated in (1.1). The notion of a state is implicitly encoded in the effective action. Recall that the standard effective action for a scalar field theory, when evaluated on a given time-independent function , is proportional to the minimum value of the Hamiltonian in that part of the Hilbert space spanned by normalizable states satisfying . A similar interpretation holds formally for the various background effective actions [34]. In a functional integral formulation the information about the state can be encoded in suitable (though often not explicitly known) boundary terms for the microscopic action. An alternative way to see that in principle also encodes the information about the underlying state vector, is via reconstruction. 
Let , , be the vertex functions associated with , i.e. Γ(n)(x1,…,xn;g):=δδg(x1)…δδg(xn)Γ[g−¯g;¯g,…]∣∣¯g=F(g). (1.10) In a flat space quantum field theory the Wightman or Osterwalder-Schrader reconstruction procedures would allow one to (re-)construct the state space and field operators from knowledge of the . In a quantum gravity context little is known about the feasibility of such a reconstruction from e.g. the vertex functions (1.10). The use of metric correlators (or quantities tentatively interpreted as such) may also not be ideal from the viewpoint of such a reconstruction. One would expect that correlators of (nonlocal) quantities closer to (Dirac) observables are better suited for a reconstruction. Returning to , one should think of it as a functional of both the selected state and of the fields. The selected state will indirectly (co-)determine the space of functionals on which the renormalization flow acts. For example the type of nonlocalities which actually occur in should know about the fact that stems from a microscopic action suited for the appropriate notion of positivity and from a physically acceptable state. The notion of a physically acceptable state is another unexplored issue in this context. In conventional flat space quantum field theories there is a clear-cut notion of a ground state and of the physical state space based on it. Already in quantum field theories on curved but non-dynamical spacetimes a preferred vacuum is typically absent and physically acceptable states have to be selected by suitable conditions (like, for example, the well-known Hadamard condition imposed on the short distance behavior of the two point function, which for free quantum field theories in curved spacetime selects states with desirable stability properties.) In quantum gravity the formulation of analogous selection criteria is an open problem. 
As a tentative example we mention the condition [106] ⟨Pq(T)⟩∼T−d/2,\makebox[28.452756pt]T→∞, Pq(T):=∫dx√qexp(TΔq)(x,x). (1.11) Here is the Laplace-Beltrami operator of a (pseudo-) riemannian metric , and is the associated heat kernel. When is flat decays like for . The condition (1.11) therefore indirectly characterizes a class of states which favor geometries that are smooth and almost flat on large scales. Finally we comment on the gauge dependence of or . The right hand side of (1.8) renders the dependence of on the choice of gauge slice manifest. Had a (technically more complicated) Vilkovisky-deWitt type effective action [158, 126, 117] been used this dependence should be absent. As an approximative shortcut one can continue to work with the previous background effective action and consider solutions of , which retroactively minimize the dependence on the choice of gauge. This condition will be used later on. Since is highly nonlocal the identification of physical solutions of (1.8) or , is a nontrivial problem. See e.g. [16] for examples based on an anomaly induced part of an effective action. The previous discussion suggests a partial characterization, namely those solutions of (1.8) should be regarded as physical which are associated with physically acceptable states. The use of a dynamically adjusted background geometry has the additional advantage of allowing one to introduce an intrinsic coarse graining scale. Let be again an initially prescribed background geometry. Let denote an (unphysical) scale parameter which schematically cuts off modes whose average extension with respect to is larger than . Clearly there is a large degree of arbitrariness in defining such a mode cut-off and for each choice there is an effective action containing mostly the dynamical information about modes larger than . In a two step procedure one can now replace with an intrinsic coarse graining scale. 
In a first step is replaced with a dynamically adjusted background solving the counterpart of (1.8) for . In a second step one considers the spectrum of a covariant differential operator, say , built from . The implicit equation $\mu^2 = E_\omega(\check g_\mu)$, (1.12) then determines or and hence allows one to replace with the spectral scale intrinsic to the dynamically adjusted background. 1.3 Evidence for asymptotic safety Presently the evidence for asymptotic safety in quantum gravity comes from the following very different computational settings: (a) the $2+\epsilon$ expansion, (b) perturbation theory of higher derivative theories and a large N expansion in the number of matter fields, (c) the study of symmetry truncations, and (d) that of truncated functional flow equations. Arguably none of the pieces of evidence is individually compelling but taken together they make a strong case for asymptotic safety. The results from the $2+\epsilon$ expansion were part of Weinberg’s original motivation to propose the existence of a non-Gaussian fixed point. Since gravity in two and three dimensions is non-dynamical, however, the lessons for a genuine quantum gravitational dynamics are somewhat limited. Higher derivative theories were known to be strictly renormalizable with a finite number of couplings, at the expense of having unphysical propagating modes, see [138, 56, 15, 24]. With hindsight one can identify a non-Gaussian fixed point for Newton’s constant already in this setting, see Section 2.2. The occurrence of this non-Gaussian fixed point is closely related to the -type propagator that is used. The same happens when (Einstein- or a higher derivative) gravity is coupled to a large number of matter fields and a expansion is performed. A nontrivial fixed point is found that goes hand in hand with a -type propagator (modulo logs), which here arises from a resummation of matter selfenergy bubbles, however. 
As emphasized before the challenge of Quantum Gravidynamics is not so much to achieve (perturbative or nonperturbative) renormalizability but to reconcile asymptotically safe couplings with the absence of unphysical propagating modes. Two recent developments provide complementary evidence that this might indeed be feasible. Both of these developments take into account the dynamics of infinitely many physical degrees of freedom of the four dimensional gravitational field. In order to be computationally feasible the ‘coarse graining’ has to be constrained somehow. To do this the following two strategies have been pursued (which we label here (c) and (d) according to the subsection in which they will be discussed below): (c) The metric fluctuations are constrained by a symmetry requirement but the full (infinite dimensional) renormalization group dynamics is considered. This is the strategy via symmetry reductions. (d) All metric fluctuations are taken into account but the renormalization group dynamics is projected onto a low dimensional submanifold. Since this is done using truncations of functional renormalization group equations we shall refer to this as the strategy via truncated functional flow equations. Both strategies (truncation in the fluctuations but unconstrained flow and unconstrained quantum fluctuations but constrained flow) are complementary. Tentatively both results are related by the dimensional reduction phenomenon outlined earlier. For the remainder of this Section we now describe the pieces of evidence from the various computational settings (a) – (d) mentioned. To emphasize its auxiliary role we shall write for the ‘quantum metric’ playing the role of the integration variable in the functional integral. Averages thereof or macroscopic metrics are denoted by and reference metrics by . Our curvature conventions are set by and . For a signature metric the Einstein-Hilbert and scalar field action read, and , respectively. 
Occasionally we shall switch to Euclidean signature metrics, , in which case , , and the Euclidean signature Lagrangians are obtained by formally flipping the sign of the Lorentzian signature ones. (a) Evidence from expansions: In the non-gravitational examples of perturbatively nonrenormalizable field theories with a non-Gaussian fixed point the non-Gaussian fixed point can be viewed as a ‘remnant’ of an asymptotically free fixed point in a lower dimensional version of the theory. It is thus natural to ask how gravity behaves in this respect. In spacetime dimensions Newton’s constant is dimensionless and formally the theory with the bare action is power counting renormalizable in perturbation theory. However, as the Einstein–Hilbert term is purely topological in two dimensions the inclusion of local dynamical degrees of freedom requires, at the very least, starting from dimensions and then studying the behavior near . The resulting “-expansion” amounts to a double expansion in the number of ‘graviton’ loops and in the dimensionality parameter . Typically dimensional regularization is used, in which case the UV divergencies give rise to the usual poles in . Specific for gravity are however two types of complications. The first one is due to the fact that is topological at , which gives rise to additional “kinematical” poles of order in the graviton propagator. The goal of the renormalization process is to remove both the ultraviolet and the kinematical poles in physical quantities. The second problem is that in pure gravity Newton’s constant is an inessential parameter, i.e. it can be changed at will by a field redefinition. Newton’s constant can be promoted to a coupling proper by comparing its flow with that of the coefficient of some reference operator, which is fixed to be constant. 
For the reference operator various choices have been adopted (we follow the discussion in Kawai et al [85, 83, 84, 3] with the conventions of [84]): (i) a cosmological constant term , (ii) monomials from matter fields which are quantum mechanically non-scale invariant in , (iii) monomials from matter fields which are quantum mechanically scale invariant in , and (iv) the conformal mode of the metric itself in a background field expansion. All choices lead to a flow equation of the form $\mu \frac{d}{d\mu} g_N = \epsilon\, g_N - \gamma\, g_N^2$, (1.13) but the coefficient depends on the choice of the reference operator [85]. For all there is a nontrivial fixed point with a one-dimensional unstable manifold. In other words is an asymptotically safe coupling in dimensions and the above rule of thumb suggests that this is a remnant of a nontrivial fixed point in with respect to which is asymptotically safe (see Section 1.3 for the renormalization group terminology). Technically the non-universality of arises from the before-mentioned kinematical poles. In the early papers [60, 37, 152] the choice (i) was adopted giving , or if free matter of central charge is minimally coupled. A typical choice for (ii) is a mass term of a Dirac fermion, a typical choice for (iii) is the coupling of a four-fermion (Thirring) interaction. Then comes out as , where , respectively. Here is the scaling dimension of the reference operator, and again free matter of central charge has been minimally coupled. It has been argued in [85] that the loop expansion in this context should be viewed as a double expansion in powers of and , and that reference operators with are optimal. The choice (iv) has been pursued systematically in a series of papers by Kawai et al [83, 84, 3]. It is based on a parameterization of the metric in terms of a background metric , the conformal factor , and a part which is traceless, . 
Specifically is inserted into the Einstein–Hilbert action; propagators are defined (after gauge fixing) by the terms quadratic in and , vertices correspond to the higher order terms. This procedure turns out to have a number of advantages. First the conformal mode is renormalized differently from the modes and can be viewed as defining a reference operator in itself; in particular the coefficient comes out as . Second, and related to the first point, the system has a well-defined -expansion (absence of poles) to all loop orders. Finally this setting allows one to make contact to the exact (KPZ [88]) solution of two-dimensional quantum gravity in the limit . (b) Evidence from perturbation theory and large N: Modifications of the Einstein-Hilbert action where fourth derivative terms are included are known to be perturbatively renormalizable [138]. A convenient parameterization is S=−∫dx√q[~Λ−1cdGNR+12sC2−ω3sR2+θsE]. (1.14) Here , is a constant such that , is the square of the Weyl tensor and is the integrand of the Gauss-Bonnet term. The sign of the crucial coupling is fixed by the requirement that the Euclidean functional integral is damping. The one-loop beta functions for the (nonnegative) couplings, , are known and on the basis of them these couplings are expected to be asymptotically safe. In particular is asymptotically free, . The remaining couplings and are made dimensionless via , , where is the renormalization scale. At these flow equations are compatible with the existence of a non-trivial fixed point for Newton’s constant, , see Section 2.2. The value of is highly nonuniversal but it cannot naturally be made to vanish, i.e. the nontrivial and the trivial fixed point, , do not merge. The rationale for identifying a nontrivial fixed point by perturbative means is explained in Appendix A1. 
The benign renormalizability properties seen in this framework are due to the type propagator in combination with diffeomorphism invariance, at the expense of unphysical propagating modes. The action (1.14) can be supplemented by a matter action, containing a large number, , of free matter fields. One can then keep the product fixed, retain the usual normalization of the matter kinetic terms, and expand in powers of . Renormalizability of the resulting ‘large N expansion’ then amounts to being able to remove the UV cutoff order by order in the formal series in . This type of study was initiated by Tomboulis where the gravity action was taken to be either the pure Ricci scalar [145], Ricci plus cosmological term [135], or a higher derivative action [146], with free fermionic matter in all cases. More recently the technique was reconsidered [120] with (1.14) as the gravity action and free matter consisting of scalar fields, Dirac fields, and Maxwell fields. Starting from the Einstein-Hilbert action the high energy behavior of the usual -type propagator gets modified. To leading order in the modified propagator can be viewed as the graviton propagator with an infinite number of matter selfenergy bubbles inserted and resummed. The resummation changes the high momentum behavior from to , in four dimensions. In dimensions the resulting expansion is believed to be renormalizable in the sense that the UV cutoff can strictly be removed order by order in without additional (counter) terms in the Lagrangian. In the same is presumed to hold provided an extra term is included in the bare Lagrangian, as in (1.14). After removal of the cutoff the beta functions of the dimensionless couplings can be analyzed in the usual way and already their leading term will decide about the flow pattern. The qualitative result (due to [145, 135]) is that there exists a nontrivial fixed point for the dimensionless couplings , and . Its unstable manifold is three dimensional, i.e. 
all couplings are asymptotically safe. Repeating the computation in dimensions the fixed point still exists and (taking into account the different UV regularization) corresponds to the large (central charge) limit of the fixed point found the expansion. These results have recently been confirmed and extended by Percacci [120] using the heat kernel expansion. In the presence of scalar fields, Dirac fields, and Maxwell fields, the flow equations for and come out to leading order in as μddμgN = −2gN+1(4π)216(nS−2nD−4nM)g2N, μddμλ = −2λ+1(4π)2[16(nS−2nD−4nM)λgN+14(nS−4nD+2nM)gN]. (1.15) μddμs = −1(4π)21360(6nS+25nD+72nM)s2. One sees that the coupling is always asymptotically free, and that Newton’s constant has a nontrivial fixed point, , which is positive if the number of scalar matter fields is not too large. As a caveat one should add that the -type propagators occuring both in the perturbative and in the large framework are bound to have an unphysical pole at some intermediate momentum scale. This pole corresponds to unphysical propagating modes and it is the price to pay for (strict) perturbative renormalizability combined with asymptotically safe couplings. From this point of view, the main challenge of Quantum Gravidynamics lies in reconciling asymptotically safe couplings with the absence of unphysical propagating modes. This can be achieved in the context of the reduction. (c) Evidence from symmetry reductions: Here one considers the usual gravitational functional integral but restricts it from “4-geometries modulo diffeomorphisms” to “4-geometries constant along a foliation modulo diffeomorphisms”. This means instead of the familiar foliation of geometries one considers a foliation in terms of two-dimensional hypersurfaces and performs the functional integral only over configurations that are constant as one moves along the stack of two-surfaces. 
Technically this constancy condition is formulated in terms of two commuting vector fields , , that are Killing vectors of the class of geometries considered, . For definiteness we consider here only the case where both Killing vectors are spacelike. From this pair of Killing vector fields one can form the symmetric matrix . Then (with the components of and ) defines a metric on the orbit space which obeys and . The functional integral is eventually performed over metrics of the form $q_{\alpha\beta} = \gamma_{\alpha\beta} + M_{ab}\, K^a_\alpha K^b_\beta$, (1.16) where the components of a metric tensor are parameterized by the independent functions in and . Each of these functions is constant along the stack of two-surfaces but may be arbitrarily rough within a two-surface. In the context of the asymptotic safety scenario the restriction of the functional integral to metrics of the form (1.16) is a very fruitful one: (i) the restricted functional integral inherits the perturbative non-renormalizability (with finitely many relevant couplings) from the full theory. (ii) it takes into account the crucial ‘spin-2’ aspect, that is, linear and nonlinear gravitational waves with two independent polarizations per spacetime point are included. (iii) it goes beyond the Eikonal approximation [141, 49] whose dynamics can be understood via a related decomposition [80, 51]. (iv) based on heuristic arguments the dynamics of full Quantum Gravidynamics is expected to be effectively two-dimensional in the extreme ultraviolet with qualitative properties resembling that of the truncation. The renormalization of the truncation can thus serve as a prototype study and its asymptotic safety probes the selfconsistency of the scenario. (v) for the restricted functional integral the full infinite dimensional renormalization group dynamics can be studied; it reveals both a Gaussian and a non-Gaussian fixed point, where the properties of the latter are compatible with the existence of a non-perturbative continuum limit. 
Two additional bonus features are: in this sector the explicit construction of Dirac observables is feasible (classically and presumably also in the quantum theory). Finally a large class of matter couplings is easily incorporated. As mentioned the effective dynamics looks two-dimensional. Concretely the classical action describing the dynamics of the 2-Killing vector subsector is that of a non-compact symmetric space sigma-model non-minimally coupled to 2D gravity via the “area radius” , of the two Killing vectors. To avoid a possible confusion let us stress, however, that the system is very different from most other models of quantum gravity (mini-superspace, 2D quantum gravity or dilaton gravity, Liouville theory, topological theories) in that it has infinitely many local and selfinteracting dynamical degrees of freedom. Moreover these are literally (an infinite subset of) the degrees of freedom of the 4-dimensional gravitational field, not just analogues thereof. The corresponding classical solutions (for both signatures of the Killing vectors) have been widely studied in the General Relativity literature, cf. [69, 18, 87]. We refer to [35, 36] for details on the reduction procedure and [130] for a canonical formulation. The case with aligned polarizations (Beck-Einstein-Rosen waves) is much simpler and the essential aspects can be modelled by a massive free field on [107]. For generic polarizations strongly selfinteracting systems arise whose renormalization [108] can be achieved by borrowing covariant background field techniques from Riemannian sigma-models; see [78, 57, 79, 133, 39, 148, 114]. In the particular application here the sigma-model perturbation theory is partially nonperturbative from the viewpoint of a graviton loop expansion as not all of the metric degrees of freedom are Taylor expanded in the bare action, see [110]. 
This together with the field reparameterization invariance blurs the distinction between a perturbative and a non-perturbative treatment of the gravitational modes. The renormalization can be done to all orders of sigma-model perturbation theory, which is ‘not-really-perturbative’ for the gravitational modes. It turns out that strict cutoff independence can be achieved only by allowing for infinitely many essential couplings. They are conveniently combined into a generating functional , which is a positive function of one real variable. Schematically the renormalized action takes the form [108] S[q]=SEH[h(ρ)ρq]+other second derivative % terms. (1.17) Here is a metric of the form (1.16), is the Einstein–Hilbert action evaluated on it, and is the generating coupling function evaluated on the renormalized area radius field . Higher derivative terms are not needed in this subsector for the absorption of counter terms; the “other second derivative terms” needed are known explicitly. This “coupling functional” is scale dependent and is subject to a flow equation of the form μddμh=\boldmath{β}h(h), (1.18) where is the renormalization scale and is the ‘running’ generating functional. To preclude a misunderstanding let us stress that the function changes with , irrespective of the name of the argument, not just its value on , say. Interestingly a closed formula for the beta function (or functional) in (1.18) can be found [108, 109]. The resulting flow equation is a nonlinear partial integro-differential equation and difficult to analyze. The fixed points however are easily found. Apart from the degenerate ‘Gaussian’ one, , there is a nontrivial fixed point . For the Gaussian fixed point a linearized stability analysis is empty, the structure of the quadratic perturbation equation suggests that it has both attractive and repulsive directions in the space of functions . 
For the non-Gaussian fixed point a linearized stability analysis is non-empty and leads to a system of linear integro-differential equations. Since the fixed point has the form of a power series in the loop counting parameter , the proper concept of a “linearized perturbation” has the form $h(\rho,\lambda,\mu) = h_{\rm beta}(\rho,\lambda) + \delta h(\rho,\lambda,\mu)$, $\delta h(\rho,\lambda,\mu) = \frac{\lambda}{2\pi}\, s_1(\rho,t) + \big(\frac{\lambda}{2\pi}\big)^2 s_2(\rho,t) + \dots$ (1.19) where the are functions of and . Note that the perturbation involves infinitely many functions of two variables. Inserting the ansatz (1.19) into the flow equation and linearizing in gives a recursive system of inhomogeneous integro-differential equations for the $s_n$. The boundary conditions are fixed such that the full flow is driven by the counterterms only, which amounts to the requirement that all the vanish for uniformly in . Subject to these boundary conditions the recursive system of integro-differential equations can be shown to have a unique solution for arbitrary smooth initial data. The solution for reads $s_1(\rho,t) = \rho \int_\rho^\infty \frac{du}{u}\, r_1(u - \zeta_1 t)$, (1.20) where is an arbitrary smooth function of one variable satisfying for . This function can essentially be identified with the initial datum at some renormalization time , as . Evidently for , if . This condition is indeed satisfied by all the symmetry reduced gravity theories considered in [109], precisely because the coset space is noncompact. If sigma-model scalars and abelian gauge fields are present in the 4D action one has the simple formula $\zeta_1 = -\frac{k+2}{2}$, $k = \#$ abelian vector fields. (1.21) Equation (1.20) shows that the lowest order perturbation will always die out for , for arbitrary smooth initial data prescribed at . It can be shown that this continues to hold for all higher orders irrespective of the signs of the coefficients . The situation is illustrated in the Figure below. The proof of this result is somewhat technical and can be found in [109]. 
Thus all linearized perturbations decay for , which is precisely what Weinberg’s criterion for asymptotic safety asks for. Moreover the basic propagator used is free from unphysical poles. This suggests that a genuine continuum limit exists for the reduced Quantum Gravidynamics beyond approximations (like the sigma-model perturbation theory/partially nonperturbative graviton expansion used to compute (1.18)). See [111, 89] for a proposed ‘exact’ bootstrap construction, whose relation to a truncated functional integral however remains to be understood. In summary, in the context of the reduction an asymptotically safe coupling flow can be reconciled with the absence of unphysical propagating modes. In contrast to the technique on which evidence (d) below is based the existence of an infinite cutoff limit here can be shown and does not have to be stipulated as a hypothesis subsequently probed for selfconsistency. Since the properties of the truncation qualitatively are the ones one would expect from an ‘effective’ field theory describing the extreme UV aspects of Quantum Gravidynamics (see the end of Section 3), its asymptotic safety is a strong argument for the selfconsistency of the scenario. (d) Evidence from truncated flows of the effective average action: The effective average action for a scalar field theory is a generating functional generalizing the usual effective action, to which it reduces for . Here depends on the UV cutoff and an additional scale , indicating that in the defining functional integral roughly the field modes with momenta in the range have been integrated out. Correspondingly gives back the bare action and is the usual quantum effective action, in the presence of the UV cutoff . 
The modes in the momentum range are omitted or suppressed by a mode cutoff ‘action’ , and one can think of as being the conventional effective action but computed with a bare action that differs from the original one by the addition of ; specifically ΓΛ,k=−CΛ,k+ΓΛ∣∣S↦S+CΛ,k. (1.22) From the regularized functional integral defining an (‘exact’) functional renormalization group equation (FRGE) can be derived. Schematically it has the form , where the “right hand side” involves the Hessian of with respect to the dynamical fields. The FRGE itself (that is, its rhs) carries no explicit dependence on the UV cutoff, or one which can trivially be removed. However the removal of the UV regulator implicit in the definition of is nontrivial and is related to the traditional UV renormalization problem. Whenever massless degrees of freedom are involved also the existence of the limit of is nontrivial and requires identification of the proper infrared degrees of freedom. In the present context we take this for granted and focus on the UV aspects. The effective average action has been generalized to gravity by Reuter [127]. The substitution (1.22) is now applied to the (highly nonlocal) background effective action which in addition to the average of the ‘quantum’ metric depends on a background metric . The mode cutoff functional depends covariantly on and the bare action is not specified from the outset. In fact, conceptually it is largely determined by the requirement that a continuum limit exists, see the criterion in Appendix A.2. can be expected to have a well-defined derivative expansion with the leading terms roughly of the form (1.14). Also the gravitational effective average action obeys an ‘exact’ FRGE, which is a new computational tool in quantum gravity not limited to perturbation theory. In practice is replaced in this equation with a independent functional interpreted as . 
The assumption that the ‘continuum limit’ for the gravitational effective average action exists is of course what is at stake here. The strategy in the FRGE approach is to show that this assumption, although without a-priori justification, is consistent with the solutions of the flow equation (where right hand side now also refers to the Hessian of ). The structure of the solutions of this cut-off independent FRGE should be such that they can plausibly be identified with . Presupposing the ‘infrared safety’ in the above sense, a necessary condition for this is that the limits and exist. Since the first limit probes whether can be made large; the second condition is needed to have all modes integrated out. In other words one asks for global existence of the flow obtained by solving the cut-off independent FRGE. Being a functional differential equation the cutoff independent FRGE requires an initial condition, i.e. the specification of a functional which coincides with at some scale . The point is that only for very special ‘fine tuned’ initial functionals will the associated solution of the cutoff independent FRGE exist globally. The existence of the limit in this sense can be viewed as the counterpart of the UV renormalization problem, namely the determination of the unstable manifold associated with the fixed point . We refer to Appendix A.2 for a more detailed discussion of this issue. The full nonlinear functional differential equation is of course intractable. To make the FRGE computationally useful the space of functionals is truncated typically to a finite dimensional one of the form Γk[g,¯g]=N∑i=0gi(k)kdiIi[g]+%gaugefixingterm, (1.23) where the are ‘well-chosen’ – local and nonlocal – functionals of , and is identified with after functional differentiation. The are numerical parameters that carry the scale dependence. 
For ’s obeying a non-redundancy condition, the play the role of essential couplings which have been normalized to have vanishing mass dimension by taking out a power . Beyond perturbation theory unfortunately little is known about the type of nonlocal terms to expect in , leaving the choice of such somewhat arbitrary. Conceptually the truncation implicitly replaces the full gravitational dynamics by one whose functional renormalization flow is confined to the subspace (1.23), similar to what happens in a hierarchical approximation. The original FRGE then can be converted into a system of nonlinear ordinary differential equations for the couplings . In the case of gravity the following ansatz has been made by Lauscher and Reuter [94, 95] (with Euclidean signature) I0[g]=∫dx√g,I1[g]=−∫dx√gR(g),I2[g]=∫dx√gR(g)2, (1.24) where is the metric and is the associated curvature scalar. The flow pattern displays a number of remarkable properties. Most importantly a non-Gaussian fixed point exists (first found in [136] based on [127] and corroborated in [137, 94, 95, 98, 28]). Within the truncation (1.24) a three-dimensional subset of initial data is attracted to the fixed point under the reversed flow limk→∞(g0(k),g1(k),g2(k))=(g∗0,g∗1,g∗2), (1.25) where the fixed point couplings , are finite and positive and no blow-up occurs in the flow for large . Again this adheres precisely to the asymptotic safety criterion. The flow equations for the ’s depend on the choice of the mode cutoff function and on the choice of gauge fixing. In general they do not assume a transparent analytical form. An exception is when all but and are omitted from the truncation (so that only the Einstein-Hilbert terms remain) and an optimized mode cutoff is used in combination with a limiting version of the gauge fixing term [98]. 
In terms of the parameterization and used later on the flow equations then take the form kddkgN = 2gN+6g2NgN−6(4π)2(1−2λ)2, kddkλ = −2λ−gN2(4π)2(1+23gN+12(4π)2(1−3λ)2gN−12(4π)2(1−2λ)2). (1.26) The above properties can then be verified analytically. Some of the trajectories with initial data in the unstable manifold cannot be extended to due to (infrared) singularities. This problem is familiar from nongravitational theories and is presumably an artifact of the truncation. In the vicinity of the fixed point, on the other hand, all trajectories show remarkable robustness properties against modifications of the mode cutoff scheme which provide good reasons to believe that the structural aspects of the above results are not an artifact of the truncation used. The upshot is that there is a clear signal for asymptotic safety in the subsector (1.23), obtained via truncated functional renormalization flow equations. The impact of matter has been studied by Percacci et al [50, 121, 122]. Minimally coupling free fields (bosons, fermions, or abelian gauge fields) one finds that the non-Gaussian fixed point is robust, but the positivity of the fixed point couplings puts certain constraints on the allowed number of copies. When a selfinteracting scalar is coupled nonminmally via , one finds a fixed point (whose values are with matched normalizations the same as in the pure gravity computation) while all selfcouplings vanish, , . In the vicinity of the fixed point a linearized stability analysis can be performed; the admixture with and then lifts the marginality of , which becomes marginally irrelevant  [121, 122]. The running of and is qualitatively unchanged as compared to pure gravity, indicating that the asymptotic safety property is robust also with respect to the inclusion of selfinteracting scalars. This concludes our survey of the evidence for asymptotic safety. More details on the results (c) and (d) can be found in the review [110]. 
The perturbative identification of the non-Gaussian fixed point is detailed in section 2.2. The results (c) and (d) are genuinely surprising. With hindsight, the most natural explanation is to view them as manifestations of the asymptotic safety of the full dynamics with respect to a nontrivial fixed point. Tentatively (c) reflects a property of the full dynamics in the extreme ultraviolet via the dimensional reduction of the residual interactions. Since the origin of (d) could be the match to the perturbatively visible non-Gaussian fixed point. ### 1.4 Some working definitions Here we attempt working definitions for some of the key terms used before. Quantum Gravidynamics: The term is coined in analogy to “Quantum Chromodynamics” indicating, first, that the theory is supposed to be defined not only as an effective field theory and, second, that the selfinteraction of the quantized gravitational field is predominantly antiscreening in the ultraviolet. In contrast to “Quantum General Relativity” the microscopic action is allowed to be different from the Einstein-Hilbert action or a discretization thereof. Plausibly it should be still quasilocal, i.e. have a well-defined derivative expansion, and based on perturbatively renormalizable higher derivative theories one would expect it to contain at least quartic derivative terms. This means that also the number of physical propagating degrees of freedom (with respect to a background) may be different from the number entailed by the Einstein–Hilbert action. As with “Quantum General Relativity” we take the term “Gravidynamics” in a broad sense, allowing for any set of field variables (e.g. vielbein and spin connection, Sen-Ashtekar variables, Plebanski and BF type formulations, teleparallel etc.) that can be used to recast general relativity (see e.g. the review [118]). It is of course not assumed from the outset that the quantum gravidynamics based on the various set of field variables are necessarily equivalent. 
Gaussian fixed point: A fixed point is called Gaussian if there exists a choice of field variables for which the fixed point action is quadratic in the fields and the functional measure is Gaussian. This includes the local case but also allows for nonlocal quadratic actions. The drawback of this definition is that the proper choice of field variables in which the measure reveals its Gaussian nature may be hard to find. (For example in the correlation functions of the spin field in the two-dimensional Ising model the underlying free fermionic theory is not visible.) A non-Gaussian fixed point is simply one where no choice of fields can be found in which the measure becomes Gaussian. Unfortunately this, too, is not a very operational criterion. Unstable manifold: The unstable manifold of a fixed point with respect to a coarse graining operation is the set of all points that can be reached along flow lines emanating from the fixed point, the so-called renormalized trajectories. Points on such a flow line correspond to perfect actions. The stable manifold is the set of points attracted to the fixed point in the direction of coarse graining. Strict (weak) renormalizability: We call a field theory strictly (weakly) renormalizable with respect to a fixed point and a coarse graining operation if the dimension of its unstable manifold is finite (infinite). It is implied that if a field theory has this property with respect to one coarse graining operation it will have it with respect to many others (“universality”). Strict or weak renormalizability is believed to be a sufficient condition for the existence of a genuine continuum limit for observables. 
Relevant coupling: Given an expansion “sum over couplings times interaction monomials”, a coarse graining operation, and a fixed point of it, a coupling is called relevant (irrelevant) if it is driven away from (towards) the value the corresponding coordinate has at the fixed point, under a sufficient number of coarse graining steps. Note that this distinction makes sense even for trajectories not connected to the fixed point (because they terminate). It is however an explicitly ‘coordinate dependent’ notion. The same terms are used for the interaction monomials associated with the couplings. The dimension of the unstable manifold equals the maximal number of independent relevant interaction monomials ‘connected’ to the fixed point. All points on the unstable manifold are thus parameterized by relevant couplings but not vice versa. Couplings which are relevant or irrelevant in a linearized analysis are called linearly relevant or linearly irrelevant, respectively. A coupling which is neither linearly relevant nor linearly irrelevant, is called (linearly) marginal. Continuum limit: By a genuine continuum limit we mean here a limit in which physical quantities become: (C1) strictly independent of the UV cutoff, (C2) independent of the choice of the coarse graining operation (within a certain class), and (C3) independent of the choice of gauge slice and invariant under point transformations of the fields. Usually one stipulates properties (C1) and (C2) for the functional measure after which (C3) should be a provable property of physical quantities like the S-matrix. The requirement of having also (C1) and (C2) only for observables is somewhat weaker and in the spirit of the asymptotic safety scenario. For the issue of gauge-independence see [93, 117]. Typically the properties (C1-C3) cannot be rigorously established, but there are useful criteria which render the existence of a genuine continuum limit plausible in different computational frameworks. 
In Appendices A1 and A2 we discuss in some detail such criteria for the perturbative and for the FRGE approach, respectively. For convenience we summarize the main points here. In renormalized perturbation theory the criterion involves two parts: (PTC1) Existence of a formal continuum limit. This means, the removal of the UV cutoff is possible and the renormalized physical quantities are independent of the scheme and of the choice of interpolating fields – all termwise in a formal power series in the loop counting parameter. The perturbative beta functions always have a trivial (Gaussian) fixed-point but may also have a nontrivial (non-Gaussian) fixed point. The second part of the criterion is: (PTC2) The dimension of the unstable manifold of the (Gaussian or non-Gaussian) fixed point as computed from the perturbative beta functions equals the number of independent essential couplings. For example and QED meet (PTC1) but not (PTC2) while QCD satisfies both (PTC1) and (PTC2). In the framework of the functional renormalization group equations (FRGE) similar criteria for the existence of a genuine continuum limit can be formulated. Specifically for the FRGE of the effective average action one has: (FRGC1) The solution of the FRG equation admits (for fine tuned initial data at some ) a global solution , i.e. one that can be extended both to and to (where the latter limit is not part of the UV problem in itself). (FRGC2) The functional derivatives of (vertex functions) meet certain requirements which ensure stability/positivity/unitarity. In (FRGC1) the existence of the limit in theories with massless degrees of freedom is nontrivial and the problem of gaining computational control over the infrared physics should be separated from the UV aspects of the continuum limit as much as possible. However the limit is essential to probe stability/positivity/unitarity. 
For example, to obtain a (massive) Euclidean quantum field theory the Schwinger functions constructed from the vertex functions have to obey nonlinear relations which ensure that the Hilbert space reconstructed via the Osterwalder-Schrader procedure has a positive definite inner product. Perturbative (weak) renormalizability: We call a theory perturbatively (weakly) renormalizable if (PTC1) can be achieved with finitely (infinitely) many essential couplings. A theory where neither can be achieved is called perturbatively nonrenormalizable. Perturbative (weak) renormalizability is neither necessary nor sufficient for (weak or strict) renormalizability in the above nonperturbative sense. It is only in combination with (PTC2) that perturbative results are indicative for the existence of a genuine continuum limit. Asymptotically free coupling: A non-constant coupling in the unstable manifold of a Gaussian fixed point. The “non-constant” proviso is needed to exclude cases like a trivial coupling. In a nonperturbative lattice construction of theory only a Gaussian fixed point with a one-dimensional unstable manifold (parameterized by the renormalized mass) is thought to exist, along which the renormalized coupling is constant and identically zero. The Gaussian nature of the fixed-point, on the other hand, is not crucial and we define: Asymptotically safe coupling: A non-constant coupling in the unstable manifold of a fixed point. Asymptotically safe functional measure: The functional measure of a statistical field theory is said to be asymptotically safe if it is perturbatively weakly renormalizable or non-renormalizable, but it possesses a fixed point with respect to which it is strictly renormalizable. 
Subject to the regularity assumption that the space of actions can in the vicinity of the fixed point be decomposed into a stable and an unstable manifold this is equivalent to the following requirement: all relevant couplings are asymptotically safe and there is only a finite number of them. Note that unitarity or other desirable properties that would manifest itself on the level of observables are not part of this definition. In a non-gravitational context the functional measure of the 3D Gross–Neveu model is presently the best candidate to be asymptotically safe in the above sense (see [72, 41, 131, 76] and references therein). Also 5D Yang–Mills theories (see [65, 104] and references therein) are believed to provide examples. In a gravitational context, however, there are good reasons to modify this definition. First the choice of couplings has to be physically motivated, which requires to make contact to observables. In the above nongravitational examples with a single coupling the ‘meaning’ of the coupling is obvious; in particular it is clear that it must be finite and positive at the non-Gaussian fixed point. In general however one does not know whether ill behaved couplings are perverse redefinitions of better behaved ones. To avoid this problem the couplings should be defined as coefficients in a power series expansion of the observables themselves (Weinberg’s “reaction rates”, see the discussion in section 1.1). Of course painfully little is known about (generic) quantum gravity observables, but as a matter of principle this is how couplings should be defined. In particular this will pin down the physically aedequate notion of positivity or unitarity. Second, there may be good reasons to work initially with infinitely many essential couplings. Recall that the number of essential couplings entering the initial construction of the functional measure is not necessarily equal to the number eventually indispensable. 
In a secondary step a reduction of couplings might be feasible. That is, relations among the couplings might exist which are compatible with the renormalization flow. If these relations are sufficiently complicated, it might be better to impose them retroactively than to try to switch to a more adapted basis of interaction monomials from the beginning. Specifically in the context of quantum gravity microscopic actions with infinitely many essential couplings occur naturally in several ways: when starting from the Gomis and Weinberg picture [66] of perturbative quantum gravity and in the reduction [108], where a coupling function is needed for a dimensionless scalar. Further, the (Wilsonian) effective actions induced by the conformal anomaly can be rewritten in terms of dimensionless scalars [16, 10]. Their functional form is only partially constrained by the requirement to reproduce the anomaly and the fate of the associated couplings or coupling functions in the ultraviolet is in principle a matter of dynamics. Third, the dimension of the unstable manifold is of secondary importance in this context. Recall that the dimension of the unstable manifold is the maximal number of independent relevant interaction monomials ‘connected’ to the fixed point. This maximal number may be difficult to determine in Quantum Gravidynamics for the above reasons. Moreover the identification of all renormalized trajectories emanating from the fixed point may be more than what is needed physicswise; the successful construction of a subset of renormalized trajectories for physically motivated couplings may already be enough to obtain predictions/explanations for some observables. What matters is not so much the total number of relevant couplings but the way how observables depend on them. We remark that even in conventional perturbation theory based on the Einstein-Hilbert action the divergencies in the S-matrix seem to be less severe than those in the effective action [23]. 
Generally, since generic observables (in the sense used in Section 1.1) are likely to be nonlinearly and nonlocally related to the metric or to the usual basis of interaction monomials (scalars built from polynomials in the curvature tensors, for instance) the condition that the theory should allow for predictions in terms of observables is only indirectly related to the total number of relevant couplings. In summary, the interplay between the microscopic action, its parameterization through essential or relevant couplings, and observables is considerably more subtle than in the presumed non-gravitational examples of asymptotically safe theories with a single coupling. The existence of an asymptotically safe functional measure in the above sense seems to be neither necessary nor sufficient for a physically viable theory of Quantum Gravidynamics. This leads to our final working definition. Asymptotically safe Quantum Gravidynamics: A quantum theory of gravity based on a notion of an asymptotically safe functional integral measure which incorporates the interplay between couplings and observables described above. In brief: (i) the choice of couplings has to be based on observables; this will pin down the physically relevant notion of positivity/unitarity. (ii) the number of essential or relevant couplings is not a-priori finite. (iii) what matters is not so much the dimension of the unstable manifold than how observables depend on the relevant couplings. ### 1.5 Discussion of possible objections Here we discuss some of the possible objections to a physically viable theory of Quantum Gravidynamics. • Since the microscopic action is likely to contain higher derivative terms don’t the problems with non-unitarity notorious in higher derivative gravity theories reappear? • In brief, the unitarity issue has not much been investigated so far, but the presumed answer is No. 
First, the problems with perturbatively strictly renormalizable higher derivative theories stem mostly from the -type propagator used. The alternative perturbative framework already mentioned, namely to use a -type propagator at the expense of infinitely many essential (potentially ‘unsafe’) couplings avoids this problem [66, 8]. The example of the reduction shows that the reconciliation of safe couplings with the absence of unphysical propagating modes can be achieved in principle. Also the superrenormalizable gravity theories with unitary propagators proposed in [144] are intriguing in this respect. Second, even for higher derivative theories on flat space a well-defined Euclidean functional integral can exist, free of negative norm states or negative probabilities [75]. Physical unitarity is then thought to be restored at low energies, in which case one could ‘live with’ higher derivative ghosts. The same would presumably hold for higher derivative theories on a fixed curved background. Third, when the background effective action is used as the central object to define the quantum theory, the ‘background’ is not a solution of the classical field equations. Rather it is adjusted selfconsistently by a condition involving the full quantum effective action. If the background effective action is computed nonperturbatively (by whatever technique) the intrinsic notion of unitarity will not be related to the ‘propagator unitarity’ around a solution of the classical field equations in any simple way. One aspect of this intrinsic positivity is the convexity of the background effective action. In the flow equation for the effective average action one can see, for example, that the wrong-sign of the propagator is not an issue: if is of the
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9405592083930969, "perplexity": 542.826312039349}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-39/segments/1631780057424.99/warc/CC-MAIN-20210923135058-20210923165058-00591.warc.gz"}
https://math.stackexchange.com/questions/2759760/determine-the-radius-of-convergence-of-the-following-power-series
# Determine the radius of convergence of the following power series.?.. Determine the radius of convergence of the following power series. a) $\sum_{n=1}^{\infty}\frac{ x^{6n+2}}{(1+\frac{1}{n})^{n^2}}$ my attempts: by applying the ratio test i got $\frac {a_n}{a_{n+1}}$ =$\frac{ x^{6n+2}}{(1+\frac{1}{n})^{n^2}}$.$\frac{(1+\frac{1}{n+1})^{(n+1)^2}}{ x^{6n+8}}$ i got $\frac {a_n}{a_{n+1}}$ = $\frac{e}{x^6}$ now i don't know ...how to find the radius of convergence of given power series.....Pliz help me From $\frac {a_n}{a_{n+1}} \to \frac{e}{x^6}$ we get $\frac {a_{n+1}}{a_{n}} \to \frac{x^6}{e}$ . The ratio test shows that the power series converges for $|x|<e^{1/6}$ and diverges for $|x|>e^{1/6}$, hence the radius of convergence is $e^{1/6}$. First of all, typically we apply the Ratio Test as $\displaystyle \lim_{n\to\infty}\left|\frac{a_{n+1}}{a_n}\right|$, but you have it upside down. Also, don't forget about the limit! Your statement that $$\color{red}{\frac{a_n}{a_{n+1}}=\frac{e}{x^6}}$$ is false; the correct statement is that $$\color{blue}{\lim_{n\to\infty}}{\frac{a_n}{a_{n+1}}=\frac{e}{x^6}}.$$ Other than that, you did a fine job! You just need to flip it over, restore the correct notation — absolute values and limits, and remember that we want the output of the Ratio Test to be less than $1$ to ensure convergence of a series: $$\lim_{n\to\infty}\left|\frac{a_{n+1}}{a_n}\right|=\frac{x^6}{e} \implies \text{the series converges when } \frac{x^6}{e}<1.$$ Solving this inequality — and don't forget the absolute value $|x|$ when you take the sixth root! — you'll find the radius of convergence.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9922431707382202, "perplexity": 68.87210889728793}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-25/segments/1623487643703.56/warc/CC-MAIN-20210619051239-20210619081239-00048.warc.gz"}
https://kerodon.net/tag/03S1
# Kerodon $\Newextarrow{\xRightarrow}{5,5}{0x21D2}$ $\newcommand\empty{}$ ### 5.4.4 Small Simplicial Sets Definition 5.4.3.1 has a counterpart in the setting of simplicial sets. Definition 5.4.4.1. Let $\kappa$ be an infinite cardinal. We say that a simplicial set $S$ is $\kappa$-small if the collection of nondegenerate simplices of $S$ is $\kappa$-small. Remark 5.4.4.2. In the situation of Definition 5.4.4.1, the dimension of the simplices under consideration is not fixed. That is, a simplicial set $S_{\bullet }$ is $\kappa$-small if and only if the disjoint union $\coprod _{m \geq 0} S_{m}^{\mathrm{nd}}$ is a $\kappa$-small set, where $S_{m}^{\mathrm{nd}} \subseteq S_{m}$ denotes the set of nondegenerate $m$-simplices of $S_{\bullet }$. Remark 5.4.4.3. Let $\kappa$ be an infinite cardinal. Then a simplicial set $S$ is $\kappa$-small if and only if the opposite simplicial set $S^{\operatorname{op}}$ is $\kappa$-small. Remark 5.4.4.5 (Coproducts). Let $\kappa$ be an infinite cardinal and let $\{ S_{i} \} _{i \in I}$ be a collection of $\kappa$-small simplicial sets. Suppose that the cardinality of the index set $I$ is smaller than the cofinality $\mathrm{cf}(\kappa )$. Then the coproduct $\coprod _{i \in I} S_ i$ is also $\kappa$-small (see Corollary 5.4.3.9). In particular: • The collection of $\kappa$-small simplicial sets is closed under finite coproducts. • If $\kappa$ is regular, then the collection of $\kappa$-small simplicial sets is closed under $\kappa$-small coproducts. Remark 5.4.4.6 (Colimits). Let $\kappa$ be an infinite cardinal and let $\{ S_{i} \} _{i \in \operatorname{\mathcal{I}}}$ be a diagram of simplicial sets indexed by a category $\operatorname{\mathcal{I}}$. Suppose that the set of objects $\mathrm{Ob}( \operatorname{\mathcal{I}})$ has cardinality smaller than the cofinality of $\kappa$. 
Then the colimit $\varinjlim _{i \in \operatorname{\mathcal{I}}} S_ i$ is also $\kappa$-small (since it can be realized as a quotient of the coproduct $\coprod S_ i$, which is $\kappa$-small by virtue of Remark 5.4.4.5). Remark 5.4.4.7. Let $S$ be a simplicial set. Then there is a least infinite cardinal $\kappa$ for which $S$ is $\kappa$-small. If $S$ is finite, then $\kappa = \aleph _0$. If $S$ is not finite, then $\kappa = \lambda ^{+}$, where $\lambda$ is the cardinality of the set of all nondegenerate simplices of $S$. In particular, $\kappa$ is always a regular cardinal. Remark 5.4.4.8. Let $\kappa$ be an infinite cardinal and let $T$ be a $\kappa$-small simplicial set. Then: • Every simplicial subset of $T$ is $\kappa$-small. • The simplicial set $T$ is $\lambda$-small for each $\lambda \geq \kappa$. • For every epimorphism of simplicial sets $T \twoheadrightarrow S$, the simplicial set $S$ is also $\kappa$-small. See Remark 5.4.3.4. Proposition 5.4.4.9. Let $\kappa$ be an infinite cardinal and $S_{\bullet }$ be a simplicial set. Assume that the cofinality of $\kappa$ is larger than $\aleph _0$ (this condition is satisfied, for example, if $\kappa$ is uncountable and regular). The following conditions are equivalent: $(1)$ The simplicial set $S_{\bullet }$ is $\kappa$-small. $(2)$ For every integer $n \geq 0$, the set $S_{n}$ is $\kappa$-small. $(3)$ For every finite simplicial set $K$, the set $\operatorname{Hom}_{\operatorname{Set_{\Delta }}}(K,S_{\bullet })$ is $\kappa$-small. Proof. We first show that $(1)$ implies $(2)$. Assume that $S_{\bullet }$ is $\kappa$-small and let $n \geq 0$ be an integer. For each integer $m \geq 0$, let $S_{m}^{\mathrm{nd}}$ denote the set of nondegenerate $m$-simplices of $X$. Using Proposition 1.1.3.4, we can identify $S_ n$ with the coproduct $\coprod _{ \alpha : [n] \twoheadrightarrow [m] } S_{m}^{\mathrm{nd}}$, where $\alpha$ ranges over all surjective maps of linearly ordered sets $[n] \twoheadrightarrow [m]$. 
Our assumption that $S_{\bullet }$ is $\kappa$-small guarantees that each of the sets $S_{m}^{\mathrm{nd}}$ is $\kappa$-small, so that $S_{n}$ is also $\kappa$-small (Corollary 5.4.3.6). We now show that $(2)$ implies $(1)$. Assume that, for each $n \geq 0$, the set $S_{n}$ is $\kappa$-small. Since $\kappa$ has cofinality $> \aleph _0$ it follows that the coproduct $\coprod _{n \geq 0} S_{n}$ is also $\kappa$-small. In particular, the coproduct $\coprod _{n \geq 0} S_{n}^{\mathrm{nd}}$ is $\kappa$-small: that is, the simplicial set $S_{\bullet }$ is $\kappa$-small. The implication $(3) \Rightarrow (2)$ is immediate from the definition. We will complete the proof by showing that $(2) \Rightarrow (3)$. Assume that, for each $n \geq 0$, the set $S_{n}$ is $\kappa$-small, and let $K$ be a finite simplicial set. By virtue of Proposition 3.5.1.7, there exists an epimorphism $f: K' \twoheadrightarrow K$, where $K' = \coprod _{i \in I} \Delta ^{n_ i}$ is a disjoint union of finitely many standard simplices. Then precomposition with $f$ induces a monomorphism $\operatorname{Hom}_{\operatorname{Set_{\Delta }}}( K, S_{\bullet } ) \hookrightarrow \operatorname{Hom}_{\operatorname{Set_{\Delta }}}( K', S_{\bullet }) \simeq \prod _{i \in I} S_{n_ i}.$ Since the collection of $\kappa$-small sets is closed under finite products and passage to subsets (Proposition 5.4.3.5 and Remark 5.4.3.4), it follows that the set $\operatorname{Hom}_{\operatorname{Set_{\Delta }}}( K, S_{\bullet } )$ is also $\kappa$-small. $\square$ Warning 5.4.4.10. The implications $(1) \Rightarrow (2) \Leftrightarrow (3)$ of Proposition 5.4.4.9 are valid for an arbitrary infinite cardinal $\kappa$. However, the implication $(2) \Rightarrow (1)$ is false if $\kappa$ has countable cofinality (for example, if $\kappa = \aleph _0$). Corollary 5.4.4.11. Let $\kappa$ be an infinite cardinal. Then the collection of $\kappa$-small simplicial sets is closed under finite products. Proof. 
Let $\{ S_ i \} _{i \in I}$ be a collection of $\kappa$-small simplicial sets indexed by a finite set $I$; we wish to show that the product $S = \prod _{i \in I} S_ i$ is $\kappa$-small. Without loss of generality, we may assume that $\kappa$ is the least infinite cardinal for which each of the simplicial sets $S_{i}$ is $\kappa$-small. Then $\kappa$ is regular (Remark 5.4.4.7). If $\kappa = \aleph _0$, then the desired result follows from Remark 3.5.1.6. We may therefore assume that $\kappa$ is uncountable. In this case, the desired result follows from the criterion of Proposition 5.4.4.9, since the collection of $\kappa$-small sets is closed under finite products (Proposition 5.4.3.5). $\square$ Corollary 5.4.4.12. Let $\kappa$ be an uncountable cardinal, let $S$ be a $\kappa$-small simplicial set, and let $K$ be a finite simplicial set. Then the simplicial set $\operatorname{Fun}(K, S)$ is $\kappa$-small. Proof. Without loss of generality, we may assume that $\kappa$ is the least uncountable cardinal for which $S$ is $\kappa$-small. In particular, $\kappa$ is regular (Remark 5.4.4.7). By virtue of Proposition 5.4.4.9, it will suffice to show that for every finite simplicial set $L$, the set $\operatorname{Hom}_{\operatorname{Set_{\Delta }}}( L, \operatorname{Fun}(K, S) ) \simeq \operatorname{Hom}_{\operatorname{Set_{\Delta }}}(K \times L, S)$ is $\kappa$-small. This is a special case of Proposition 5.4.4.9, since the simplicial set $K \times L$ is finite (Remark 3.5.1.6). $\square$ Warning 5.4.4.13. The assertion of Corollary 5.4.4.12 is false in the case $\kappa = \aleph _0$. That is, if $K$ and $S$ are finite simplicial sets, then the simplicial set $\operatorname{Fun}(K, S)$ need not be finite. We close by recording stronger forms of Corollaries 5.4.4.11 and 5.4.4.12. Corollary 5.4.4.14. Let $\lambda$ be an infinite cardinal and let $\kappa = \mathrm{ecf}(\lambda )$ be its exponential cofinality (Definition 5.4.3.16). 
Then the collection of $\lambda$-small simplicial sets is closed under $\kappa$-small products. Proof. Let $\{ S_ i \} _{i \in I}$ be a collection of $\lambda$-small simplicial sets indexed by a $\kappa$-small set $I$; we wish to show that the product $S = \prod _{i \in I} S_ i$ is $\lambda$-small. If $\kappa = \aleph _0$, this follows from Corollary 5.4.4.11. We may therefore assume that $\kappa$ is uncountable. Then the cofinality $\mathrm{cf}(\lambda )$ is also uncountable (Remark 5.4.3.17). The desired result now follows from the criterion of Proposition 5.4.4.9, since the collection of $\lambda$-small sets is closed under $\kappa$-small products. $\square$ Corollary 5.4.4.15. Let $\lambda$ be an infinite cardinal and let $\kappa = \mathrm{ecf}(\lambda )$ be its exponential cofinality. Let $S$ be a $\lambda$-small simplicial set and let $K$ be a $\kappa$-small simplicial set. Then $\operatorname{Fun}(K,S)$ is $\lambda$-small. Proof. Since $K$ is $\kappa$-small, we can choose an epimorphism of simplicial sets $\coprod _{i \in I} \Delta ^{n_{i}} \twoheadrightarrow K$, where $I$ is a $\kappa$-small set. It follows that $\operatorname{Fun}(K,S)$ can be identified with a simplicial subset of the product $\prod _{ i \in I} \operatorname{Fun}( \Delta ^{n_ i}, S)$. Corollary 5.4.4.12 guarantees that each factor $\operatorname{Fun}( \Delta ^{n_ i}, S)$ is $\lambda$-small, so that the product $\prod _{ i \in I} \operatorname{Fun}( \Delta ^{n_ i}, S)$ is $\lambda$-small by virtue of Corollary 5.4.4.14. $\square$
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9940083026885986, "perplexity": 108.48289054168063}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-33/segments/1659882573623.4/warc/CC-MAIN-20220819035957-20220819065957-00538.warc.gz"}
https://rd.springer.com/chapter/10.1007%2F978-981-13-9409-6_10?error=cookies_not_supported&code=20e78a60-e314-41f1-bdce-e9261d0bb5be
Natural Scene Mongolian Text Detection Based on Convolutional Neural Network and MSER Conference paper Part of the Lecture Notes in Electrical Engineering book series (LNEE, volume 571) Abstract Maximum Stable Extreme Region (MSER) is the most influential algorithm in text detection. However, due to the complex and varied background of Mongolian text in natural scene images, it is difficult to distinguish between text and non-text connected regions, thus reducing the robustness of the MSER algorithm. Therefore, this paper proposes to extract the connected regions in the natural scene pictures by applying MSER, and then uses the convolutional neural network (CNN) to train a high-performance text classifier to classify the extracted connected regions, and finally obtaining the final detection results. This paper evaluates the proposed method on the CSIMU-MTR dataset established by the School of Computer Science, Inner Mongolia University. The recall rate is 0.75, the accuracy rate is 0.83, and the F-score is 0.79, which is significantly higher than the previous method. It shows the effectiveness of the proposed Mongolian text detection method for natural scenes. Keywords Natural scene mongolian text detection Maximum stable extreme region (MSER) Convolutional neural network (CNN) Notes Acknowledgements This study was supported by the National Natural Science Foundation of China (NSFC) under Grant no. 61563039. References 1. 1. Gao G, Su X, Wei H et al (2011) Classical mongolian words recognition in historical document. In: International conference document analysis recognition, IEEE, pp 692–697Google Scholar 2. 2. Wei H, Gao G (2014) A keyword retrieval system for historical mongolian document images. Int J Doc Anal Recognit (IJDAR) 17(1):33–45 3. 3. Ye Q, Doermann D (2015) Text detection and recognition in imagery: a survey. IEEE Trans Pattern Anal Mach Intell 37(7):1480–1500 4. 4. Jaderberg M, Vedaldi A, Zisserman A (2014) Deep features for text spotting. 
In: Computer vision—ECCV, pp 512–528Google Scholar 5. 5. Chen X, Yuille AL (2004) Detecting and reading text in natural scenes. In: IEEE computer society conference on computer vision and pattern recognition, pp 366–373Google Scholar 6. 6. Babenko B, Belongie S (2011) End-to-end scene text recognition. In: IEEE international conference on computer vision, pp 1457–1464Google Scholar 7. 7. Dalal N, Triggs B (2005) Histograms of oriented gradients for human detection. IEEE Comput Soc Conf Comput Vision Pattern Recognit 1:886–893Google Scholar 8. 8. Chen H, Tsai SS, Schroth G et al (2011) Robust text detection in natural images with edge-enhanced maximally stable extremal regions. In: 18th IEEE international conference on image processing, pp 2609–2612Google Scholar 9. 9. Yin XC, Yin X, Huang K et al (2014) Robust text detection in natural scene images. IEEE Trans Pattern Anal Mach Intell 36(5):970–983 10. 10. He T, Huang W, Qiao Y et al (2015) Text-attentional convolutional neural networks for scene text detection. IEEE Trans Image Process 25(6):2529–2541 11. 11. Epshtein B, Ofek E, Wexler Y (2010) Detecting text in natural scenes with stroke width transform. In: IEEE computer society conference on computer vision and pattern recognition, pp 2963–2970Google Scholar 12. 12. Nistér D, Stewénius H (2008) Linear time maximally stable extremal regions. In: European conference on computer vision-ECCV, pp 183–196Google Scholar 13. 13. Huang W, Qiao Y, Tang X (2014) Robust scene text detection with convolution neural network induced MSER trees. In: Computer vision–ECCV, pp 497–511Google Scholar 14. 14. Neubeck A, Gool L (2006) Efficient non-maximum suppression. In: 18th ICPRGoogle Scholar 15. 15. Shao Y, Wang C, Xiao B (2013) Fast self-generation voting for handwritten Chinese character recognition. Int J Doc Anal Recognit (IJDAR) 16(4):413–424 16. 16. 
Shao Y, Wang C, Xiao B (2015) A character image restoration method for unconstrained handwritten Chinese character recognition. Int J Doc Anal Recognit (IJDAR) 18(1):73–86 17. 17. Wang T, Wu DJ, Coates A, Ng AY (2012) End-to-end text recognition with convolutional neural network. In: IEEE international conference on pattern recognition, pp 3304–3308Google Scholar 18. 18. He K, Zhang X, Ren S et al (2016) Identity mappings in deep residual networksGoogle Scholar 19. 19. Shao Y, Gao G, Zhang L et al (2015) The first robust mongolian text reading dataset CSIMU-MTR, pp 781–788Google Scholar 20. 20. Wolf C, Jolion JM (2006) Object count/area graphs for the evaluation of object detection and segmentation algorithms. Int J Doc Anal Recognit 8(4):280–296
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8222366571426392, "perplexity": 16375.958564240807}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-31/segments/1627046153931.11/warc/CC-MAIN-20210730025356-20210730055356-00609.warc.gz"}
https://www.allthatmatters.academy/resource-pi/
# Resource: Pi Pi \pi is a constant of nature with infinitely many decimals (an irrational number). It is defined as the ratio between circumference and diameter of any circle. The first 500 decimals are:[1] \begin{aligned} \pi=3.&1415926535\;8979323846\; 2643383279 \; 5028841971 \;6939937510\\ ~& 5820974944 \; 5923078164 \; 0628620899 \; 8628034825 \; 3421170679\\ ~& 8214808651 \; 3282306647 \; 0938446095 \; 5058223172 \; 5359408128\\ ~& 4811174502 \; 8410270193 \; 8521105559 \; 6446229489 \; 5493038196\\ ~& 4428810975 \; 6659334461 \; 2847564823 \; 3786783165 \; 2712019091\\ ~& 4564856692 \; 3460348610 \; 4543266482 \; 1339360726 \; 0249141273\\ ~& 7245870066 \; 0631558817 \; 4881520920 \; 9628292540 \; 9171536436\\ ~& 7892590360 \; 0113305305 \; 4882046652 \; 1384146951 \; 9415116094\\ ~& 3305727036 \; 5759591953 \; 0921861173 \; 8193261179 \; 3105118548\\ ~& 0744623799 \; 6274956735 \; 1885752724 \; 8912279381 \; 8301194912\\ ~& \ldots \end{aligned} \pi has no pattern (never starts repeating itself) and never ends – and this is not just something people think; it can be proven.[2] Pi has become an international phenomenon and there even is a pi day: March 14th (3/14). See the website www.piday.org. References: 1. The first 10 digits of pi (π) are 3.1415926535’ (web page), Pi Day2, 2018, www.piday.org/million (accessed May 10th, 2019) 2. Q: How do we know that π never repeats? If we find enough digits, isn’t it possible that it will eventually start repeating?’ (web page), Ask a Mathematician / Ask a Physicist, 2013, www.askamathematician.com/2013/12/q-how-do-we-know-that-π-never-repeats-if-we-find-enough-digits-isnt-it-possible-that-it-will-eventually-start-repeating (accessed May 10th, 2019) ## Resource: Energies overview Overview of typical energies that we encounter or hear about in everyday life. 
This list can be extended massively within various fields of science, such… ## Resource: Alphabets References:‘The first 10 digits of pi (π) are 3.1415926535’ (web page), Pi Day2, 2018, www.piday.org/million (accessed May 10th, 2019)‘Q: How do we know that π… ## Resource: Often-used units Often-used units converted into SI units. Units are listed for length, time, mass, quantity (amount of substance), temperature, angle, area, volume, velocity, energy and power.… ## Resource: The four basic operations The four basic arithmetic operations with official symbols and terms: Note the possibility of skipping the multiplication sign completely. Also note the possibility of replacing… ## Resource: The SI unit system The SI unit system (abbreviated from French ‘Système international d’unités’) defines an official international collection of units that can and is recommended to be used…
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.96406090259552, "perplexity": 12756.593402375775}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-50/segments/1606141198409.43/warc/CC-MAIN-20201129123729-20201129153729-00551.warc.gz"}
https://crypto.stackexchange.com/questions/34985/where-is-the-mistake-in-my-rsa-by-hand-calculation?noredirect=1
Where is the mistake in my RSA by-hand calculation? Like several previous askers, I seem to have made a mistake in my RSA calculation, but despite going back over it three times, I cannot spot it. I picked 1000003 and 6000011 as my primes, p and q. n = 1000003 * 6000011 = 6000029000033 z = φ(n) = (1000003 - 1) * (6000011 - 1) = 6000022000020 e = 17 (arbitrary pick of small integer that's coprime to z) Public key is (e, n) = (17, 6000029000033) Found d by solving (17 * e) mod z = (17 * d) % 6000022000020 d = 857146000003 d was calculated with an online multiplicative inverse calculator. I suspect this is where the error is, but I've redone it several times with different calculators to no avail. So then I encrypted the number 6: encryptedMessage = (message)^e mod n encryptedMessage = (6)^17 mod 6000029000033 encryptedMessage = 4926601444670 Decryption is where I realized something must be wrong, because: message = (encryptedMessage)^d mod n message = 4926601444670 ^ 857146000003 mod 6000029000033 Except obviously that exponential operation is too huge to be done. I can't calculate it anywhere. It crashed my interpreter when I put it into python. What am I doing wrong here?? :( Or, perhaps, am I doing it right, and it's just that there's a necessary efficient method for this calculation which I'm not aware of? • Your $d$ is wrong, it doesn't satisfy $ed \equiv 1 \pmod{\mathrm{lcm}(p - 1, q - 1)}$. In fact it doesn't even satisfy the weaker (but sufficient) condition $ed \equiv 1 \pmod{\varphi(n)}$. A working $d$ for $e = 17$ is 4235309647073. What calculator did you use? – Thomas May 1 '16 at 3:30 • For your second question about Python crashing, please see crypto.stackexchange.com/questions/13235/… – Thomas May 1 '16 at 3:31 • I think I put the numbers in wrong....I must have. I don't know how....trying to retrace... – temporary_user_name May 1 '16 at 7:21 • Yes, now they all give me the correct result as in Henno Brandsma's answer. 
I must have been using the wrong numbers somehow. I don't know what I typed in. Oh well. Thank you. – temporary_user_name May 1 '16 at 7:25 • @Thomas : ​ Congruence mod $\phi(n)$ is a stronger condition than congruence mod the Lcm, since $\phi(n)$ is a multiple of the Lcm. ​ ​ ​ ​ – user991 May 1 '16 at 18:34 Your $d$ is indeed incorrect. I get $d = 4235309647073$, using Wolfram alpha. As to python, use the builtin pow function with a third argument equal to the modulus. So message = 6 encmessage = pow(message, 17, 6000029000033) assert message == pow(encmessage, 4235309647073, 6000029000033) which will apply a smart algorithm like those in answers to this question. Never compute the direct power first! 1. $(e*d) \text{ mod } (\phi(n))=1$ 2. $(17*d) \text{ mod } 6000022000020=1$ 3. $d=4235309647073$. I used WolframAlpha to compute d.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7835129499435425, "perplexity": 1553.898334562022}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-39/segments/1568514575076.30/warc/CC-MAIN-20190922032904-20190922054904-00147.warc.gz"}
https://www.zbmath.org/authors/?q=ai%3Acohen.michael-p
× # zbMATH — the first resource for mathematics ## Cohen, Michael P. Compute Distance To: Author ID: cohen.michael-p Published as: Cohen, M.; Cohen, M. P.; Cohen, Michael; Cohen, Michael P. External Links: MGP Documents Indexed: 45 Publications since 1974 #### Co-Authors 7 single-authored 3 Kallman, Robert R. 3 Kuo, Lynn 2 Akhmedov, Azer all top 5 #### Serials 2 Topology and its Applications 1 American Mathematical Monthly 1 Israel Journal of Mathematics 1 The Annals of Statistics 1 Biometrical Journal 1 The British Journal for the Philosophy of Science 1 Fundamenta Mathematicae 1 Indiana University Mathematics Journal 1 International Statistical Review 1 Journal of the London Mathematical Society. Second Series 1 Real Analysis Exchange 1 Ergodic Theory and Dynamical Systems 1 Statistics & Decisions 1 Forum Mathematicum all top 5 #### Fields 5 Statistics (62-XX) 3 Group theory and generalizations (20-XX) 3 Topological groups, Lie groups (22-XX) 3 Dynamical systems and ergodic theory (37-XX) 2 Mathematical logic and foundations (03-XX) 2 Real functions (26-XX) 2 General topology (54-XX) 2 Manifolds and cell complexes (57-XX) 1 Measure and integration (28-XX) 1 Sequences, series, summability (40-XX) 1 Abstract harmonic analysis (43-XX) 1 Probability theory and stochastic processes (60-XX) #### Citations contained in zbMATH Open 22 Publications have been cited 258 times in 247 Documents Cited by Year Group-graded rings, smash products, and group actions. Zbl 0533.16001 Cohen, M.; Montgomery, S. 1984 Random utility representation of binary choice probabilities: A new class of necessary conditions. Zbl 0689.92018 Cohen, Michael; Falmagne, Jean-Claude 1990 Fundamental unit structures: A theory of ratio scalability. Zbl 0422.92029 Cohen, Michael; Narens, Louis 1979 The admissibility of the empirical distribution function. Zbl 0575.62011 Cohen, Michael P.; Kuo, Lynn 1985 The normal closure of a semiprime ring. Zbl 0425.16004 Cohen, M.; Montgomery, S. 
1979 Factorizable automorphisms in solvable conjoint structures. I. Zbl 0515.06006 Luce, R. Duncan; Cohen, Michael 1983 Cohen, M.; Montgomery, S. 1987 Image and video segmentation by anisotropic kernel mean shift. Zbl 1098.68883 Wang, Jue; Thiesson, Bo; Xu, Yingqing; Cohen, Michael 2004 A unified treatment of Schrödinger’s equation for anharmonic and double well potentials. Zbl 0692.35112 Burrows, B. L.; Cohen, M.; Feldmann, Tova 1989 Openly Haar null sets and conjugacy in Polish groups. Zbl 1358.43001 Cohen, Michael P.; Kallman, Robert R. 2016 Robust, smoothly heterogeneous variance regression. Zbl 0825.62583 Cohen, M.; Dalal, S. R.; Tukey, J. W. 1993 A dynamic epistemic logic with a knowability principle. Zbl 06521595 Cohen, Michael 2015 Quantum commutativity and central invariants. Zbl 0831.17006 Cohen, M. 1994 Rayleigh-Schrödinger perturbation theory with a strong perturbation: anharmonic oscillators. Zbl 0617.35030 Cohen, M.; Kais, S. 1986 Minimax sampling strategies for estimating a finite population distribution function. Zbl 0587.62026 Cohen, M. P.; Kuo, L. 1985 On three measures of explanatory power with axiomatic representations. Zbl 1386.03010 Cohen, Michael P. 2016 A conjecture of Gleason on the foundations of geometry. Zbl 1286.54034 Cohen, Michael P.; Kallman, Robert R. 2014 The descriptive complexity of series rearrangements. Zbl 1304.40001 Cohen, Michael P. 2013 Sample means of independent standard Cauchy random variables are standard Cauchy: a new approach. Zbl 1257.62011 Cohen, Michael P. 2012 Sample size considerations for multilevel surveys. Zbl 1105.62009 Cohen, Michael P. 2005 Sensitive micro data protection using Latin hypercube sampling technique. Zbl 1051.68629 Dandekar, Ramesh A.; Cohen, Michael; Kirkendall, Nancy 2002 A Lie algebraic study of some Schrödinger equations. Zbl 0819.35115 Burrows, B. L.; Cohen, M.; Feldmann, Tova 1994 Openly Haar null sets and conjugacy in Polish groups. Zbl 1358.43001 Cohen, Michael P.; Kallman, Robert R. 
2016 On three measures of explanatory power with axiomatic representations. Zbl 1386.03010 Cohen, Michael P. 2016 A dynamic epistemic logic with a knowability principle. Zbl 06521595 Cohen, Michael 2015 A conjecture of Gleason on the foundations of geometry. Zbl 1286.54034 Cohen, Michael P.; Kallman, Robert R. 2014 The descriptive complexity of series rearrangements. Zbl 1304.40001 Cohen, Michael P. 2013 Sample means of independent standard Cauchy random variables are standard Cauchy: a new approach. Zbl 1257.62011 Cohen, Michael P. 2012 Sample size considerations for multilevel surveys. Zbl 1105.62009 Cohen, Michael P. 2005 Image and video segmentation by anisotropic kernel mean shift. Zbl 1098.68883 Wang, Jue; Thiesson, Bo; Xu, Yingqing; Cohen, Michael 2004 Sensitive micro data protection using Latin hypercube sampling technique. Zbl 1051.68629 Dandekar, Ramesh A.; Cohen, Michael; Kirkendall, Nancy 2002 Quantum commutativity and central invariants. Zbl 0831.17006 Cohen, M. 1994 A Lie algebraic study of some Schrödinger equations. Zbl 0819.35115 Burrows, B. L.; Cohen, M.; Feldmann, Tova 1994 Robust, smoothly heterogeneous variance regression. Zbl 0825.62583 Cohen, M.; Dalal, S. R.; Tukey, J. W. 1993 Random utility representation of binary choice probabilities: A new class of necessary conditions. Zbl 0689.92018 Cohen, Michael; Falmagne, Jean-Claude 1990 A unified treatment of Schrödinger’s equation for anharmonic and double well potentials. Zbl 0692.35112 Burrows, B. L.; Cohen, M.; Feldmann, Tova 1989 Cohen, M.; Montgomery, S. 1987 Rayleigh-Schrödinger perturbation theory with a strong perturbation: anharmonic oscillators. Zbl 0617.35030 Cohen, M.; Kais, S. 1986 The admissibility of the empirical distribution function. Zbl 0575.62011 Cohen, Michael P.; Kuo, Lynn 1985 Minimax sampling strategies for estimating a finite population distribution function. Zbl 0587.62026 Cohen, M. P.; Kuo, L. 1985 Group-graded rings, smash products, and group actions. 
Zbl 0533.16001 Cohen, M.; Montgomery, S. 1984 Factorizable automorphisms in solvable conjoint structures. I. Zbl 0515.06006 Luce, R. Duncan; Cohen, Michael 1983 Fundamental unit structures: A theory of ratio scalability. Zbl 0422.92029 Cohen, Michael; Narens, Louis 1979 The normal closure of a semiprime ring. Zbl 0425.16004 Cohen, M.; Montgomery, S. 1979 all top 5 #### Cited by 277 Authors 10 Montgomery, Susan 9 Luce, Robert Duncan 9 Nastasescu, Constantin 7 Cohen, Miriam 6 Kelarev, Andrei Vladimirovich 6 Quinn, Declan 6 van Oystaeyen, Freddy 5 Aljadeff, Eli 5 Beattie, Margaret 5 Ilić-Georgijević, Emil 5 Narens, Louis Edward 5 Öinert, Johan 5 Yu, Qiqing 4 Aliyari Ghassabeh, Youness 4 Bergen, Jeffrey 4 Chin, William 4 Fiorini, Samuel 4 Fishburn, Peter Clingerman 4 Stewart, Patrick N. 4 Suck, Reinhard 3 Abrams, Gene D. 3 Asashiba, Hideto 3 Burrows, B. L. 3 Cohen, Meital 3 Dăscălescu, Sorin 3 Doignon, Jean-Paul 3 Feldmann, Tova 3 Haefner, Jeremy 3 Jokiel-Rokita, Alicja 3 Letzter, Edward S. 3 Magiera, Ryszard 3 Menini, Claudia 3 Passman, Donald Steven 3 Raianu, Şerban 3 Westreich, Sara 3 Zhao, Deke 2 Aczél, János Dezső 2 Ara, Pere 2 Blattner, Robert J. 2 Cohen, Michael Andrew 2 Darji, Udayan B. 2 David, Ofir 2 Dugas, Alex S. 2 Elekes, Márton 2 Gaudreau, Philippe J. 2 Green, Edward Lee 2 Grzeszczuk, Piotr 2 Han, Yang 2 Herscovich, Estanislao 2 Jespers, Eric 2 Joret, Gwenaël 2 Kalina, Kende 2 Kiss, Viktor 2 Li, Liping 2 Lomp, Christian 2 Marcos, Eduardo N. 2 Monderer, Dov 2 Nystedt, Patrik 2 Okniński, Jan 2 Osterburg, James 2 Pascaud, Jean-Louis 2 Pikhtilkov, Sergeĭ Alekseevich 2 Plant, Anne L. 2 Safouhi, Hassan 2 Şahinkaya, Serap 2 Schneider, Hans-Jürgen 2 Sidorov, Alexander Vasil’evich 2 Silvestrov, Sergei D. 2 Skryabin, Serge 2 Vidnyánszky, Zoltán 2 Wauters, Paul 2 Wilcox, Rand R. 2 Xu, Yunge 2 Zhou, Borong 1 Abouzaid, Bouchra 1 Abuhlail, Jawad Y. 1 Al-Shomrani, Mohammed Mosa 1 Al-Subaie, Najla Sh. 1 Albu, Toma 1 Alcock, E. 1 Allison, A. C. 
1 Alvares, Edson Ribeiro 1 Alves, Marcelo Muniz Silva 1 Balaba, Irina Nikolaevna 1 Batista, Eliezer 1 Beaulieu, Raymond A. 1 Bell, Allen D. 1 Berele, Allan 1 Blass, Andreas Raphael 1 Bolotashvili, G. G. 1 Bouyssou, Denis 1 Brendle, Jörg 1 Brian, William Rea 1 Bulacu, Daniel 1 Cai, Chuanren 1 Caruvana, Christopher 1 Castella, Dominique 1 Cavagnaro, Daniel R. 1 Ceretto, Daniel 1 Chen, Huixiang ...and 177 more Authors all top 5 #### Cited in 78 Serials 49 Journal of Algebra 18 Journal of Mathematical Psychology 14 Proceedings of the American Mathematical Society 11 Journal of Pure and Applied Algebra 10 Communications in Algebra 10 Israel Journal of Mathematics 10 Transactions of the American Mathematical Society 5 Journal of Mathematical Physics 5 Mathematische Zeitschrift 5 Mathematical Social Sciences 4 Bulletin of the Australian Mathematical Society 4 Advances in Mathematics 4 Annals of the Institute of Statistical Mathematics 4 Communications in Statistics. Theory and Methods 3 Mathematical Proceedings of the Cambridge Philosophical Society 3 Theory and Decision 3 Journal of Mathematical Sciences (New York) 2 The Canadian Journal of Statistics 2 Communications in Mathematical Physics 2 Discrete Mathematics 2 Journal of Mathematical Analysis and Applications 2 Metrika 2 Algebra and Logic 2 Archiv der Mathematik 2 Glasgow Mathematical Journal 2 Journal of Computational and Applied Mathematics 2 Journal of Statistical Planning and Inference 2 Semigroup Forum 2 Acta Mathematica Hungarica 2 Acta Applicandae Mathematicae 2 Annals of Physics 2 Pattern Recognition 2 Acta Mathematica Sinica. English Series 2 Journal of Algebra and its Applications 2 Discrete Optimization 1 Discrete Applied Mathematics 1 Journal of Computational Physics 1 Mathematical Notes 1 Physics Letters. 
A 1 Psychometrika 1 Rocky Mountain Journal of Mathematics 1 Journal of Geometry and Physics 1 Fundamenta Mathematicae 1 Information Sciences 1 Publications Mathématiques 1 Journal of Multivariate Analysis 1 Manuscripta Mathematica 1 Proceedings of the Edinburgh Mathematical Society. Series II 1 Rendiconti del Seminario Matematico della Università di Padova 1 Studia Logica 1 Statistics & Probability Letters 1 Order 1 Science in China. Series A 1 Machine Learning 1 International Journal of Algebra and Computation 1 Journal of Risk and Uncertainty 1 Numerical Algorithms 1 Aequationes Mathematicae 1 Communications in Statistics. Simulation and Computation 1 Journal of Statistical Computation and Simulation 1 Acta Mathematica Sinica. New Series 1 Applied Categorical Structures 1 Applied Mathematics. Series B (English Edition) 1 Topology Proceedings 1 Journal of Multi-Criteria Decision Analysis 1 Transformation Groups 1 Representation Theory 1 Taiwanese Journal of Mathematics 1 Algebras and Representation Theory 1 Journal of Machine Learning Research (JMLR) 1 Central European Journal of Mathematics 1 Annali dell’Università di Ferrara. Sezione VII. 
Scienze Matematiche 1 Mathematics in Computer Science 1 Optimization Letters 1 Algebra & Number Theory 1 European Journal of Pure and Applied Mathematics 1 São Paulo Journal of Mathematical Sciences 1 European Journal for Philosophy of Science all top 5 #### Cited in 37 Fields 149 Associative rings and algebras (16-XX) 28 Game theory, economics, finance, and other social and behavioral sciences (91-XX) 25 Statistics (62-XX) 22 Nonassociative rings and algebras (17-XX) 16 Group theory and generalizations (20-XX) 13 Category theory; homological algebra (18-XX) 12 Order, lattices, ordered algebraic structures (06-XX) 10 Mathematical logic and foundations (03-XX) 8 Quantum theory (81-XX) 7 Convex and discrete geometry (52-XX) 7 Computer science (68-XX) 6 Operations research, mathematical programming (90-XX) 5 General algebraic systems (08-XX) 5 Commutative algebra (13-XX) 5 Linear and multilinear algebra; matrix theory (15-XX) 4 Combinatorics (05-XX) 4 Topological groups, Lie groups (22-XX) 4 Measure and integration (28-XX) 4 General topology (54-XX) 4 Numerical analysis (65-XX) 3 General and overarching topics; collections (00-XX) 3 Ordinary differential equations (34-XX) 3 Partial differential equations (35-XX) 3 Functional analysis (46-XX) 2 Algebraic geometry (14-XX) 2 $$K$$-theory (19-XX) 2 Difference and functional equations (39-XX) 2 Differential geometry (53-XX) 2 Probability theory and stochastic processes (60-XX) 2 Biology and other natural sciences (92-XX) 1 History and biography (01-XX) 1 Dynamical systems and ergodic theory (37-XX) 1 Sequences, series, summability (40-XX) 1 Abstract harmonic analysis (43-XX) 1 Manifolds and cell complexes (57-XX) 1 Statistical mechanics, structure of matter (82-XX) 1 Information and communication theory, circuits (94-XX)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.45397260785102844, "perplexity": 8965.125548281609}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-21/segments/1620243989614.9/warc/CC-MAIN-20210511122905-20210511152905-00504.warc.gz"}
http://mathematica.stackexchange.com/questions/19288/summing-tensors-in-mathematica
# Summing tensors in Mathematica How do I perform the following summation in Mathematica? $$\sum_{m=1}^{5} e_{ijklm}A^{mn}$$ I have the $e_{ijklm}$ tensor of rank 5 in 5 dimensions as an array and $A^{mn}$ as a 5x5 matrix. - Since the sum goes over the last index of $e$ and the first index of $A$, it is directly done by using Dot: dim = 5; e = Array[\[ScriptE], Table[dim, {dim}]]; a = Array[\[ScriptA], Table[dim, {2}]]; c = e.a; Here I defined the arrays with the appropriate dimensions but suppressed the output because it's too long for five dimensions. Another interesting alternative for more general sums is what I mentioned in this answer, but it requires version 9: TensorContract[TensorProduct[e, a], {dim, dim+1}] == c True Here the {dim, dim+1} are just the last index of the first factor and the first index of the second factor. The latter can be generalized to sums over say k etc. -
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 1, "x-ck12": 0, "texerror": 0, "math_score": 0.8990561962127686, "perplexity": 1037.1879273783302}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-49/segments/1416400380233.64/warc/CC-MAIN-20141119123300-00015-ip-10-235-23-156.ec2.internal.warc.gz"}
https://eprints.soton.ac.uk/30079/
The University of Southampton University of Southampton Institutional Repository # Exact calculations for the one-sided studentized range test for testing against a simple ordered alternative Record type: Article Hayter (J. Amer. Statist. Assoc. 85 (1990)) proposed a one-sided studentized range test (OSRT) for testing the null hypothesis $H_0: \mu_1 = \dots = \mu_k$ against the simple ordered alternative $H_a: \mu_1 \le \dots \le \mu_k$ in a one-way layout. The size and power of this test, however, are quite difficult to calculate. The method suggested in Hayter (J. Amer. Statist. Assoc. 85 (1990)) for critical point computation works only for small k. The method introduced in this paper works for much larger k, and also works for the power calculation. Some tables of critical points and minimum sample sizes satisfying certain power requirements are provided. Full text not available from this repository. ## Citation Hayter, A.J. and Liu, W. (1996) Exact calculations for the one-sided studentized range test for testing against a simple ordered alternative Computational Statistics & Data Analysis, 22, (1), pp. 17-25. Published date: 1996 Keywords: analysis of variance, critical point calculation, order restricted inference, multiple comparisons Organisations: Statistics ## Identifiers Local EPrints ID: 30079 URI: http://eprints.soton.ac.uk/id/eprint/30079 ISSN: 0167-9473 PURE UUID: 00aa448e-f208-4d69-8e6a-9eca4b24c5b3 ORCID for W. Liu: orcid.org/0000-0002-4719-0345 ## Catalogue record Date deposited: 15 Mar 2007 ## Contributors Author: A.J. Hayter Author: W. Liu
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8048139214515686, "perplexity": 7887.2471843639005}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-30/segments/1500549425407.14/warc/CC-MAIN-20170725222357-20170726002357-00513.warc.gz"}
https://arxiv.org/abs/1611.00840
cs.DS # Title:Below all subsets for Minimal Connected Dominating Set Abstract: A vertex subset $S$ in a graph $G$ is a dominating set if every vertex not contained in $S$ has a neighbor in $S$. A dominating set $S$ is a connected dominating set if the subgraph $G[S]$ induced by $S$ is connected. A connected dominating set $S$ is a minimal connected dominating set if no proper subset of $S$ is also a connected dominating set. We prove that there exists a constant $\varepsilon > 10^{-50}$ such that every graph $G$ on $n$ vertices has at most $O(2^{(1-\varepsilon)n})$ minimal connected dominating sets. For the same $\varepsilon$ we also give an algorithm with running time $2^{(1-\varepsilon)n}\cdot n^{O(1)}$ to enumerate all minimal connected dominating sets in an input graph $G$. Comments: 13 pages Subjects: Data Structures and Algorithms (cs.DS); Discrete Mathematics (cs.DM); Combinatorics (math.CO) Cite as: arXiv:1611.00840 [cs.DS] (or arXiv:1611.00840v1 [cs.DS] for this version) ## Submission history From: Michał Pilipczuk [view email] [v1] Wed, 2 Nov 2016 23:21:18 UTC (19 KB)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.585865318775177, "perplexity": 328.40397930615546}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-39/segments/1568514572471.35/warc/CC-MAIN-20190916015552-20190916041552-00189.warc.gz"}
http://www.physicsforums.com/showpost.php?p=3051758&postcount=5
Thread: Percent Composition View Single Post P: 18 Quote by sjb-2812 No, if you are happy that the chemical formula you have given is correct, the maths is also correct. However, see e.g. http://en.wikipedia.org/w/index.php?...ldid=399297621 or similar. But the formula has to be (CH3)2 SO. 24 / 78 x 100 = 30.7 % - thanks a lot and happy holidays
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9302353858947754, "perplexity": 1853.0336280631377}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-35/segments/1409535921318.10/warc/CC-MAIN-20140909055359-00488-ip-10-180-136-8.ec2.internal.warc.gz"}
https://brilliant.org/problems/energy-of-anti-neutrino/
# Energy of anti-neutrino The $$\beta$$-decay process, discovered around 1900, is basically the decay of a neutron $$(n)$$. In the laboratory, a proton $$(p)$$ and an electron $$( e^- )$$ are observed as the decay products of the neutron. Therefore, considering the decay of a neutron as a two-body decay process, it was predicted theoretically that the kinetic energy of the electron should be a constant. But experimentally, it was observed that the electron kinetic energy has a continuous spectrum. Considering a three-body decay process, i.e. $$n \rightarrow p + e^- + (\overline{v_e})$$, around 1930, Pauli explained the observed electron energy spectrum. Assuming the anti-neutrino $$( \overline{v_e} )$$ to be massless and possessing negligible energy, and the neutron to be at rest, momentum and energy conservation principles are applied. From this calculation, the maximum kinetic energy of the electron is $$0.8 \times 10^6$$ eV. The kinetic energy carried by the proton is only the recoil energy. What is the maximum energy of the anti-neutrino? ×
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9913627505302429, "perplexity": 335.5522004593652}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-05/segments/1516084890928.82/warc/CC-MAIN-20180121234728-20180122014728-00592.warc.gz"}
https://cityofmclemoresville.com/pytorch-adadelta/
Checkout this video: ## Introduction Adadelta is similar to Adagrad in that it works well with sparse data (data that has many 0 values). Both methods are also resistant to exploding gradients (gradients that get too large and cause numerical instability). Adadelta is a learning algorithm for training neural networks. It is based on the intuition that the gradient of a loss function can be used to update weights in such a way as to minimize the value of the loss function. Adadelta has been shown to be more effective than other learning algorithms, such as Adagrad and RMSProp, in terms of both training time and accuracy. Adadelta is an extension of Adagrad that seeks to eliminate its aggressive, monotonically decreasing learning rate. Instead of each parameter having its own learning rate, it maintains a decaying average of the squared gradients similar to RMSProp. Similarly to RMSProp, Adadelta uses moving averages of both the gradients and the second moments of the gradients to scale the learning rate. However, unlike RMSProp, which uses the last gradient and second moment estimate, Adadelta uses all historical estimates when computing the current learning rate [1]. The authors proposed two methods for initializing the parameters $\epsilon$ and $\rho$. The first is to initialize both parameters to 0.9 and let them decay by 0.95 every 1000 training iterations [1]. The other is to initialize $\epsilon$ to a very small value such as $10^{-6}$ and $\rho$ according to \begin{equation*} \rho = \begin{cases} 0.9 & \text{if } T \leqslant 5 \\ 1 - \frac{1}{2T} & \text{if } T > 5 \end{cases} \end{equation*} One advantage of Adadelta over other optimizers is that it does not require a manually set learning rate – learning rates are automatically adjusted as training progresses. Another advantage of Adadelta vs. 
other optimizers is that it tends to converge more quickly and sometimes even reaches a higher final accuracy than other optimizers (although not always – it largely depends on the problem and the model being optimized). Adadelta has a couple of disadvantages. Firstly, it requires more memory than other optimizers because it needs to store all the squared gradients in memory. Secondly, Adadelta is not well suited for mini-batch training because the squared gradients are calculated using the entire dataset instead of just the mini-batch. Lastly, Adadelta does not work well with sparse data (data that has many zeros). Adadelta is a parameter update rule that is used in training neural networks. It was proposed by Matthew Zeiler in 2012. – Neural machine translation – Image Captioning – Speech Recognition ## Conclusion This concludes our tutorial on Adadelta – a Pytorch implementation. We have seen how to implement Adadelta from scratch and how to use it in practice. We have also looked at some of the important parameters that need to be tuned for Adadelta. Thank you for reading!
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7816529870033264, "perplexity": 579.8409503985355}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-14/segments/1679296948951.4/warc/CC-MAIN-20230329054547-20230329084547-00404.warc.gz"}
https://research.vit.ac.in/publication/soft-switched-dc-dc-converter-with-current
X Soft-switched DC-DC converter with current and voltage doubler Dharmesh P.D., Published in Informa UK Limited 2013 Volume: 10 Issue: 1 Pages: 1 - 10 Abstract This paper proposes a method that consists of a phase-shift converter with a current doubler rectifier on the output side. The phase-shift converter operates at a frequency as high as 20-25 kHz (depending on the requirement of the application) to improve the power density of the converter using unity turns ratio in the isolation transformer. The size of the circuit is smaller for the same power rating. Ferrite core transformer is used instead of the conventional one, which is bulky and leads to a very large core loss. Unlike the conventional method, the number of turns of the transformer is reduced and the overall power density is increased. The current doubler rectifier doubles the input current as needed for a few vital applications. The circuit for the phase-shift converter with the current doubler was simulated using PSPICE software. For an input current of 4 A, an output of 10.2 A was obtained, which eventually settled at nearly 8.5 A. A hardware model of the proposed method validates the simulation results.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8885353803634644, "perplexity": 966.6319642399594}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-14/segments/1679296948932.75/warc/CC-MAIN-20230329023546-20230329053546-00039.warc.gz"}
http://beesbuzz.biz/comics/?id=646
## Journal: 2014 Resolutions # Two versions of this one, both with and without the now-traditional white tummy patch. When I first designed this particular self-portrayal character it was patchless, but I decided to add it a couple years later (and the horns came even later). Now I’m waffling on it again. Which one do you think looks better?
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8877328634262085, "perplexity": 2853.787173089204}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-14/segments/1679296948932.75/warc/CC-MAIN-20230329023546-20230329053546-00211.warc.gz"}
http://cran.salud.gob.sv/web/packages/pkgdown/vignettes/metadata.html
Package authors can customize the metadata used by Twitter and the Open Graph protocol for rich social media cards. In addition to specifying an alternate description for the package and any individual articles, you may also choose the preview image shown and the style of card used on Twitter. You can preview and validate the appearance of the social media cards with online tools: ## Site-wide customization Metadata for the entire pkgdown website can be specified in the site’s _pkgdown.yml configuration file in the home and template: opengraph sections: home: title: An R package for pool-noodle discovery template: opengraph: image: src: man/figures/card.png alt: "Pool noodles configured to form the word poolnoodlr" site: "@rstudio" card: summary_large_image The home: title and home: description fields override the Title and Description fields in the package DESCRIPTION. It’s good practice to set these fields to make your package documentation easier to find via search, rather than sticking with the title and description needed by CRAN. The template: opengraph section allows you to further customize the social media card. • image: By default, pkgdown uses the package’s logo for the card image (if one exists). Use image to specify an alternative image for the social media cards of pages in your pkgdown site. • src: A fully qualified URL to a media card image, or a relative path to an image stored in the package. The src field is required if image is specified. • alt: Alternative text describing the image for screen readers and other situations where your social media card image cannot be displayed. • twitter: You can specify the Twitter accounts associated with your package and the style of social media card that Twitter will display. • creator: Typically, the Twitter handle of the author of the package or article. • site: The Twitter handle of the organization affiliated with the package author or sponsoring the package development. 
• If only one of creator or site are included, the provided value will be used for both fields. • card: The style of social media card that Twitter will display. For pkgdown sites, the most relevant options are summary_large_image, featuring a large image over the page title and description, or summary, featuring a small square image inline and to the left of the page title and description. Articles and vignettes rendered as articles by pkgdown can have individually customized metadata and social media cards. title: "Introduction to poolnoodlr" description: "A brief introduction to pool noodles in R." author: "Mara Averick" opengraph: image: src: "http://example.com/pkg/batpig.png" card: summary creator: "@dataandme" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Introduction to poolnoodlr} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} Use the title, description, and author fields to specify the title, description, and (optional) author of the vignette or article. • The title field is used as the title of your article in your pkgdown site and should always be included. • Both title and description are used by pkgdown for the page’s social media card. If description is not included in the article’s YAML front matter, then the name of the package is used instead. The description is also displayed on the articles index. • The author field is only used in the text of the vignette or article. How the author name is displayed depends on the output format. In articles, the opengraph section works in the same way as the site-wide template: opengraph settings, but is only applied to the article or vignette. This allows you to specify social media card preview images for individual articles, or to associate an article with a particular Twitter account. If not specified, the opengraph settings from the site-wide configuration are used.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.18063010275363922, "perplexity": 3382.380838706726}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-24/segments/1590348492427.71/warc/CC-MAIN-20200605014501-20200605044501-00250.warc.gz"}
https://mesfind.github.io/R-ecology-lesson/01-intro-to-r.html
### Learning Objectives • Define the following terms as they relate to R: object, assign, call, function, arguments, options. • Assign values to objects in R. • Learn how to name objects • Use comments to inform script. • Solve simple arithmetic operations in R. • Call functions and use arguments to change their default options. • Inspect the content of vectors and manipulate their content. • Subset and extract values from vectors. • Analyze vectors with missing data. ## Creating objects in R You can get output from R simply by typing math in the console: 3 + 5 12 / 7 However, to do useful and interesting things, we need to assign values to objects. To create an object, we need to give it a name followed by the assignment operator <-, and the value we want to give it: weight_kg <- 55 <- is the assignment operator. It assigns values on the right to objects on the left. So, after executing x <- 3, the value of x is 3. The arrow can be read as 3 goes into x. For historical reasons, you can also use = for assignments, but not in every context. Because of the slight differences in syntax, it is good practice to always use <- for assignments. In RStudio, typing Alt + - (push Alt at the same time as the - key) will write <- in a single keystroke in a PC, while typing Option + - (push Option at the same time as the - key) does the same in a Mac. Objects can be given any name such as x, current_temperature, or subject_id. You want your object names to be explicit and not too long. They cannot start with a number (2x is not valid, but x2 is). R is case sensitive (e.g., weight_kg is different from Weight_kg). There are some names that cannot be used because they are the names of fundamental functions in R (e.g., if, else, for, see here for a complete list). In general, even if it’s allowed, it’s best to not use other function names (e.g., c, T, mean, data, df, weights). If in doubt, check the help to see if the name is already in use. It’s also best to avoid dots (.) 
within an object name as in my.dataset. There are many functions in R with dots in their names for historical reasons, but because dots have a special meaning in R (for methods) and other programming languages, it’s best to avoid them. It is also recommended to use nouns for object names, and verbs for function names. It’s important to be consistent in the styling of your code (where you put spaces, how you name objects, etc.). Using a consistent coding style makes your code clearer to read for your future self and your collaborators. In R, three popular style guides are Google’s, Jean Fan’s and the tidyverse’s. The tidyverse’s is very comprehensive and may seem overwhelming at first. You can install the lintr package to automatically check for issues in the styling of your code. ### Objects vs. variables What are known as objects in R are known as variables in many other programming languages. Depending on the context, object and variable can have drastically different meanings. However, in this lesson, the two words are used synonymously. For more information see: https://cran.r-project.org/doc/manuals/r-release/R-lang.html#Objects When assigning a value to an object, R does not print anything. You can force R to print the value by using parentheses or by typing the object name: weight_kg <- 55 # doesn't print anything (weight_kg <- 55) # but putting parenthesis around the call prints the value of weight_kg weight_kg # and so does typing the name of the object Now that R has weight_kg in memory, we can do arithmetic with it. 
For instance, we may want to convert this weight into pounds (weight in pounds is 2.2 times the weight in kg): 2.2 * weight_kg We can also change an object’s value by assigning it a new one: weight_kg <- 57.5 2.2 * weight_kg This means that assigning a value to one object does not change the values of other objects For example, let’s store the animal’s weight in pounds in a new object, weight_lb: weight_lb <- 2.2 * weight_kg and then change weight_kg to 100. weight_kg <- 100 What do you think is the current content of the object weight_lb? 126.5 or 220? The comment character in R is #, anything to the right of a # in a script will be ignored by R. It is useful to leave notes, and explanations in your scripts. RStudio makes it easy to comment or uncomment a paragraph: after selecting the lines you want to comment, press at the same time on your keyboard Ctrl + Shift + C. If you only want to comment out one line, you can put the cursor at any location of that line (i.e. no need to select the whole line), then press Ctrl + Shift + C. ### Challenge What are the values after each statement in the following? mass <- 47.5 # mass? age <- 122 # age? mass <- mass * 2.0 # mass? age <- age - 20 # age? mass_index <- mass/age # mass_index? ### Functions and their arguments Functions are “canned scripts” that automate more complicated sets of commands including operations assignments, etc. Many functions are predefined, or can be made available by importing R packages (more on that later). A function usually gets one or more inputs called arguments. Functions often (but not always) return a value. A typical example would be the function sqrt(). The input (the argument) must be a number, and the return value (in fact, the output) is the square root of that number. Executing a function (‘running it’) is called calling the function. 
An example of a function call is: b <- sqrt(a) Here, the value of a is given to the sqrt() function, the sqrt() function calculates the square root, and returns the value which is then assigned to the object b. This function is very simple, because it takes just one argument. The return ‘value’ of a function need not be numerical (like that of sqrt()), and it also does not need to be a single item: it can be a set of things, or even a dataset. We’ll see that when we read data files into R. Arguments can be anything, not only numbers or filenames, but also other objects. Exactly what each argument means differs per function, and must be looked up in the documentation (see below). Some functions take arguments which may either be specified by the user, or, if left out, take on a default value: these are called options. Options are typically used to alter the way the function operates, such as whether it ignores ‘bad values’, or what symbol to use in a plot. However, if you want something specific, you can specify a value of your choice which will be used instead of the default. Let’s try a function that can take multiple arguments: round(). round(3.14159) #> [1] 3 Here, we’ve called round() with just one argument, 3.14159, and it has returned the value 3. That’s because the default is to round to the nearest whole number. If we want more digits we can see how to do that by getting information about the round function. We can use args(round) or look at the help for this function using ?round. args(round) #> function (x, digits = 0) #> NULL ?round We see that if we want a different number of digits, we can type digits=2 or however many we want. 
round(3.14159, digits = 2) #> [1] 3.14 If you provide the arguments in the exact same order as they are defined you don’t have to name them: round(3.14159, 2) #> [1] 3.14 And if you do name the arguments, you can switch their order: round(digits = 2, x = 3.14159) #> [1] 3.14 It’s good practice to put the non-optional arguments (like the number you’re rounding) first in your function call, and to specify the names of all optional arguments. If you don’t, someone reading your code might have to look up the definition of a function with unfamiliar arguments to understand what you’re doing. ## Vectors and data types A vector is the most common and basic data type in R, and is pretty much the workhorse of R. A vector is composed by a series of values, which can be either numbers or characters. We can assign a series of values to a vector using the c() function. For example we can create a vector of animal weights and assign it to a new object weight_g: weight_g <- c(50, 60, 65, 82) weight_g A vector can also contain characters: animals <- c("mouse", "rat", "dog") animals The quotes around “mouse”, “rat”, etc. are essential here. Without the quotes R will assume there are objects called mouse, rat and dog. As these objects don’t exist in R’s memory, there will be an error message. There are many functions that allow you to inspect the content of a vector. length() tells you how many elements are in a particular vector: length(weight_g) length(animals) An important feature of a vector, is that all of the elements are the same type of data. The function class() indicates the class (the type of element) of an object: class(weight_g) class(animals) The function str() provides an overview of the structure of an object and its elements. 
It is a useful function when working with large and complex objects: str(weight_g) str(animals) You can use the c() function to add other elements to your vector: weight_g <- c(weight_g, 90) # add to the end of the vector weight_g <- c(30, weight_g) # add to the beginning of the vector weight_g In the first line, we take the original vector weight_g, add the value 90 to the end of it, and save the result back into weight_g. Then we add the value 30 to the beginning, again saving the result back into weight_g. We can do this over and over again to grow a vector, or assemble a dataset. As we program, this may be useful to add results that we are collecting or calculating. An atomic vector is the simplest R data type and is a linear vector of a single type. Above, we saw 2 of the 6 main atomic vector types that R uses: "character" and "numeric" (or "double"). These are the basic building blocks that all R objects are built from. The other 4 atomic vector types are: • "logical" for TRUE and FALSE (the boolean data type) • "integer" for integer numbers (e.g., 2L, the L indicates to R that it’s an integer) • "complex" to represent complex numbers with real and imaginary parts (e.g., 1 + 4i) and that’s all we’re going to say about them • "raw" for bitstreams that we won’t discuss further You can check the type of your vector using the typeof() function and inputting your vector as the argument. Vectors are one of the many data structures that R uses. Other important ones are lists (list), matrices (matrix), data frames (data.frame), factors (factor) and arrays (array). ### Challenge • We’ve seen that atomic vectors can be of type character, numeric (or double), integer, and logical. But what happens if we try to mix these types in a single vector? R implicitly converts them to all be the same type • What will happen in each of these examples? 
(hint: use class() to check the data type of your objects): num_char <- c(1, 2, 3, "a") num_logical <- c(1, 2, 3, TRUE) char_logical <- c("a", "b", "c", TRUE) tricky <- c(1, 2, 3, "4") • Why do you think it happens? Vectors can be of only one data type. R tries to convert (coerce) the content of this vector to find a “common denominator” that doesn’t lose any information. • How many values in combined_logical are "TRUE" (as a character) in the following example: num_logical <- c(1, 2, 3, TRUE) char_logical <- c("a", "b", "c", TRUE) combined_logical <- c(num_logical, char_logical) • You’ve probably noticed that objects of different types get converted into a single, shared type within a vector. In R, we call converting objects from one class into another class coercion. These conversions happen according to a hierarchy, whereby some types get preferentially coerced into other types. Can you draw a diagram that represents the hierarchy of how these data types are coerced? logical → numeric → character ← logical ## Subsetting vectors If we want to extract one or several values from a vector, we must provide one or several indices in square brackets. For instance: animals <- c("mouse", "rat", "dog", "cat") animals[2] #> [1] "rat" animals[c(3, 2)] #> [1] "dog" "rat" We can also repeat the indices to create an object with more elements than the original one: more_animals <- animals[c(1, 2, 3, 2, 1, 4)] more_animals #> [1] "mouse" "rat" "dog" "rat" "mouse" "cat" R indices start at 1. Programming languages like Fortran, MATLAB, Julia, and R start counting at 1, because that’s what human beings typically do. Languages in the C family (including C++, Java, Perl, and Python) count from 0 because that’s simpler for computers to do. ### Conditional subsetting Another common way of subsetting is by using a logical vector. 
TRUE will select the element with the same index, while FALSE will not: weight_g <- c(21, 34, 39, 54, 55) weight_g[c(TRUE, FALSE, TRUE, TRUE, FALSE)] #> [1] 21 39 54 Typically, these logical vectors are not typed by hand, but are the output of other functions or logical tests. For instance, if you wanted to select only the values above 50: weight_g > 50 # will return logicals with TRUE for the indices that meet the condition #> [1] FALSE FALSE FALSE TRUE TRUE ## so we can use this to select only the values above 50 weight_g[weight_g > 50] #> [1] 54 55 You can combine multiple tests using & (both conditions are true, AND) or | (at least one of the conditions is true, OR): weight_g[weight_g < 30 | weight_g > 50] #> [1] 21 54 55 weight_g[weight_g >= 30 & weight_g == 21] #> numeric(0) Here, < stands for “less than”, > for “greater than”, >= for “greater than or equal to”, and == for “equal to”. The double equal sign == is a test for numerical equality between the left and right hand sides, and should not be confused with the single = sign, which performs variable assignment (similar to <-). A common task is to search for certain strings in a vector. One could use the “or” operator | to test for equality to multiple values, but this can quickly become tedious. The function %in% allows you to test if any of the elements of a search vector are found: animals <- c("mouse", "rat", "dog", "cat") animals[animals == "cat" | animals == "rat"] # returns both rat and cat #> [1] "rat" "cat" animals %in% c("rat", "cat", "dog", "duck", "goat") #> [1] FALSE TRUE TRUE TRUE animals[animals %in% c("rat", "cat", "dog", "duck", "goat")] #> [1] "rat" "dog" "cat" ### Challenge (optional) • Can you figure out why "four" > "five" returns TRUE? When using “>” or “<” on strings, R compares their alphabetical order. Here “four” comes after “five”, and therefore is “greater than” it. 
## Missing data As R was designed to analyze datasets, it includes the concept of missing data (which is uncommon in other programming languages). Missing data are represented in vectors as NA. When doing operations on numbers, most functions will return NA if the data you are working with include missing values. This feature makes it harder to overlook the cases where you are dealing with missing data. You can add the argument na.rm=TRUE to calculate the result while ignoring the missing values. heights <- c(2, 4, 4, NA, 6) mean(heights) max(heights) mean(heights, na.rm = TRUE) max(heights, na.rm = TRUE) If your data include missing values, you may want to become familiar with the functions is.na(), na.omit(), and complete.cases(). See below for examples. ## Extract those elements which are not missing values. heights[!is.na(heights)] ## Returns the object with incomplete cases removed. The returned object is an atomic vector of type "numeric" (or "double"). na.omit(heights) ## Extract those elements which are complete cases. The returned object is an atomic vector of type "numeric" (or "double"). heights[complete.cases(heights)] Recall that you can use the typeof() function to find the type of your atomic vector. ### Challenge 1. Using this vector of heights in inches, create a new vector with the NAs removed. heights <- c(63, 69, 60, 65, NA, 68, 61, 70, 61, 59, 64, 69, 63, 63, NA, 72, 65, 64, 70, 63, 65) 2. Use the function median() to calculate the median of the heights vector. 3. Use R to figure out how many people in the set are taller than 67 inches. heights <- c(63, 69, 60, 65, NA, 68, 61, 70, 61, 59, 64, 69, 63, 63, NA, 72, 65, 64, 70, 63, 65) # 1. heights_no_na <- heights[!is.na(heights)] # or heights_no_na <- na.omit(heights) # 2. median(heights, na.rm = TRUE) # 3. 
heights_above_67 <- heights_no_na[heights_no_na > 67] length(heights_above_67) Now that we have learned how to write scripts, and the basics of R’s data structures, we are ready to start working with the Portal dataset we have been using in the other lessons, and learn about data frames. Page built on: 📆 2018-06-12 ‒ 🕢 21:01:41
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.2631515562534332, "perplexity": 1362.0760275100693}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-31/segments/1627046152156.49/warc/CC-MAIN-20210726215020-20210727005020-00409.warc.gz"}
https://socialcar-project.eu/en/what-2-numbers-add-up-to-get-17-and-multiply-to-get-42.14402.html
17 # What 2 numbers add up to get 17 and multiply to get 42?? Two numbers with sum 17 and product 42 are the roots of $x^2 - 17x + 42 = 0$. By the quadratic formula: $x_{1,2} = \frac{ 17 \pm \sqrt{ 17 ^2 - 4 \cdot 1 \cdot 42} }{ 2 \cdot 1 }$ $x_{1,2} = \frac{ 17 \pm \sqrt{ 121 } }{ 2 }$ $x_1 = \frac{ 17~+~\sqrt{ 121 } }{ 2 } = 14$ $x_2 = \frac{ 17~-~\sqrt{ 121 } }{ 2 } = 3$ And so $x^2-17x+42 = (x-14)(x-3) \to x = 14, 3$ So the two numbers are 3 and 14. They add to 17 and multiply to 42. Hope that helped :)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6481965184211731, "perplexity": 242.48585728148907}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-21/segments/1652662588661.65/warc/CC-MAIN-20220525151311-20220525181311-00017.warc.gz"}
https://socratic.org/questions/what-are-the-critical-values-if-any-of-f-x-x-2sinx-sinxcos-2x-in-0-pi
Calculus Topics # What are the critical values, if any, of f(x)= x^2sinx +sinxcos^2x in [0,pi]? Jul 11, 2018 $x \approx 2.3339$ #### Explanation: Given: $f \left(x\right) = {x}^{2} \sin x + \sin x {\cos}^{2} x \text{ from } \left[0 , \pi\right]$ One way to find the critical values is to graph and find the maximum using a graphing calculator : relative max: $\left(2.333931 , 4.2818209\right)$ graph{x^2sin x + sinx( cos x)^2 [-2, 3.14159, -2, 5]} The second way is to find the first derivative and set it equal to zero and solve for $x$: Find the first derivative using the product rule: $\left(u v\right) ' = u v ' + v u '$ For the first part of the function let u = x^2; " "u' = 2x, " "v = sinx; " "v' = cos x $\frac{d}{\mathrm{dx}} \left({x}^{2} \sin x\right) = {x}^{2} \cos x + 2 x \sin x$ Let u = sin x; " "u' = cos x v = (cos x)^2; " "v' = 2cos x (-sin x) = -2cos x sin x $\frac{d}{\mathrm{dx}} \left(\sin x {\cos}^{2} x\right) = \sin x \left(- 2 \cos x \sin x\right) + {\left(\cos x\right)}^{2} \cos x$ $= - 2 {\sin}^{2} x \cos x + {\cos}^{3} x$ $f ' \left(x\right) = {x}^{2} \cos x + 2 x \sin x - 2 {\sin}^{2} x \cos x + {\cos}^{3} x$ Find critical values : $f ' = 0$ $f ' \left(x\right) = {x}^{2} \cos x + 2 x \sin x - 2 {\sin}^{2} x \cos x + {\cos}^{3} x = 0$ This is a difficult problem. The easiest way to solve is to use a graphing calculator to graph the derivative and then solve for the zero ($x$-intercept). graph{x^2 cos x + 2x sin x -2(sin x)^2 cos x + (cos x)^3 [-2, 3.14159, -2, 5]} $\text{zero at } x \approx 2.3339$ ##### Impact of this question 125 views around the world
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 16, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8931755423545837, "perplexity": 1718.488416918522}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-04/segments/1547584331733.89/warc/CC-MAIN-20190123105843-20190123131843-00054.warc.gz"}
https://www.dsprelated.com/showarticle/1045.php
# Sinusoidal Frequency Estimation Based on Time-Domain Samples The topic of estimating a noise-free real or complex sinusoid's frequency, based on fast Fourier transform (FFT) samples, has been presented in recent blogs here on dsprelated.com. For completeness, it's worth knowing that simple frequency estimation algorithms exist that do not require FFTs to be performed . Below I present three frequency estimation algorithms that use time-domain samples, and illustrate a very important principle regarding so called "exact" mathematically-derived DSP algorithms. Here, as shown in Figure 1, we assume a time-domain real sinusoidal input sequence is represented by: $$x(n) = Asin(2{\pi}nf/f_s + \phi)$$ where n is the integer time-domain index and the sinusoid's frequency f and the sample rate fs are measured in Hz. Figure 1: Consecutive periodically-spaced x(n) samples of a real-valued sinusoidal sequence. OK, let's consider three time-domain frequency estimation algorithms that estimate sinusoidal signal frequency based on the x(n) samples in Figure 1. Real 3-Sample Frequency Estimation DSP guru Clay Turner provides a method for estimating a real-valued sinusoid's frequency based on three time domain samples [1]. Turner's Real 3-Sample algorithm is: Real 3-Sample:   $f = \frac{f_s}{2\pi} cos^{-1}(\frac{x(0)+x(2)}{2x(1)}) \tag{1}$ After experimenting with Eq. (1) I learned it can give correct results assuming the 3-Sample x(n) input sequence satisfies the following restrictions: * x(n) samples are real-valued and noise free, * Nyquist sampling criterion is satisfied, * Peak amplitude of x(n) is constant, * DC component of x(n) is zero, * Sample x(1) is not equal to zero. Real 4-Sample Frequency Estimation Turner also provides a method for estimating a real-valued sinusoid's frequency based on four time domain samples [2]. 
Turner's Real 4-Sample algorithm is: Real 4-Sample: $f = \frac{f_s}{2\pi} \cdot cos^{-1}[\frac{1}{2}(\frac{x(3)-x(0)}{x(2)-x(1)}-1)] \tag{2}$ Equation (2) can provide correct results assuming the 4-Sample x(n) input sequence satisfies the following restrictions: * x(n) samples are real-valued and noise free, * Nyquist sampling criterion is satisfied, * Peak amplitude of x(n) is constant, * x(1) ≠ x(2). Unlike Eq. (1), the Eq. (2) method can be applied when the x(n) sequence rides on a DC bias. Complex 2-Sample Frequency Estimation DSP wizard Dirk Bell recently showed me his two-sample method for estimating the frequency of a complex-valued noise-free sinusoid (reminiscent of complex-valued FM demodulation methods). Bell's algorithm to compute frequency f is: $$f = (\frac{f_s}{2\pi}) \cdot tan^{-1}[imag(x(n) \cdot x(n-1)^*)/real(x(n) \cdot x(n-1)^*)] \tag{3}$$ where the '*' symbol means conjugate. The derivation of Eq. (3) is given in Appendix A. Equation (3) can provide correct results assuming the complex $x(n)$ input sequence satisfies the following restrictions: * $x(n) = Me^{j2\pi nf/f_s}$ and noise free, * Nyquist sampling criterion is satisfied. An Important Note Notice that the mathematical derivations of our three algorithms could be called "exact." And by exact I mean no mathematical approximations, such as $sin(x) = x$ for small $x$, were used in their derivations. Algorithm Performance The three time-domain frequency estimation algorithms work well when the input sinusoid is noise free. However, sadly, the Real 3-Sample and Real 4-Sample algorithms do not work well at all if the real-valued input sinusoid is contaminated with noise. (Clay Turner never claimed otherwise.) Figure 2 shows the performance of our three algorithms, for 500 individual frequency estimations, when the 700 Hz input sinusoids have a relatively high signal to noise ratio (SNR) of 60 dB. 
My "noise" is standard garden variety, wideband, Gaussian-distributed, zero-mean, random noise samples. (The noise sequences used for the real and imaginary parts of the Complex 2-Sample algorithm input signal are independent of each other.) And my definition of input signal SNR is the traditional: $SNR = 10log_{10}(signal variance/noise variance)$. Figure 2: Algorithms' frequency estimation performances when the input sinusoid's frequency is 700 Hz (fs = 8000 Hz) with an SNR of 60 dB. Look carefully at the vertical axes' values when comparing the Figure 2 frequency estimation results. In Figure 2 we see the Real 3-Sample and Real 4-Sample algorithms are very poor at frequency estimation when the input sinusoid is contaminated with noise. (The above Figure 2 performance measurements were made with both 16-bit and floating-point numbers. Those two number formats had essentially the same poor performance.) Figure 2 also shows us the Complex 2-Sample algorithm produces moderately accurate results when the input sinusoid's SNR is 60 dB. Appendix B presents the three algorithms' performances when the input sinusoid's SNR is less than 60 dB. Software Modeling If you decide to model the performance of the above Eq. (1) and Eq. (2) algorithms with noise-contaminated real sinusoids, be aware that those methods can produce invalid complex-valued frequency estimates that should be ignored. That happens when the arguments of the $cos^{-1}()$ functions are, due to noisy $x(n)$ samples, greater than unity. You'll often see this situation when the input sinusoids are low-SNR or low-frequency (relative to the Fs sample rate). Conclusions I've presented three sinusoidal frequency estimation algorithms, Eqs. (1), (2), and (3), that use time-domain samples as opposed to FFT frequency-domain samples. Those algorithms only provide correct frequency estimation results for ideal noise-free input sinusoids. As such, those algorithms are mostly of academic interest only. 
(No one claimed those algorithms were of significant practical value.) My thoughts on the problems encountered when using the three algorithms in practical DSP applications are given in Appendix C. One point I want to make here is this: Although the Real 3-Sample and Real 4-Sample frequency estimation algorithms are mathematically exact, their performance is very poor in the presence of noise. Thus it is risky to assume that mathematically exact DSP algorithms will always be useful in real-world practical signal processing applications . April 30, 2017 Update: I've continued to learn more about the above Eq. (3). As it turns out, that algorithm is described in the literature of "frequency estimation" and it goes by the name of the "Lank-Reed-Pollon algorithm." In my initial software modeling of Eq. (3) it appeared to me that it provided an unbiased estimate of the frequency of a complex-valued sinusoid. After more thorough modeling I've learned that Eq. (3) is indeed a biased estimator when the input complex-valued sinusoid has a low SNR and is either low in frequency (near zero Hz) or high in frequency (near Fs/2 Hz). This is important because it means that averaging multiple frequency estimation values does not necessarily improve our final frequency estimate's accuracy Acknowledgment I thank Dirk Bell for his very useful suggestions regarding the first draft of this blog. I also thank Cedron Dawg for correcting a notational error in the original version of Appendix A. References [1] Clay Turner, http://www.claysturner.com/dsp/3pointfrequency.pdf [2] Clay Turner, http://www.claysturner.com/dsp/4pointfrequency.pdf Appendix A: Derivation of Eq. (3) Dirk Bell's derivation of his Eq. 
(3) expression proceeds as follows: Assume the complex-valued sinusoid of frequency $f$ is described by: $$x(n) = Me^{j2\pi nf/f_s}.$$ We can, where the '*' symbol means conjugation, write: $$P = x(n) \cdot x(n-1)^* = Me^{j2\pi nf/f_s} \cdot Me^{-j2\pi (n-1)f/f_s}$$ $$= M^2 \cdot e^{j2\pi [n-(n-1)]f/f_s} = M^2 \cdot e^{j2\pi f/f_s}$$ The radian angle of product P is: $$arg(P) = 2\pi f/f_s$$ $$= tan^{-1}[imag(x(n) \cdot x(n-1)^*)/real(x(n) \cdot x(n-1)^*)].$$ Arbitrarily assuming index $n = 1$, our desired expression for $f$ is: $$f = (\frac{f_s}{2\pi}) \cdot tan^{-1}[imag(P)/real(P)]$$ $$= (\frac{f_s}{2\pi}) \cdot tan^{-1}[imag(x(n) \cdot x(n-1)^*)/real(x(n) \cdot x(n-1)^*)].$$ Appendix B: Algorithms' Performances With Low SNR Input Signals Figure B-1 shows the performance of the algorithms, for 500 individual frequency estimations, when the 700 Hz input sinusoids have an SNR of 30 dB. In this scenario the three algorithms have very poor performance. Figure B-1: Algorithms' frequency estimation performances when the input sinusoid's frequency is 700 Hz (fs = 8000 Hz) with a signal SNR of 30 dB. Figure B-2 shows the performance of the Real 3-Sample and Complex 2-Sample algorithms, for 500 individual frequency estimations, when the 700 Hz input sinusoids have an SNR of 10 dB. (For reference, the top panel of Figure B-2 shows the difference between a noise-free 700 Hz sinusoid and an SNR = 10 dB sinusoid.) In this scenario the two algorithms have terribly poor performance. Figure B-2: Algorithm performance: (a) comparison of a noise-free and a noisy input sinusoid; (b) algorithms' frequency estimation performances when the input sinusoid's frequency is 700 Hz (fs = 8000 Hz) with a signal SNR of 10 dB. 
Appendix C: Practical Problems of the Three Frequency Estimation Algorithms According to my software modeling, in the presence of noise the Real 3-Sample and Real 4-Sample algorithms typically produce biased results (i.e., the mean of multiple frequency estimations is usually greater than the true input signal's frequency). Thus averaging the results of multiple frequency estimations is not guaranteed to improve their performances. An input signal SNR of roughly 40 dB combined with multiple results averaging is necessary for those algorithms to be just marginally useful. There's more bad news. When the input signal's frequency is less than fs/20 Hz the Real 3-Sample and Real 4-Sample algorithms provide wildly incorrect results. Unlike the Real 3-Sample algorithm, the Real 4-Sample's performance is not degraded by a DC bias on the input sinusoid but it requires a much higher input signal SNR to achieve comparable performance with the Real 3-Sample algorithm. The Complex 2-Sample algorithm provides unbiased frequency estimation results, even at low input signal frequencies. So averaging multiple Complex 2-Sample output samples will improve this algorithm's performance. While its performance is superior to the Real 3-Sample and Real 4-Sample algorithms, the disadvantage of the Complex 2-Sample algorithm is that it requires a complex-valued input. So real-valued input sequences must be converted to an analytic complex-valued sequence before the frequency estimation process can begin. To be truly useful when their input sinusoids are contaminated by noise and narrowband interfering spectral components, our three frequency estimation methods need to be preceded by some sort of SNR-enhancing narrow bandpass filtering. [ - ] Comment by April 20, 2017 I can't recall exactly where I saw it, but didn't you have a "DSP Tips and Tricks" where you used the time-derivatives of the real and imaginary parts of a complex signal to estimate frequency?  
I'd be interested to see how that compares with the data from these methods. [ - ] Comment by April 21, 2017 Hello dszabo. You may be right, I don't recall.  I'm away from my desktop computer for the next few days so I can't check the old Tips & Tricks articles.  Regarding your question, I'll have to get back to you on that. [ - ] Comment by April 27, 2017 I actually found it while I was working on something unrelated, but this is the article I was thinking of: http://www.embedded.com/design/configurable-systems/4212086/DSP-Tricks--Frequency-demodulation-algorithms- [ - ] Comment by April 27, 2017 Hi dszabo. Ah, yes.  I did not write that material for embedded.com. That's an excerpt from my DSP book that my Publisher gave embedded.com permission to reprint without  involving me in the agreement. [ - ] Comment by April 21, 2017 How about an overlapping, interleaved binned all-pass NB filter bank, using a PLL to shift bin? [ - ] Comment by April 24, 2017 If you're going to have a PLL in the mix, how about slaving to the incoming signal with the PLL, and reading the frequency off of the command to the NCO? [ - ] Comment by April 24, 2017 You do realize that the 3-measurement method does not work for $y_1 = 0$, yes?  Mathematically it hits a singularity, which is not relieved by using the arc-secant.  Intuitively, if $y_1 = 0$ then the triple $\left(y_0, y_1, y_2 \right)$ describe a straight line -- without amplitude information, no frequency measurement is possible. [ - ] Comment by April 24, 2017 Yes, I did realize that.  In my blog I listed the restriction that the 3-Sample algorithm is not usable when sample y1 = 0.  Of course sample y1 = 0 would rarely occur. Much more likely, as I encountered, is the unpleasant numerical situation I described in the Software Modeling section of my blog. [ - ] Comment by April 24, 2017 Hey Rick: I didn't see anything in there about the spacing of the samples.  
It would seem that samples spaced about $\frac{\pi}{2}$ radians apart would be the most noise-proof, and that samples that are spaced really tightly would result in more noise sensitivity in those portions of the waveform where the sinewave is going through its inflection point and is thus mostly a straight line. Yes?  No?  Haven't thought about it? [ - ] Comment by April 24, 2017 In my software modeling I found that the 3-Sample and 4-Sample algorithms had particularly poor performance at low input signal frequencies. In general, at input freqs less than, say, Fs/10 (and input SNR = 30 dB) those two methods were essentially unusable. [ - ] Comment by May 2, 2017 At low frequencies, you can use these generalizations of (1) and (3). Real signal: $$f = \frac{f_s}{2\pi} \cdot \frac{1}{m} \cdot cos^{-1} \left[\frac{x(n)+x(n-2m)}{2x(n-m)}\right]$$ This equation will be most noise resistant when x(n) and x(n-2m) are near zero crossings and x(n-m) is at a peak.   (This corresponds to $\pi/2$ sample spacing.) Complex signal: $$f = \frac{f_s}{2\pi} \cdot \frac{1}{m} \cdot arg\left[x(n) \cdot x^*(n-m)\right]$$ Ced [ - ] Comment by May 9, 2017 Hi.  For an N-length input sequence we can compute N-1 phase shift values and average those N-1 values to achieve some noise reduction.  It seems to me that using an 'm' value greater than one means that we'd be averaging fewer than N-1 phase values and not achieving as much noise reduction by way of averaging. I need to experiment a little with those whole "m > 1" idea. [ - ] Comment by May 10, 2017 Conceptually, it is the same as having a lower sampling rate. You can also average three adjacent points (or five or seven if the spacing is tight enough) to make a single value at the center location.  This will reduce the noise significantly.  For the complex case, exactness is not lost with regards to the formula, for the real case you are left with a very accurate approximation when the point is near a zero crossing. 
This is equivalent to running a boxcar smoothing filter on the signal first. The first equation also happens to work for complex signals.  I did not realize that when I wrote the comment.  If the numerator of the inverse cosine argument is zero, then the denominator doesn't really matter as long as it is non-zero.  Thus if you select "m" such that the three points span half a cycle (real or complex case), the center point's noise is either completely or nearly insignificant.  The real case needs to be centered on a peak for the numerator to be near zero. I would still use the latter formula for complex signals though. Ced [ - ] Comment by June 18, 2017 Another important consideration is the arguable existence of the Fourier Transform for sinusoids.   Since sinusoids are neither absolutely integrable nor square integrable, the existence of its Fourier Transform relies on a bit of hand-wavy tricks with "generalized functions" and the Dirac delta.   Obviously it is functionally consistent and useful to do so, but it does mean that the Fourier Transform of a sinusoid is arguably an estimate (both mathematically and practically) rather than anything "exact".   This means that anything "exact" using the FT of a sinusoid is academic. I think this supports your point.   For engineering and DSP this is not a hindrance at all, since nearly everything is an estimate and estimating parameters to required or useful tolerances is still quite practical. [ - ] Comment by July 21, 2017 The doubts you cast on FT exactness do not apply to the discrete case. The importance of exactness as a quality of a formula is not that it is an indication that it is somehow more robust (which is Rick's point in this article), or that it is always the best solution in a given set of circumstances.  Mathematics is not only a way to obtain a numerical results, but it is also a descriptive language.  This is particularly true of differential equations.  
When you find an exact solution for a problem you are also finding a true description.  When you find an approximation as a solution, you are only finding a behavioral description, likely within a limited parameter range. In practical applications, you should know whether the math you are using is exact or an approximation.  If it is the latter, you also need to be aware of the possible error range of the approximation and evaluate that against your tolerances.  In either case, you still have to evaluate your solution for sensitivity to variation of your input values. Often times, approximations are computationally less intensive, comparable in robustness or even superior, and accurate enough to be the preferred choice.  The distinction between an exact solution and an approximation is not as important in an application as it is in the theoretical understanding of the principles underlying the system being analyzed. If your goal is to understand something, exactness is important.  If it just to get a job done, not so much.  It's as simple as that. Ced P.S.  For those who are interested, my last three blog articles are extensions of some of the material presented in this article. "Exact Near Instantaneous Frequency Formulas Best at Peaks (Part 1)" "Exact Near Instantaneous Frequency Formulas Best at Peaks (Part 2)" "Exact Near Instantaneous Frequency Formulas Best at Zero Crossings" They can all be found, with the rest of my articles, here: https://www.dsprelated.com/blogs-1/nf/Cedron_Dawg.... [ - ] Comment by March 26, 2018 Hi. It is real Clay Turner algorithm? I found this algorithm in Vizineanu articles published in Measurement journal? Similar algorithm was published by Seyedi. [ - ] Comment by March 26, 2018 Hello Serek. I'm willing to wager that DSP Guru Clay Turner derived his algorithms "on his own" and did not plagiarize the work of anyone else. [ - ] Comment by March 26, 2018 Thank you very much for your response. 
This algorithm is very important to me because I use them in my research. [ - ] Comment by August 21, 2018 I had to do frequency estimation recently and was fortunate enough to be dealing with a complex signal, which makes this whole problem relatively trivial.  Well, actually, it was 3 phase power, which you can convert to DQZ, direct+quadrature+common and use the D and Q as complex (the common mode signal, Z, should ideally be zero anyway, but if not, you've separated it out so that D and Q basically do not suffer from any DC offset). The obvious approach is to take points at some distance apart and consider the difference in their phase angles as giving you an instantaneous angular velocity (for the time at half-way between the samples).  In my case, to minimize quantization error, samples were compared at an offset of the half-period of the highest reasonable frequency, for which the signals were pre-filtered (and you lose a half-period from the length to account for the comparison spread). Formula 3 is somewhat nice because it means you can call arctan just once instead of twice.  I was sticking with clarity and using the difference of two phase values from arctan. At least in that approach, a potential source of error will be the 2PI wrap-around of arctan angles.  I wonder if the formula 3 approach magically deals with that problem?  I kind of doubt it. Consider a primary frequency close to the frequency whose half-period you use as your sample-offset (to compare sample pairs).  Any harmonics or higher-frequency noise will make the angular velocity sometimes larger and sometimes smaller than the nominal value which is already close to +PI or -PI, and thus it can cause it to cross over and change sign.  If you simply average those together, you end up with something close to 0, which is nearly 180 degrees (sorry, PI radians) away from reality!  
In my case, I had already low-pass filtered the 3 original signals (before the D,Q transform), and the cut-off was higher than any reasonable expectation for a power frequency, but still, it's a possibility, especially with strong harmonics (commonly at their strongest at 5x and 7x).  It's easy to see how this gives errors when your angular velocity can change sign, but if your spacing is small (like a single-sample offset), I can't picture how that could be a problem.  In my case, it was a possibility. To deal with that, I decided to average the positive angle differences separately from the negative angle differences, and whichever one had more instances (positive or negative), I would take that average and then push the other average across by 2PI to make it commensurate with the rest, to come up with an adjusted (and properly weighted) average.  Of course, computationally, I just kept the positive and negative sums along with the positive and negative counts, which makes it all quite trivial.  Then that adjusted average was to serve as an overall estimate or "target" angular velocity. Then I would re-process the entire angular-velocity signal (just computed) to force each value within 1 PI of that target (by adding or subtracting 2PI as needed).  In my case, I didn't expect massive changes to frequency (even if coming from a VFD), though in other applications, you might want to let your target value itself be dynamic, recomputing it for different chunks of the signal and letting it change very slowly.  Anyway, this should, theoretically, greatly reduce the chances of any unintended 2PI wrap-arounds introducing error.  And this is especially important because the resulting angular-velocity signal (scaled to frequency in Hz) needed to be further low-pass filtered.  
By the way, it's the fact that such post-LPF was going to be needed at the end that made simple comparisons of two samples spread out by the pre-filtering cut-off frequency's half-period so reasonable (why bother with windowed averaging, further low-pass pre-filtering, or anything else when you're just going to LPF the devil out of it when you're done anyway?). Sorry if this doesn't make sense... I kept switching between past tense and present tense, which I don't usually do, but maybe you can still get the gist of it.  Maybe this idea for preventing your angular velocities from yanking across the 2PI boundary can be helpful to someone. To post reply to a comment, click on the 'reply' button attached to each comment. To post a new comment (not a reply to a comment) check out the 'Write a Comment' tab at the top of the comments.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8680946230888367, "perplexity": 1458.5151036383081}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-49/segments/1669446710662.60/warc/CC-MAIN-20221128203656-20221128233656-00603.warc.gz"}
http://lists.w3.org/Archives/Public/public-html/2007JanMar/0697.html
Re: MostlySemanticMarkup From: Murray Maloney <murray@muzmo.com> Date: Thu, 29 Mar 2007 08:28:54 -0500 Message-Id: <5.1.1.6.2.20070329082102.0ac2c7e0@mail.muzmo.com> At 01:41 PM 3/28/2007 -0700, T.V Raman wrote: >In markup languages like LaTeX the \em had a minor but key >difference with \it -- >LaTeX was smart enough to render \em as something distinctive if >it was used within content that was already italicized --- >otherwise \em and \it were equivalent. > >I dont believe visual web browsers have made this distinction >anyway, in which case there is no real distinction. Thanks for mentioning that. I have always tried to write style sheets to take nesting into account. so that italic inside of italic would be normal and italic inside of bold would be bold-italic, etc. I haven't figured out how to manage contexts like that with CSS. Maybe Hakon has some advice. Received on Thursday, 29 March 2007 12:27:19 GMT This archive was generated by hypermail 2.2.0+W3C-0.50 : Thursday, 29 March 2007 12:27:25 GMT
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7976608872413635, "perplexity": 16342.104569201063}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-06/segments/1422122152935.2/warc/CC-MAIN-20150124175552-00137-ip-10-180-212-252.ec2.internal.warc.gz"}
https://arxiv.org/abs/hep-th/9810155
hep-th (what is this?) (what is this?) # Title: Out Of This World Supersymmetry Breaking Abstract: We show that in a general hidden sector model, supersymmetry breaking necessarily generates at one-loop a scalar and gaugino mass as a consequence of the super-Weyl anomaly. We study a scenario in which this contribution dominates. We consider the Standard Model particles to be localized on a (3+1)-dimensional subspace or 3-brane'' of a higher dimensional spacetime, while supersymmetry breaking occurs off the 3-brane, either in the bulk or on another 3-brane. At least one extra dimension is assumed to be compactified roughly one to two orders of magnitude below the four-dimensional Planck scale. This framework is phenomenologically very attractive; it introduces new possibilities for solving the supersymmetric flavor problem, the gaugino mass problem, the supersymmetric CP problem, and the mu-problem. Furthermore, the compactification scale can be consistent with a unification of gauge and gravitational couplings. We demonstrate these claims in a four-dimensional effective theory below the compactification scale that incorporates the relevant features of the underlying higher dimensional theory and the contribution of the super-Weyl anomaly. Naturalness constraints follow not only from symmetries but also from the higher dimensional origins of the theory. We also introduce additional bulk contributions to the MSSM soft masses. This scenario is very predictive: the gaugino masses, squark masses, and $A$ terms are given in terms of MSSM renormalization group functions. Comments: 42 pages, LateX, references added, corrections added as Eqs. 
(43, 53) Subjects: High Energy Physics - Theory (hep-th); High Energy Physics - Phenomenology (hep-ph) Journal reference: Nucl.Phys.B557:79-118,1999 DOI: 10.1016/S0550-3213(99)00359-4 Report number: MIT-CTP-2788, PUPT-1815, BUHEP-98-26 Cite as: arXiv:hep-th/9810155 (or arXiv:hep-th/9810155v2 for this version) ## Submission history From: Lisa Randall [view email] [v1] Tue, 20 Oct 1998 23:04:07 GMT (41kb) [v2] Fri, 2 Apr 1999 23:17:23 GMT (42kb)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.831416666507721, "perplexity": 2570.264150805538}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-44/segments/1476988720471.17/warc/CC-MAIN-20161020183840-00262-ip-10-171-6-4.ec2.internal.warc.gz"}
https://www.finedictionary.com/generalize.html
# generalize ## Definitions • GENERAL VIEW of LIÈGE • WordNet 3.6 • v generalize become systemic and spread throughout the body "this kind of infection generalizes throughout the immune system" • v generalize cater to popular taste to make popular and present to the general public; bring into general or common use "They popularized coffee in Washington State","Relativity Theory was vulgarized by these authors" • v generalize draw from specific cases for more general cases • v generalize speak or write in generalities • *** GENERAL VIEW of ARNHEIM GENERAL PUTNAM LEAVING A PAIR OF TIRED STEERS GENERAL HOWE'S SUGGESTION States General at Tours——329 A WISE GENERAL GENERAL LUCAS J. MEYER GENERAL GROBLER Webster's Revised Unabridged Dictionary • Interesting fact: The lifespan of a rhinoceros is generally 50 years • generalize To apply to other genera or classes; to use with a more extensive application; to extend so as to include all special cases; to make universal in application, as a formula or rule. "When a fact is generalized , our discontent is quited, and we consider the generality itself as tantamount to an explanation." • generalize To bring under a genus or under genera; to view in relation to a genus or to genera. "Copernicus generalized the celestial motions by merely referring them to the moon's motion. Newton generalized them still more by referring this last to the motion of a stone through the air." • generalize To derive or deduce (a general conception, or a general principle) from particulars. "A mere conclusion generalized from a great multitude of facts." • v. i Generalize To form into a genus; to view objects in their relations to a genus or class; to take general or comprehensive views. • generalize To speak in generalities; to talk in abstract terms. 
• *** Century Dictionary and Cyclopedia • Interesting fact: Fires onland generally move faster uphill than downhill • generalize To render general; make more general; bring under a general description or notion; treat or apply generically. • generalize To infer inductively, as a general rule from a particular case or set of facts. • generalize In mathematics, to modify, as a proposition, so as to obtain a wider proposition from which the former can be immediately deduced. See generalization, 3 • generalize To recognize that two or more objects have a common character; to form a general notion. • generalize To reason inductively, from particular cases to general rules comprehending those cases. • generalize Also spelled generalise. • generalize In painting, to render large and typical characteristics rather than details. • *** ## Quotations • Oliver Goldsmith “Crime generally punishes itself.” • Samuel Taylor Coleridge “How inimitably graceful children are in general before they learn to dance!” • Benjamin Disraeli “As a general rule, the most successful man in life is the man who has the best information.” • William Hazlitt “Our friends are generally ready to do everything for us, except the very thing we wish them to do.” • George Eliot “To have in general but little feeling, seems to be the only security against feeling too much on any particular occasion.” • Joseph De Maistre “Man in general, if reduced to himself, is too wicked to be free.” ## Etymology Webster's Revised Unabridged Dictionary Cf. F. généraliser, ## Usage ### In literature: The proposition respecting a general action was decidedly negatived. "The Life of George Washington, Vol. 3 (of 5)" by John Marshall In command of this army of invasion was General McDowell, held to be the most scientific general in the North. "The Victim" by Thomas Dixon I had with me at that time General Hattingh, General Wessel Wessels, and General Michal Prinsloo. 
"Three Years' War" by Christiaan Rudolf de Wet The rest of the day he was either in the warehouse, or carried messages, and generally did such odd jobs as were required. "With Kitchener in the Soudan" by G. A. Henty The General signed the order and instructed the Director-General to go ahead. "A Journal From Our Legation in Belgium" by Hugh Gibson The American inquired where the general might be found. "Steve Yeager" by William MacLeod Raine General Mahone's brigades hastily called, rushed into position, and a general Confederate charge was ordered. "The Southerner" by Thomas Dixon The evening of that day, General Joffre issued orders for a general attack all along the line. "Kelly Miller's History of the World War for Human Rights" by Kelly Miller Among the former was general Meincke, and among the latter prince Henry and general Seydelitz. "The History of England in Three Volumes, Vol.II. From William and Mary to George II." by Tobias Smollett He passes as a fighting general; they pass as writing generals. "Diary of the Besieged Resident in Paris" by Henry Labouchère *** ### In poetry: The stealing tear-drop stagnates in the eye, The sudden sigh by friendship's bosom proved, I mark them rise—I mark the general sigh! Unhappy youth! and wert thou so beloved? "Elegy Occasioned" by Henry Kirke White What general who could assuage The poisonous serpent's bite, Assisted Turnus 'gainst his foe Eneas ? but alas ! laid low, Not long did he the battle wage, Or triumph in the light. "Enigma XV." by Elizabeth Hitchener But there are those who love to sit and trace Between all these some shy retiring light, For such, they know, shed through the veil of space The general halo that adorns the night. "On Receiving A Privately Printed Volume Of Poems From A Friend" by Thomas Buchanan Read Be hate that fruit or love that fruit, It forwards the general deed of man, And each of the Many helps to recruit The life of the race by a general plan; Each living his own, to boot. 
"By The Fire-Side" by Robert Browning "Yet in thy turn, thou frowning Preacher, hear; Are not these general maxims too severe? Say, cannot power secure its owner's bliss? Are victors bless'd with fame, or kings with ease?" "Solomon on the Vanity of the World, A Poem. In Three Books. - Power. Book III." by Matthew Prior And is this the ground Washington trod? And these waters I listlessly daily cross, are these the waters he cross'd, As resolute in defeat, as other generals in their proudest triumphs? "The Centerarian's Story" by Walt Whitman ### In news: Posted on November 20, 2012 at 10:45 pm by Jonathan Feigen in General, Houston Rockets, Rockets. General Atomics Aeronautical Systems Inc ( GA. The 5% of the participants who had highly affectionate mothers reported being about 14% less anxious and 9% less generally distressed than their peers. Congregations used to be a blend of political affiliations , but that's generally not the case anymore. Posted on September 20, 2012 at 11:15 pm by John McClain in General. Rhode Island Democrats won big on Tuesday, including General Assembly races. But for Mr Porter, as for the Left generally, those who are not affluent are not merely "less affluent ," they are "less fortunate. Attorney general wants Bruce found in contempt. The state Attorney General's Office on Friday warned in court papers it will seek contempt sanctions next week against Colorado Springs tax activist Douglas Bruce. Speaking to a packed room before the start of PLMA's General Session, Tim Simmons, PLMA's vice president of communications, said he had big news: For the first time, attendees could post their General Session questions on Twitter. A spokesman for the Texas Attorney General's office said the agency generally does not comment on pending opinion requests. The Annual Fall RV and Camper Show at GENERAL RV in Birch Run 98 KCQ will be live at General RV from 12 Noon until 2 p.m. General revenue receipts increase in Sept. 
OKLAHOMA CITY (AP) — After a disappointing revenue report in August, state finance officials are reporting collections to the state's general revenue fund returned to positive territory in September. Fewer El Pasoans were registered to vote and fewer cast ballots in this year's general election compared to the 2008 general election, statistics show. Gettysburg, Pennsylvania Dates: July 1-3, 1863 Generals: Union General: George G Meade Confederate General: Robert E Lee. *** ### In science: Definition 4.14. ([LT05]) Let the compact Lie group G with Lie algebra g act on a manifold M. A generalized moment map for an invariant H-twisted generalized K ¨ahler structure (J1, J2) is a generalized moment map for the generalized complex structure J1. Generalized geometry, equivariant $\bar{\partial}\partial$-lemma, and torus actions Note that [LT05] proposes a general method of constructing non-trivial explicit examples of generalized K ¨ahler structures as the generalized K ¨ahler quotient of the vector space Cn. Generalized geometry, equivariant $\bar{\partial}\partial$-lemma, and torus actions Nevertheless, the above results allow us to identify regions where the generalized kernel functional agrees with a generalized function or is even guaranteed to be a G∞ -regular generalized function. Generalized Oscillatory Integrals and Fourier Integral Operators A partially ordered (generalized) pattern (POP) is a generalized pattern some of whose letters are incomparable, an extension of generalized permutation patterns introduced by Babson and Steingr´ımsson. Partially ordered patterns and compositions We find that the deformations are classified by the first cohomology group of a Lie algebroid canonically associated to the generalized calibrated cycle, seen as a generalized complex submanifold with respect to the integrable generalized complex structure of the bulk. Deformations of calibrated D-branes in flux generalized complex manifolds ***
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.43906769156455994, "perplexity": 10877.651140448972}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-05/segments/1642320301592.29/warc/CC-MAIN-20220119215632-20220120005632-00232.warc.gz"}
https://de.mathworks.com/help/curvefit/csapi.html
# csapi Cubic spline interpolation ## Syntax ``pp = csapi(x,y)`` ``values = csapi(x,y,xx)`` ## Description example ``` NoteFor a simpler but less flexible method to interpolate cubic splines, try the Curve Fitter app or the `fit` function and see About Smoothing Splines. `pp = csapi(x,y)` returns the ppform of a cubic spline s with knot sequence `x` that takes the values `y(:,j)` at `x(j)` for `j=1:length(x)`. The values `y(:,j)` can be scalars, vectors, matrices, and ND-arrays. The function averages the data points with the same data site and then sorts them by their sites. With `x` the resulting sorted data sites, the spline `s` satisfies the not-a-knot end conditions, such as $jum{p}_{x\left(2\right)}{D}_{s}^{3}=0=jum{p}_{x\left(end-1\right)}{D}^{3}\left(s\right)$where D3s is the third derivative of s. If `x` is a cell array of sequences `x1`, ..., `xm` of lengths `n1`, ..., `nm`, then `y` is an array of size `[n1,...,nm]` (or of size `[d,n1,...,nm]` if the interpolant is `d`-valued). In that case, `pp` is the ppform of an `m`-cubic spline interpolant s to such data. In particular, $s\left(x\left({i}_{1}\right),\cdots ,x\left({i}_{m}\right)\right)=y\left(:,{i}_{1},\dots ,{i}_{m}\right)$with ${i}_{1}=1:nl$ and ${i}_{m}=1:nm$.To perform operations on this interpolating cubic spline, such as evaluation, differentiation, plotting, use the pp structure. For more information, see the `fnval`, `fnder`, `fnplt` functions.``` example ````values = csapi(x,y,xx)` returns the values of the smoothing spline evaluated at the points `xx`. This syntax is the same as `fnval(csapi(x,y),xx)`.This command is essentially the MATLAB® function `spline`, which, in turn, is a stripped-down version of the Fortran routine `CUBSPL` in PGS, except that `csapi` (and now also `spline`) accepts vector-valued data and can handle gridded data.``` ## Examples collapse all This example shows how to use the `csapi` command from Curve Fitting Toolbox™ to construct cubic spline interpolants. 
Interpolant to Two Points The command `values = csapi(x,y,xx)` returns the values at `xx` of the cubic spline interpolant to the given data (`x,y`), using the not-a-knot end condition. This interpolant is a piecewise cubic function with break sequence `x`, whose cubic pieces join together to form a function with two continuous derivatives. The "not-a-knot" end condition means that, at the first and last interior break, even the third derivative is continuous (up to round-off error). Specifying only two data points results in a straight line interpolant. ```x = [0 1]; y = [2 0]; xx = linspace(0,6,121); plot(xx,csapi(x,y,xx),'k-',x,y,'ro') title('Interpolant to Two Points')``` Interpolant to Three Points If you specify three data points, the function outputs a parabola. ```x = [2 3 5]; y = [1 0 4]; plot(xx,csapi(x,y,xx),'k-',x,y,'ro') title('Interpolant to Three Points')``` Interpolant to Five Points More generally, if you specify four or five data points, the function outputs a cubic spline. ```x = [1 1.5 2 4.1 5]; y = [1 -1 1 -1 1]; plot(xx,csapi(x,y,xx),'k-',x,y,'ro') title('Cubic Spline Interpolant to Five Points')``` Up to rounding errors, and assuming that `x` is a vector with at least four entries, the statement `pp = csapi``(x,y)` puts the same spline into `pp` as the following statement, except that the description of the spline obtained this second way does not use breaks at `x(2)` and `x(n-1)`: ```pp = fn2fm(spapi(augknt(x([1 3:(end-2) end]),4),x,y),"pp"); ``` As a simple bivariate example, plot a bicubic spline interpolant to a Ricker wavelet. ```x =.0001 + (-4:.2:4); y = -3:.2:3; [yy,xx] = meshgrid(y,x); r = pi*sqrt(xx.^2+yy.^2); z = sin(r)./r; bcs = csapi({x,y},z); fnplt(bcs) axis([-5 5 -5 5 -.5 1])``` Since MATLAB® considers the entry `z(i,j)` as the value at (`x(j)`,`y(i)`), the code reverses `x` and `y` in the call to `meshgrid`. 
The Curve Fitting Toolbox® instead follows the Approximation Theory standard whereas `z(i,j)` is the value at (`x(i)`,`y(j)`). For this reason, you have to be cautious when you are plotting values of such a bivariate spline with the aid of the MATLAB `mesh` function, as shown here: ```xf = linspace(x(1),x(end),41); yf = linspace(y(1),y(end),41); mesh(xf,yf,fnval(bcs,{xf,yf}).')``` Note the use of the transpose of the matrix of values obtained from `fnval`. ## Input Arguments collapse all Data sites of data values `y` to be fit, specified as a vector or as a cell array for multivariate data. Spline f is created with knots at each data site `x` such that f(`x(j)`) = `y(:,j)` for all values of `j`. For multivariate, gridded data, you can specify `x` as a cell array that specifies the data site in each variable dimension: f(`x1(i),x2(j),...xn(k)`) = `y(:,i,j,...,k)`. Data Types: `single` | `double` Data values to fit during creation of the spline, specified as a vector, matrix, or array. Data values `y(:,j)` can be scalars, matrices, or n-dimensional arrays. Data values given at the same data site `x` are averaged. Data Types: `single` | `double` Evaluation points over which the spline is evaluated, specified as a vector or as a cell array of vectors for multivariate data. Spline evaluation is performed using `fnval`. Data Types: `single` | `double` ## Output Arguments collapse all Spline in ppform, returned as a structure with these fields. For more information on ppform, see The ppform. Form of the spline, returned as `pp`. `pp` indicates that the spline is given in piecewise polynomial form. Knot positions of the spline, returned as a vector or as a cell array of vectors for multivariate data. Vectors contain strictly increasing elements that represent the start and end of each of the intervals over which the polynomial pieces are defined. Coefficients of polynomials for each piece, returned as a matrix or as an array for multivariate data. 
Number of polynomial pieces describing the spline, returned as a scalar or as a vector of numbers of pieces in each variable for multivariate data. Order of the polynomial function describing each polynomial piece of the spline, returned as a scalar or as a vector containing the order in each variable for multivariate data. Dimensionality of the target function, returned as a scalar. Evaluated spline, returned as a vector or as a matrix or array for multivariate data. The spline is evaluated at the given evaluation points `xx`. ## Algorithms `csapi` is an implementation of the Fortran routine `CUBSPL` from PGS. The algorithm constructs and solves the relevant tridiagonal linear system using the MATLAB sparse matrix capability. The algorithm also uses the not-a-knot end condition, forcing the first and second polynomial piece of the interpolant to coincide, as well as the second-to-last and the last polynomial piece. ## Version History Introduced in R2006b
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 4, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6504421234130859, "perplexity": 3201.7956239757655}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-27/segments/1656103989282.58/warc/CC-MAIN-20220702071223-20220702101223-00013.warc.gz"}
https://taskvio.com/maths/probability-distributions/cross-entropy/
# Cross entropy The cross-entropy between two probability distributions p and q. #### Cross entropy True lable Distribution P (0 1 0) #### Input $$P (0 1 0)$$ $$Q (0.15 0.60 0.25)$$ #### Solution Cross-entropy H(p, q) will be: H(p, q) = -[0 * log₂(0.15) + 1 * log₂(0.6) + 0 * log₂(0.25)] H(p, q) = 0.736 ## The cross-entropy between two probability distributions p and q The cross-entropy between two probability distributions p and q over an equivalent underlying set of events measure the typical number of bits needed to spot an occasion drawn from the set if a coding the scheme used for the set is optimized for an estimated probability distribution q, instead of truth distribution p. It is most commonly used as machine learning as a function. Cross-entropy may be a measure from the sector of data theory, building upon entropy and usually calculating the difference between two probability distributions. It closely associated with but is different from KL divergence that calculates the relative entropy between two probability distributions, whereas cross-entropy is often thought to calculate the entire entropy between the distributions. Cross-entropy is additionally associated with and sometimes confused with logistic loss, called log loss. Although the 2 measures are derived from a special source when used as loss functions for classification models, both measures calculate an equivalent quantity and may be used interchangeably. Understanding Cross-Entropy In order to know cross-entropy, starting with the definition of the entropy, ‘ Cross-entropy, it's a measure of the degree of dissimilarities between two probability distributions, within the reference to supervised machine learning. Cross-Entropy is expressed by the equation; The cross-entropy equation Where x represents the anticipated results by ML algorithm, p(x) is that the probability distribution of the “true” label from training samples, and q(x) depicts the estimation of the ML algorithm. 
Cross-entropy may be a distinction measurement between two possible distributions for a group of given random variables or events. It builds on the concept of data-entropy and finds the variability of bits needed to rework an occasion from one distribution to a different distribution. Cross-entropy examines the predictions of models with a truth probability distribution. It constantly goes down if the predictions are mostly accurate and it also becomes zero when the prediction tends to be perfect. KL Divergence (Relative Entropy) The Kullback-Liebler Divergence (KL) divided into two parts of Divergence, i.e measure the difference between two probability distributions, a KL Divergence having value zero indicates the identical probability distribution. And when the potential distributions P and Q, KL Divergence is given by the equations, For discrete distributions, The equation shows KL Divergence for discrete distributions. For continuous distributions, The equation shows KL Divergence for continuous distributions. The KL Divergence is that the average number of additional bits needed to encode the info, thanks to the very fact that we'd like distribution q to encode the info rather than truth distribution p. Cross-Entropy as Loss Function Cross entropy is broadly used as a Loss Function when you optimizing classification models. In brief, classification tasks involve one or more input variables and prediction of a category label description, additionally, if the classification problems contain only two labels for the outcomes’ predictions refereed as a binary classification problem and if classification problems contain quite two variables are termed as categorical or multi-class classification problems. It can measure the achievement of a classification model that provides output in terms of probability having values between 0 and 1. It increases because the estimated probability deviates from the particular class label. E.g. 
A model contains a sample with a known class label having a probability of 1.0, and therefore a probability of 0.0 for the other class labels. Such a model can estimate the probability of every class label; this is where cross-entropy comes in: it is then used to find the difference between the probability distributions of the various class labels. Also, cross-entropy enables one to choose the plausible split that reduces the uncertainty about the classification. How to use this cross-entropy tool by taskvio.com To use this tool you don’t have to worry about anything: you just have to come to our website, select the tool you want to use, and then you can start using it. This tool has been created for any individual who wants to do mathematical calculations, and for students. As you can see on your desktop, you have a text box where you can input the numbers. After you enter the numbers properly, simply click on the calculate button to get the proper result. You can bookmark this tool if you want, because it will be useful in the future. ### Q. Where Is Cross-entropy Used? A. Cross-entropy Is Commonly Used In Machine Learning As A Loss Function.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8916009068489075, "perplexity": 1325.0120491146902}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 20, "end_threshold": 15, "enable": false}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-27/segments/1656103360935.27/warc/CC-MAIN-20220628081102-20220628111102-00092.warc.gz"}
http://crypto.stackexchange.com/tags/cryptanalysis/new
# Tag Info 5 I don't know of any practical attacks on these schemes that would break collision-resistance or pre-image resistance, but the existence of related-key attacks on AES is still worrisome. The Miyaguchi-Preneel hash construction is better in this sense, because the attacker doesn't directly control anything that goes into the key input. Miyaguchi-Preneel is ... 14 The most efficient related-key attacks on AES-256 and resulting weaknesses of AES-256-based hash functions are summarized in my PhD thesis. Though collision and preimage attacks on hash functions are out of reach yet, the components of these functions still expose some properties that are not expected of good hash functions or random oracles. Getting to the ... 1 Perfect Secrecy (or information-theoretic security) means that the ciphertext conveys no information about the content of the plaintext. In effect this means that, no matter how much ciphertext you have, it does not convey anything about what the plaintext and key were. It can be proved that any such scheme must use at least as much key material as there is ... 1 Perfect secrecy is the notion that, given an encrypted message (or ciphertext) from a perfectly secure encryption system (or cipher), absolutely nothing will be revealed about the unencrypted message (or plaintext) by the ciphertext. A perfectly secret cipher has a couple of other equivalent properties: Even if given a choice of two plaintexts, one the ... 2 In general there is no such recommendation. Python is quite useful for quick prototyping, but is generally very slow. Too slow to do any expensive computations. However, you can, for instance, write your core analysis functions in C and then use them in your python analysis tools. This is actually quite a common method of going about things. 0 There are diagnostic programs that will tell you the cipher type from a statistical analysis. 
For example: http://bionsgadgets.appspot.com/gadget_forms/refscore.html tells you immediately that this cipher is a Playfair. 3 Remark: in One Time Pad the pad is used once, thus this is not OTP, since here $k$ is reused. Hint for part 1: Write the relations between $k$, the message blocks $m_i$, the ciphertext blocks $\small C_i$ with the convention $\small\text{IV}=\small C_0$. Then, find equations that allow computing the desired $m_3⊕m_4$ from known quantities. Hint for part 2, ... 1 This is actually a good question, about how a mode of operation will affect analysis of a data stream. In regards to an implementation of AES-PCBC, if you have AES-ECB you can build a wrapper for PCBC around it with the appropriate block size. It is not too difficult, but unnecessary... In regards to security analysis, PCBC is no more resistant than CBC ... 3 A conceivable attack is inspired by this extract of LUKS On-Disk Format Specification Version 1.1.1, section 1: A partition can have as many user passwords as there are key slots. To access a partition, the user has to supply only one of these passwords. If a password is changed, the old copy of the master key encrypted by the old password must be ... Top 50 recent answers are included
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.3828752636909485, "perplexity": 1151.9517252000965}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-15/segments/1398223202457.0/warc/CC-MAIN-20140423032002-00338-ip-10-147-4-33.ec2.internal.warc.gz"}
https://skeptric.com/common-substring/
I've found pairs of near duplicates texts in the Adzuna Job Salary Predictions Kaggle Competition using Minhash. One thing that would be useful to know is what the common sections of the ads are. Typically if they have a high 3-Jaccard similarity it's because they have some text in common. The most asymptotically efficient to find the longest common substring would be to build a suffix tree, but for experimentation the heuristics in Python's DiffLib work well enough. I define a function that gets all common strings above a certain length. I look for all pairs difflib considers equal and print them out; this won't get all common substrings but works well enough on the job ads I tried them on. A benefit of using difflib is that if we want to find the longest common string of tokens we can just pass in a and b as lists of tokens. def common_substrings(a, b, min_length=7): seqs = [] seqmatcher = difflib.SequenceMatcher(a=a, b=b, autojunk=False) for tag, a0, a1, b0, b1 in seqmatcher.get_opcodes(): if tag == 'equal' and a1 - a0 >= min_length: seqs.append(a[a0:a1]) return seqs Once we have a way to find common substrings of pairs it's straightforward to extend it to a list of substrings. In particular we just look to see for each existing common substrings whether it has any common substrings with the next text. def all_common_substrings(args, min_length=7): seqs = None for arg in args: if seqs is None: seqs = [arg] continue new_seqs = [] for seq in seqs: new_seqs += common_substrings(arg, seq, min_length) seqs = new_seqs return seqs This could easily be extended to allow a few mismatches by collecting across difflib tags other than equal up to some length of tokens. # Exact brute force approach It's good to compare this with an exact solution to make sure the difflib heuristics are actually working. I always find it's good to start with a simple slow obviously correct solution before trying to build a more complex efficient algorithm. 
In particular we could start by producing all substrings of a string by iterating over each possible starting point and length: def all_substrings(s): for i in range(len(s)): for j in range(len(s)-i): yield s[i:i+j+1] To know whether one string is a substring of another we can just check whether it matches a any position: def contains_substring(a, b): """Does a constrin substring b""" for i in range(len(a) - len(b) + 1): if a[i:i+len(b)] == b: return True Then we could find the common substrings by just checking if any of the substrings of one are in the other: def naive_common_substrings(a, b): for substring in all_substrings(a): if contains_substring(b, substring): yield substring This will output a lot of substrings because any substring of a common substring is also a common substring. For example if "the" is in common, then so is "t", "th", "h", "he" and "e". We can filter this down to the "proper substrings", those that aren't contained in a larger substring. def proper_substrings(a): proper = [] for s in a: if any(contains_substring(p, s) for p in proper): continue supersequence = [contains_substring(s, p) for p in proper] if any(supersequence): val = s for idx, value in enumerate(supersequence): if value: proper[idx] = val val = None proper = [p for p in proper if p] else: proper.append(s) return proper Note that this implementation is awfully slow; the operations in calcualting proper_substrings(naive_common_substrings(a, b)) is quadratic in the length of a and roughly linear in the length of b. But it's good for a sanity check on some simple strings, and using it I find the difflib captures most of the common substrings on the job ads I tried.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.2919462323188782, "perplexity": 2384.8765837170577}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-40/segments/1664030337531.3/warc/CC-MAIN-20221005011205-20221005041205-00463.warc.gz"}
https://thatsmaths.com/2020/05/28/the-monte-carlo-method/
### The Monte-Carlo Method Learning calculus at school, we soon find out that while differentiation is relatively easy, at least for simple functions, integration is hard. So hard indeed that, in many cases, it is impossible to find a nice function that is the integral (or anti-derivative) of a given one. Thus, given ${f(x)}$ we can usually find ${d f /d x}$, whereas we may not be able to find ${\int f(x)\,d x}$. Stan Ulam was a remarkable genius. Originally from Poland, he spent his working life in America. He played a key role in the development of the atomic and hydrogen bombs at Los Alamos. He developed a method of evaluating integrals that proved very powerful and useful. Although the integral of a function may be defined in a precise, deterministic way, the method of evaluating it is based on probability, and yields an answer that is “very likely” to be correct. Integrating by Probability In many problems we need to find the area under a closed curve. In Fig. 1 (left) we show the function ${f(x) = 1/x}$ with the grey area showing the integral from 1 to 7.5. If we are unaware of the log-function, we cannot integrate analytically (to prove that the area is ${\log 7.5}$). Suppose we consider a rectangle around the curve, as in Fig. 1 (right). The sum of the red and blue areas is obviously the breadth by height, or 6.5. But we want the size of the red area. Ulam’s idea was this: suppose we pick a point ${p}$ at random within the rectangle. The probability that it falls within the red area is $\displaystyle \mbox{Prob}(p\mbox{\ in Red Area}) = \frac{\mbox{Red Area}}{\mbox{Total Area}} \,.$ where the total area is the the sum of the red and blue areas. If we do the experiment once, it tells us little. But, instead of choosing just one point at random, we choose a large number of them. 
Then, the proportion ${P}$ of these points falling below the curve should be approximately equal to the probability of a single randomly chosen point falling below the curve: $\displaystyle P = \mbox{Proportion below curve} = \frac{\mbox{Red Area}}{\mbox{Total Area}} \,.$ If we do the experiment many times, we will get a value for ${P}$. Then, since we know the area of the rectangle, we can solve the equation for the red area: $\displaystyle \mbox{Red Area} = P \times \mbox{Total Area}$ How could we do this experiment in practice? Well, one way might be to make a kind of rectangular dart-board with the function ${f(x)}$ drawn on it. We might then throw darts repeatedly at the board, and count the fraction of them that land on points below the function. In reality, we do not play darts; we have a mathematical description of the function ${f(x)}$ and the rectangle. We generate a pair of random numbers ${(x,y)}$ and check if it falls below or above ${f(x)}$. We do this many times, and keep a note of how many times the point falls below ${f(x)}$. Then the proportion ${P}$ of these to the total number gives us an approximate value for the area integral ${\int f(x)\,d x}$. Why Monte-Carlo? The method was dubbed the Monte-Carlo Method by Nicholas Metropolis, a colleague of Ulam, because of its association with chance and gambling. There are many simpler ways to evaluate an integral like ${\int f(x)\,d x}$. However, in many problems in physics, multi-dimensional integrals must be computed, sometimes in more than 100 dimensions, and the Monte-Carlo method is very valuable in producing an accurate result. The random points are generated rapidly using a PRNG, or Pseudo-Random Number Generator. The Monte-Carlo Method is one of the most popular and widely-used methods of obtaining numerical answers to problems in physics, chemistry, engineering and other disciplines. 
As easy as Pi The use of a probabilistic method to compute a deterministic quantity goes back to Georges-Louis Leclerc, Comte de Buffon in 1733. He devised a method of calculating ${\pi}$ by dropping a stick or needle on a lined floor (see earlier post here). We will now look at another way to use the Monte-Carlo approach to estimate ${\pi}$. Suppose a unit circle is drawn within a square and points are chosen randomly within the square; think of a dart-board in a square frame, with darts thrown from a distance. The area of the square is 4 and the area of the circle is ${\pi}$. So, a proportion ${P=\pi/4}$ of the darts will fall within the circle. Fig. 2 (top left) shows the set-up, Fig. 2 (top right) shows 250 random points within the square. Fig. 2 (bottom left) shows 2500 random points and Fig. 2 (bottom right) shows 25000 random points within the square. We used Mathematica to generate random number pairs ${(x,y)}$ in the range ${[-1,+1]\times[-1,+1]}$ and count the proportion with ${r^2=x^2+y^2<1}$. This proportion should approach ${\pi/4}$ when the number of points is large. For N=250 it was 3.1554/4, for N=2500 it was 3.1901/4 and for N=25000 it was 3.1422/4. We see that it is approaching the value 3.1416/4 or ${\pi/4}$.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 28, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8853947520256042, "perplexity": 290.88221067909825}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-40/segments/1664030337371.9/warc/CC-MAIN-20221003003804-20221003033804-00455.warc.gz"}
https://spot.lre.epita.fr/tut90.html
# The bdd_dict interface (advanced topic) Spot uses BDD for multiple purposes. The most common one is for labeling edges of automata: each edge stores a BDD representing its guard (i.e., a Boolean function over atomic propositions). Note that the automaton is still represented as a graph (with a vector of states and a vector of edges) and the BDD is only used for the guard. This differs from symbolic representations where the entire transition structure is represented as one large BDD. There are other algorithms where BDDs are used from different tasks. For instance, our simulation-based reduction function computes a signature of each state as a BDD that is essentially the disjunction of all outgoing edges, represented by their guard, their acceptance sets, and their destination classes. Also the translation of LTL formulas to transition-based generalized Büchi automata is using an intermediate representation of states that is similar to the aforementioned signatures, excepts that classes are replaced by subformulas. From the point of view of the BDD library, BDDs are just DAGs with nodes labeled by BDD variables (numbered from 0). From the point of view of Spot's algorithm, these BDD variables have a meaning. For instance if we want to synchronize two automata that have guards over the atomic propositions $$a$$ and $$b$$, we need to make sure that both automata agree on the BDD variables used to represent $$a$$ and $$b$$. ## The purpose of bdd_dict The spot::bdd_dict object is in charge of allocating BDD variables, and ensuring that multiple users reuse the same variables for similar purpose. When a twa_graph automaton is constructed, it takes a bdd_dict as argument. Every time an atomic proposition is registered through the twa::register_ap() method, the bdd_dict is queried. As an example, the following two automata share their bdd_dict, and although they do not declare their atomic propositions in the same order, they get compatible variable numbers. 
#include <spot/twa/twagraph.hh> int main() { spot::bdd_dict_ptr dict = spot::make_bdd_dict(); spot::twa_graph_ptr aut1 = spot::make_twa_graph(dict); int ap1a = aut1->register_ap("a"); int ap1b = aut1->register_ap("b"); std::cout << "aut1: a=" << ap1a << " b=" << ap1b << '\n'; spot::twa_graph_ptr aut2 = spot::make_twa_graph(dict); int ap2c = aut2->register_ap("c"); int ap2b = aut2->register_ap("b"); int ap2a = aut2->register_ap("a"); std::cout << "aut2: a=" << ap2a << " b=" << ap2b << " c=" << ap2c << '\n'; } aut1: a=0 b=1 aut2: a=0 b=1 c=2 Contrast the above result with the following example, where the two automata use different bdd_dict: #include <spot/twa/twagraph.hh> int main() { spot::bdd_dict_ptr dict1 = spot::make_bdd_dict(); spot::twa_graph_ptr aut1 = spot::make_twa_graph(dict1); int ap1a = aut1->register_ap("a"); int ap1b = aut1->register_ap("b"); std::cout << "aut1: a=" << ap1a << " b=" << ap1b << '\n'; spot::bdd_dict_ptr dict2 = spot::make_bdd_dict(); spot::twa_graph_ptr aut2 = spot::make_twa_graph(dict2); int ap2c = aut2->register_ap("c"); int ap2b = aut2->register_ap("b"); int ap2a = aut2->register_ap("a"); std::cout << "aut2: a=" << ap2a << " b=" << ap2b << " c=" << ap2c << '\n'; } aut1: a=0 b=1 aut2: a=2 b=1 c=0 For this reason, operations like spot::product(aut1, aut2) will require that aut1->get_dict() == aut2->get_dict(). In Python, many functions that would take an explicit bdd_dict in C++ will default to some global bdd_dict instead. So we can do: import spot aut1 = spot.make_twa_graph() ap1a = aut1.register_ap("a") ap1b = aut1.register_ap("b") print("aut1: a={} b={}".format(ap1a, ap1b)) aut2 = spot.make_twa_graph() ap2c = aut2.register_ap("c") ap2b = aut2.register_ap("b") ap2a = aut2.register_ap("a") print("aut1: a={} b={} c={}".format(ap2a, ap2b, ap2c)) aut1: a=0 b=1 aut1: a=0 b=1 c=2 In that case we did not mention any bdd_dict, but there is one that is implicitly used in both cases. 
Similarly, when we call spot.translate() the same global bdd_dict is used by default. What really confuses people, is that the association between an atomic proposition (a, b, …) and a BDD variable (0, 1, …) will only be held by the bdd_dict for the lifetime of the objects (here the automata) that registered this association to the bdd_dict. Here is a new C++ example where we use the bdd_dict::dump() method to display the contents of the bdd_dict (this method is only meant for debugging, please do not rely on its output). #include <spot/twa/twagraph.hh> int main() { spot::bdd_dict_ptr dict = spot::make_bdd_dict(); spot::twa_graph_ptr aut1 = spot::make_twa_graph(dict); int ap1a = aut1->register_ap("a"); int ap1b = aut1->register_ap("b"); std::cout << "aut1@" << aut1 << ": a=" << ap1a << " b=" << ap1b << '\n'; dict->dump(std::cout) << "---\n"; spot::twa_graph_ptr aut2 = spot::make_twa_graph(dict); int ap2c = aut2->register_ap("c"); int ap2b = aut2->register_ap("b"); std::cout << "aut2@" << aut2 << ": b=" << ap2b << " c=" << ap2c << '\n'; dict->dump(std::cout) << "---\n"; aut1 = nullptr; std::cout << "aut1 destroyed\n"; dict->dump(std::cout) << "---\n"; aut2 = nullptr; std::cout << "aut2 destroyed\n"; dict->dump(std::cout); } aut1@0x55851d4af400: a=0 b=1 Variable Map: 0 Var[a] x1 { 0x55851d4af400 } 1 Var[b] x1 { 0x55851d4af400 } Anonymous lists: [0] Free list: --- aut2@0x55851d4b0990: b=1 c=2 Variable Map: 0 Var[a] x1 { 0x55851d4af400 } 1 Var[b] x2 { 0x55851d4af400 0x55851d4b0990 } 2 Var[c] x1 { 0x55851d4b0990 } Anonymous lists: [0] Free list: --- aut1 destroyed Variable Map: 0 Free 1 Var[b] x1 { 0x55851d4b0990 } 2 Var[c] x1 { 0x55851d4b0990 } Anonymous lists: [0] Free list: (0, 1) --- aut2 destroyed Variable Map: 0 Free 1 Free 2 Free Anonymous lists: [0] Free list: (0, 3) For each BDD variable registered to the bdd_dict, we have one line that gives: the variable number, its meaning (e.g. 
Var[b]), its registration count (x2), and a list of pointers to the objects that registered the association. Every time twa::register_ap() is called, it calls a similar function in the bdd_dict to check for an existing association or register a new one. When aut1 is deleted, it unregisters all its variables, causing variable 0 to become free. The free list is actually a list of pairs representing ranges of free variables that can be reassigned by the BDD dict when needed. (The anonymous list serves when anonymous BDD variables are used.) Such a low-level registration is usually handled by the following interface: // return a BDD variable number for f int bdd_dict::register_proposition(formula f, const void* for_me); // release the BDD variable void bdd_dict::unregister_variable(int var, const void* me); // release all BDD variables registered by me void bdd_dict::unregister_all_my_variables(const void* me); // register the same variables as another object void bdd_dict::register_all_variables_of(const void* from_other, const void* for_me); The last function may be bit tricky to use, because we need to be sure that another object has registered some variables. You can rely on the fact that each twa automaton register its variables this way. Now, in most cases, there is no need to worry about the bdd_dict. Automata will register and unregister variables as needed. Other objects like spot::twa_word will do the same. There are at least two situations where one may need to deal with the bdd_dict: 1. One case is when creating a derived object that store some BDD representing a formula over atomic proposition (but without reference to their original automaton). 2. Another case is when more BDD variables (maybe unrelated to atomic propositions) are needed. These two cases are discussed in the next sections. 
## Prolonging the association between a BDD variable and an atomic proposition Let us implement an object representing a set of transitions of the form $$(src, guard, dest)$$. This can for instance be used to store all transition that belong to a certain acceptance set. import spot class trans_set: def __init__(self, dict): self.set = set() self.dict = dict def str_trans(self, src, guard, dst): f = spot.bdd_format_formula(self.dict, guard) return "({},{},{})".format(src, f, dst) def __str__(self): return '{' + ",".join([ self.str_trans(*t) for t in self.set ]) + '}' def accepting_set(aut, num): ts = trans_set(aut.get_dict()) for e in aut.edges(): if e.acc.has(num): return ts The above code has two definitions. 1. The trans_set class is a set of transitions that can be printed. It stores a bdd_dict so that it can print the guard of the transition. 2. The accepting_set function iterates over an automaton, and saves all transitions that belong to a given acceptance set number. For instance we can now translate an automaton, compute its acceptance set 0, and print it as follows: aut = spot.translate('GF(a <-> XXa)') ts = accepting_set(aut, 0) print(ts) {(2,!a,2),(0,a,3),(1,a,1),(3,!a,0)} The code of trans_set is in fact bogus. The problem is that it assumes the association between the atomic propositions and the BDD variable is still available when the str_trans method is called. However, that might not be the case. The following call sequence demonstrates the problem: try: ts = accepting_set(spot.translate('GF(a <-> XXa)'), 0) print(ts) except RuntimeError as e: print("ERROR:", e) ERROR: bdd_to_formula() was passed a bdd with a variable that is not in the dictionary In this case, the temporary automaton constructed by spot.translate() and passed to the accepting_set() function is destroyed right after the ts object has been constructed. When the automaton is destroyed, it removes all its associations from the bdd_dict. 
This means that before the print(ts) the dictionary that was used by the automaton, and that is still stored in the ts objects is now empty: calling bdd_format_formula() raises an exception. This can be fixed in a couple of ways. The easy way is to store the automaton inside the trans_set object, to ensure that it will live at least as long as the trans_set object. But maybe the automaton is too big and we really want to get rid of it? In this case trans_set should tell the bdd_dict that it want to retain the associations. The easiest way in this case is to call the register_all_variables_of() method, because we know that each automaton registers its variables. import spot class trans_set: def __init__(self, aut): self.set = set() self.dict = aut.get_dict() self.dict.register_all_variables_of(aut, self) def __del__(self): self.dict.unregister_all_my_variables(self) def str_trans(self, src, guard, dest): f = spot.bdd_format_formula(self.dict, guard) return "({},{},{})".format(src, f, dest) def __str__(self): return '{' + ",".join([ self.str_trans(*t) for t in self.set ]) + '}' def accepting_set(aut, num): ts = trans_set(aut) for e in aut.edges(): if e.acc.has(num): return ts try: ts = accepting_set(spot.translate('GF(a <-> XXa)'), 0) print(ts) except RuntimeError as e: print("ERROR:", e) {(2,!a,2),(0,a,3),(1,a,1),(3,!a,0)} Notice that we have also added a destructor to trans_set to unregister all the variables. ## Anonymous BDD variables Another scenario where working with a bdd_dict is needed is when one needs to allocate anonymous BDD variables. These are variables that are not attached to any atomic proposition, and that can be used by one algorithm privately. If multiple algorithms (or objects) register anonymous variables, the bdd_dict will reuse anonymous variables allocated to other algorithms. 
One can allocate multiple anonymous variables with the following bdd_dict method: int bdd_dict::register_anonymous_variables(int n, const void* for_me); A range of n variables will be allocated starting at the returned index. For instance, let's say the our trans_set should now store a symbolic representation of a transition relation. For simplicity we assume we just want to store set of pairs (src,dst): each pair will be a conjunction $$v_{src}\land v'_{dst}$$ between two BDD variables taken from two ranges ($$v_i$$ representing a source state $$i$$ and $$v'i$$ representing a destination state $$i$$), and the entire set will be a disjunction of all these pairs. If the automaton has $$n$$ states, we want to allocate $$2n$$ BDD variables for this purpose. We call these variables anonymous because their meaning is unknown the the bdd_dict. import spot from buddy import * class trans_set: def __init__(self, aut): self.dict = d = aut.get_dict() self.num_states = n = aut.num_states() self.anonbase = b = d.register_anonymous_variables(2*n, self) s = bddfalse for e in aut.edges(): s |= self.src(e.src) & self.dst(e.dst) self.rel = s def src(self, n): return bdd_ithvar(self.anonbase + n) def dst(self, n): return bdd_ithvar(self.anonbase + n + self.num_states) def __del__(self): self.dict.unregister_all_my_variables(self) def __str__(self): isop = spot.minato_isop(self.rel) i = isop.next() res = [] while i != bddfalse: s = bdd_var(i) - self.anonbase d = bdd_var(bdd_high(i)) - self.anonbase - self.num_states res.append((s, d)) i = isop.next() return str(res) ts = trans_set(spot.translate('GF(a <-> XXa)')) print(ts) [(0, 2), (0, 3), (1, 0), (1, 1), (2, 2), (2, 3), (3, 0), (3, 1)]
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.33242055773735046, "perplexity": 7406.207386471749}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-49/segments/1669446710719.4/warc/CC-MAIN-20221130024541-20221130054541-00068.warc.gz"}
https://matheducators.stackexchange.com/questions/2246/practical-experience-with-teaching-differentials-in-freshman-calc?noredirect=1
# Practical experience with teaching differentials in freshman calc? There is a well known essay by Dray and Manogue which argues that differentials should be brought back into freshman calculus, and that we shouldn't worry too much about choosing a specific way of formalizing them, or about giving students a formal answer to the question of "what are differentials?" I find myself in almost complete agreement with Dray and Manogue, and I feel especially that differentials are the natural way to approach related rates and implicit differentiation. Since differentials are used universally in the sciences and engineering, I think freshman calc students should be exposed to them. I would like to throw in just a taste of differentials when doing related rates and implicit differentiation, without going whole hog and using them throughout the course. But I will admit to quite a bit of apprehension about actually going ahead with such an approach. I suspect that, for example, I would get some push-back from colleagues who believe that differentials are inherently inconsistent; that students would go to tutors for help, and that the tutors wouldn't understand the approach; and that the kind of casualness about foundational aspects advocated by Dray and Manogue would be interpreted as carelessness or incompetence on my part. Has anyone implemented anything like what Dray and Manogue advocate, and if so, what were their practical experiences like? Related: A calculus book that uses differentials? [I was surprised to find that Keisler's treatment of implicit differentiation looks exactly like a standard one, without differentials or infinitesimals.] • I for one was thoroughly confused by the (informal) infinitesimals rampant in engineering. .. May 11 '14 at 0:49 • Agreed, I always was confused by them too. May 11 '14 at 2:17 • A good question. But how to define "differential" in an intellectually honest (i.e. rigorous) way? May 12 '14 at 6:58 • This is all very interesting! 
I wrote my Bachelor's Thesis about Leibniz's development of differentials because my pre-university teacher had promised to tell us a little about what they meant, but he never did. BTW I am Danish. We also never defined limits with $\varepsilon,\delta$ before I got to university level. It was all $\Delta x$ approaches this and that. Very disturbing, I think! May 12 '14 at 7:47 • While I'm all in favor of teaching calculus with differentials, I don't agree with everything Dray and Manogue say. In particular, it seems to me that in addition to their many other uses, differentials are precisely suited for linear approximation, and that describing their role in that area is a nice complement to their use in the chain rule, as mentioned in David Butler's answer. Jul 3 '14 at 5:17 There is a hybrid approach which is possible: 1. teach $\frac{df}{dx}$ as traditionally is done. Motivate geometrically by secant lines and physically my instantaneous velocity. Emphasize that $x \mapsto \frac{df}{dx}$ is a new function defined pointwise by the derivative. 2. introduce differentials as they naturally appear in related rates problems. For example, if $A = lw$ and $l$ and $w$ are both functions of time then the chain-rule and product-rule for functions of time naturally yield: $$\frac{dA}{dt} = \frac{dl}{dt}w+l\frac{dw}{dt} \qquad (\ \star \ ) .$$ As a good approximation for small durations of time $\triangle t$ we have $\frac{dA}{dt}\triangle t = \triangle A$. This approximation can be explicitly understood as it connects with the limit definition given earlier and I try to use $\triangle t$ to denote a finite change in time. Generally, $\triangle f$ denotes some finite change. On the other hand, if all the variables involved are time-dependent and we consider a process where the duration of time $\triangle t$ is very small then I say as a heuristic we can think of $\triangle t \rightarrow dt$. 
Moreover, we may multiply by $dt$ to obtain the relation between the infinitesimal changes in $A,l,w$ for my token example: $$dA = wdl+ldw$$ My interpretation for them goes something like this: when we write $dA = wdl+ldw$ we intend to approximate the change in $A$ as it related to the change and values of $l,w$. This relation is a short-hand for the related-rates equation $\star$. Then, later in integration applications, I advocate the method once more. I call it the infinitesimal method. I tell them it is a notational convenience which is justified by more cumbersome expressions which make problems harder to work. Yes, we could set up an explicit Riemann sum for area, volume or work problems but it is far more efficient to calculate formally $dA,dV$ or $dW$ and express each in terms of one variable which parametrizes the situation. I include in my notes discussions of how we go back and forth from the infinitesimal to the currently-traditional function-based framework. The technique of $u$-substitution is far easier to grasp in the formal manipulations of the method of differentials. However, if you want to prove anything, the method of differentials can put you in a bad place. That said, I think there is room for both approaches. The question is what is the task at hand? If the task is foundations and geometric intuition then I tend to think differentials are not so helpful. However, if the task is problem-solving, especially involving integration as a continuous sum, then differentials are quite appropriate. The students can understand them as a convenient notation for a limit-based formulation. There is no need to choose, we can have the best of both worlds. A faculty member in my department is a former student of Dray's and 2-3 years ago showed me the differentials approach. He and I both use it quite extensively in our first semester calculus courses. Here's the pros and cons (in no particular order) and comments on them. Pro: 1) It's generally awesome. 
Students in my class struggle a lot less with chain rule, implicit differentiation, related rates, optimization, u-substitution, and integration by parts. (I could go on about how awesome it is, but that's not part of the original question.) In fact, the students that struggle the most are the ones who are retaking it (either again in college or just because they took it in high school) and won't embrace the differentials method. They usually end up making what I assume are the same mistakes they made the first time they took the course. 2) In Calculus II, it makes even more sense. Integration techniques make more sense because $dx$ or $du$ are not just marking the end of the integral, they actually mean something. For the geometrical computations you can talk generally and then have students apply it to the given situation. For example, instead of two formulas for volumes by cylindrical shells, I give them one: $\int 2\pi r h dr$. Then it's up to them to figure out what $r$, $h$, and $dr$ represent geometrically. Likewise, it makes introducing parametric and polar curves easier since students already are comfortable with equations that aren't $y=f(x)$. Also, it greatly opens up the applications of integration section. Work = (Force)*(distance) now becomes $dW = s \cdot dF$ (or $dW = F \cdot ds$ if appropriate, "What is the small quantity?", "Which are you assuming to be constant on your slice?"), which is really how advanced physics/engineering classes think about such relations. Pros/Cons (depending on how you look at it): 1) It makes calculus extremely algebraic to the point that you really don't have to understand much of the concepts going on. As long as you know what algebraic fact you need/are using (e.g. solving for $dA/dt$ or setting $dA=0$), it's just algebra. Of course, a lot of students are weak in their algebra skills... 2) Transition to Calculus II: If they have me again, no big deal. 
If they have someone else, I would guess they might stutter a bit in the beginning. But I usually use $d/dx$ at least once or twice around the end of the course and only once in the past 3 years have I had a student ask me what that meant. When I explained it meant, "Take a d, then divide by dx" they gave an "oh, duh" response. 3) Students mix up $3x^2$ and $3x^2dx$, i.e. they sometimes will write a function when they mean a differential or vice versa. If they write a differential and mean a function, it usually isn't too big a deal, since if they evaluate the function they then drop the $dx$. If they write a function and needed a differential, then the error is a problem because they typically need to solve for a ratio of differentials or something and they don't have the right pieces to do so. All in all, I see this as a small trade-off. 4) Students will get confused when they look at the book. I had a lot of problems when I first started using differentials because students would look to the book to do the homework. Now I have notes that are NOT my examples from lecture, but the examples in the book re-worked using differentials. I've had far less problems since. I'm hoping in an upcoming sabbatical to rewrite an open-source Calc book to put it into a differentials flavor... 5) Colleagues' opinions. Well, as I mentioned above, maybe I'm lucky that there's two of us out of the 6-7 Calc I instructors we have at our university. The others either think it's fine (as long as they don't have to do it) or are interested in using bits and pieces (but won't commit to going all out). Nobody challenges the rigor of it because, honestly, given our student population, there's little rigor in our Calc I anyway. 6) Tutors. Usually I warn the tutors who haven't had me what they might see when students come in. Our tutors are typically junior or senior math majors. 
When I show them the differential approach they take to it very quickly (after only a couple of examples) and usually wonder why everybody doesn't do it that way. So that's my (more than) 2 cents. I could go on and on, but I think that answers most of your questions. • Nice answer. I'd love to see your open-source book when it's done. – user507 May 12 '14 at 15:00 • That calculations make mathematics "more accessible" has long been recognized, at least since Descartes and certainly by Newton and Leibniz. Yet, limits have now become, even though they cannot be calculated---looking for a Skolem function $\delta = S(\epsilon)$ hardly qualifies, the rock on which the calculus rests ... and beginners founder. Is there really an open-source book in the making? Apr 6 '15 at 23:49 A great quote from Dray and Manogue: . . . many mathematicians think in terms of infinitesimal quantities: apparently, however, real mathematicians would never allow themselves to write down such thinking, at least not in front of the children. —Bill McCallum [16] I think in this type of discussion it is important to separate carefully between two issues: (a) what is the theoretical justification for using differentials/infinitesimals in the classroom, and (b) what is the actual situation on the ground, i.e. are teachers successful in imparting understanding of basic notions of the calculus such as slopes/derivatives, areas/integrals, seeking minima/maxima, basic theorems such as the extreme value theorem, etc. using such an approach? As far as (b) is concerned, I can report based on classroom experience with Keisler' book last term that the approach is successful. Not only was it successful but the students petitioned their department to make sure the class is taught exactly the same way next year. The true infinitesimal calculus course has now been taught for 4 years to over 400 freshmen, with testable success; see this article. 
As far as (a) is concerned, of course we know today that infinitesimals are consistent. It is even more important to make the point that traditional calculus courses do not typically establish the foundations of the subject such as constructing the real numbers. Some foundational material has to be taken as given, and is taken up again in more advanced analysis or algebra courses. Similarly, in the approach using calculus certain facts need to be taken as given and Keisler does a marvellous job of motivating whatever facts that are taken as given in this approach. It is worth mentioning the work by Kathlene Sullivan from the mid 1970s who performed a controlled experiment in the Chicago area, with some groups following the traditional approach, and others using Keisler's book. The groups using infinitesimals came away from the course with a slightly better understanding of the basic calculus notions, according to the conclusions of the study. • You make good points, but it's also important not to give the impression that differentials can only be done rigorously using infinitesimals. Jul 3 '14 at 4:33 • @MikeShulman, there are other treatments of differentials of course but the only pedagogically useful one at the level of freshman calculus is via infinitesimals. Jul 31 '14 at 14:26 • that's a very bold statement! Have you any proof? Jul 31 '14 at 22:42 • @MikeShulman, differential forms are too advanced for freshman calculus. As far as the approach with first calculating the derivative and then assigning arbitrary real values to $dx$ and $dy$ so as to fit the equation, it seems tautological and hardly explains anything compared to an explanation provided by an infinitesimal microscope figure. Aug 3 '14 at 7:30 • Yes, a different way to express my suggestion would be to say "differential forms are not necessarily too advanced for freshman calculus, since you can define them explicitly without needing to drag in words like 'multilinear map'". 
Aug 6 '14 at 19:26 I had the chance to read Dray & Minogue's online stuff shortly before I was first assigned to teach a Calculus course, an Applied Calculus course intended for biology and economics majors. As it was a terminal math course, I felt justified in taking an unusual approach; as it was applied, I didn't worry about rigour (as long as I knew that it could be done). So I used differentials. It seemed to go all right. That was three years ago. Since then, I've also taught the regular three-semester-equivalent Calculus series for physical science/engineering and mathematics majors. Sometimes I teach the whole sequence in a row, sometimes I only teach Caclulus 3 (multivariable). I have used differentials every time. I particularly love it for multivariable calculus, where I also introduce differential forms. It is generally not too hard to link this to what's in the book (which is chosen at the departmental level). After all, differentials (and differential forms in multivariable integrals) appear in the text; it's just that they only appear in certain combinations. So I can pick out a subexpression from the expressions in the book and explain that it has a meaning in its own right. Sometimes I write some new exercises, but mostly I just show them another way to do the exercises in the book. Sometimes they do it my way, sometimes the book's way; it's all good. In the applied course, I do differentials before derivatives, which I do before limits. We're not pretending to define things rigorously, so there is no harm in this, and this seems to be in order of increasing difficulty. 
I actually introduce rigorous definitions (even ones that are not in the textbook), but at the end, in the context of approximation methods (which is actually how they first appeared historically); I mention that such-and-such actually provides a rigorous definition, which they take as one of my remarks important for their well-rounded liberal education that won't be on the test, and we move on. In the regular course, I feel obligated to give the rigorous definitions a more prominent place, since they are explicitly part of the syllabus. This means an approach as in Aeryk's answer: limits, then derivatives, then differentials. I pretty much agree with everything that Aeryk has said, right down the line. But I will add that differentials are especially awesome in multivariable calculus, where I can introduce partial derivatives as in David Butler's answer. Often it is easier to work with the differentials directly, and never mind the individual partial derivatives that appear within them. I must caution against conflating different kinds of differentials and infinitesimals. In particular, Dray & Minogue's approach is NOT the same as Keisler's approach using nonstandard analysis. (Although a nice thing about Keisler's book is that it allows you to do derivatives before limits, rigorously even.) Dray & Minogue's (and hence my) differentials are the linear differentials from differential geometry (and in differential forms), not the nonstandard hyperreal numbers from nonstandard analysis (nor the nilpotent infinitesimals from synthetic differential geometry, for that matter). To distinguish these, consider: if ‘d’ indicates a finitesimal change, then dy/dx = f'(x) is an approximation; if it indicates a nonstandard infinitesimal, then dy/dx = f'(x) is an adequality (equality of standard parts); if it indicates a linear differential, then dy/dx = f'(x) is an equality, period. (All on the assumption that y = f(x), of course.) 
I also want to stress the triviality of the Chain Rule with differentials. I don't mean that you can prove it by cancelling du in dy/dx = dy/du du/dx; it is a real theorem that requires a real proof. (I once read a review of Keisler's book that praised Keisler for being able to prove the Chain Rule by a trivial cancellation; I can only assume that the reviewer did not read that part of the book, for Keisler himself was under no such illusions.) But when working with differentials, it becomes a theoretical result, not a tool for practical calculation. The equation dy/dx = dy/du du/dx is a triviality, but it is not the Chain Rule; or rather, it is the Chain Rule only if you change the meaning of the symbols from one place to another, an unfair trick to play on students. The real Chain Rule is d(f(u)) = f'(u) du. It tells you how, if you can differentiate [take the derivative of] a function, you can apply that function to any differentiable expression and differentiate [take the differential of] the resulting expression. An example will show the power of this approach. Suppose that you establish, say by the usual argument involving sum-angle formulas and special limits, that the derivative of the sin function is the cosine function. Then you apply the Chain Rule —once!— to conclude that d(sin u) = cos u du for any differentiable expression u; this fact is called the Sine Rule. Now when faced with such expressions as sin(3x²), sin(5x - sin x), sin(2x + 3y), etc, you do NOT use the Chain Rule; you use the Sine Rule. So in the end, instead of having a rule for each algebraic operation, a derivative for each special function, and a Chain Rule that requires you to analyse expressions as composites, you have a rule for each algebraic operation and a rule for each special function, and every differentiation is done by working your way from the outside in, applying the appropriate rule to whatever operation comes next in the expression as it is written down. 
(This is at least part of what Aeryk means by saying that Calculus becomes algebraic.) The Chain Rule is only needed explicitly if you are dealing with a new or unknown function. As you can see with sin(2x + 3y) above, differentials work seamlessly with any number of variables. Students have done Algebra with multiple variables, and they can just as easily do Calculus with multiple variables. Some things, such as optimization, graphing, and integration, are legitimately more complicated with multiple variables, but the basic operation of differentiating [taking the differential of] an expression is not. This is why related rates and implicit differentiation comes so smoothly, but it also means that you can talk about partial derivatives right away too. I make sure to do this, even though it is again an aside that's not on the text, because Calculus 3 is not required for everybody who is going to use partial derivatives, after all. You can see material for my courses at http://tobybartels.name/courses/. Generally the later material is more polished. All that you really need to look at are the notes at the bottom of each course's main page. You will need a DjVu reader to read some of the older stuff (which has been converted to this smaller file format only). • You make a good point about avoiding the conflation of differentials in the sense of abstract linearization vs. differentials as numbers in the extended number systems ala Keisler etc. When I think about differentials, I am open to the concept of them as "numbers" and I use that as a heuristic, but I always go back to limits, real numbers, and later the differential or differential forms to think about proofs of theorems. To me, the differential notation is a fortunate accident which is unreasonably sucessful in view of the tapestry of theorems it encodes... with a few isolated failures... 
Apr 22 '17 at 5:18 • So, while I acknowledge the possibility of building calculus without limits, I don't actually make a practice of it. In my thinking, ultimately, calculus may be thought of without regard to its foundational nuts and bolts. Much like the situation with other concrete models we provide. Complex numbers can be viewed as quotients of polynomials, matrices or vectors in the plane. I suspect it's best to say it "is" none of these things. Rather, complex numbers are objects of the form $a+bi$ where $i^2=-1$. Likewise, perhaps we should think harder about what calculus "is". Just an idea... Apr 22 '17 at 5:22 • Mon chapeau for mentioning adequality! Apr 25 '17 at 9:58 I think the best time to really claim that differentials make sense is during solids of revolution. Trying to convince students that $\int f(x) \textrm{dx}$ is the sum of infinitely many rectangles only works for the top students. The students don't actually use this knowledge when solving problems this early in calculus, so I think it is best skipped at that point. However, drawing a thin cylinder and claiming that its volume is $\pi r^2 \textrm{dx}$ when oriented one way and $\pi r^2 \textrm{dy}$ when oriented the other way is golden. Shell method? The thickness of the shell is "a little bit of x" (dx) when oriented one way, and dy the other way. The students get to practice using this while doing problems and so this is (in my opinion) the right time for the concept to be stressed. • Arc length is the integral of ds, Work is the integral of F dx, etc. The applications of integration chapter is the right place for this to happen. Jul 4 '14 at 14:34 I run the Maths Learning Centre at my University and I came up with the idea of using differentials on the spot when a student asked me to explain how the chain rule works. The problem is that the chain rule just doesn't relate directly to slopes drawn on a graph, so I needed to come up with a different approach. 
Following the tendency of the Economics lecturers to use differentials I started there. Later in a revision seminar for economics students, I used it as a way to unify their thinking about ordinary and partial differentiation. I'm not using pure differentials as described in the article but a bit of a mixture. I've been saying that we are looking for a way to describe how big dy is relative to dx. So I write "dy = (?)*dx". Then I say that the name for the ? is the derivative and it's represented by "dy/dx". Later, with partial derivatives (say with z dependent on x and y) we have dz = (?) dx + (??) dy and the name for (?) is $\partial z/\partial x$ and the name for (??) is $\partial z / \partial y$. I did make it clear that dy/dx is ALSO the formula for a function in its own right, which is where I introduce the f' notation, saying it's another function related to f. Finally, this is all connected to a pictorial approach that is not the graph of the function. I draw a horizontal number line for x and a separate horizontal number line for y. Then I talk about how when you start at a point on the x-line, the function produces a matching point on the y-line. I draw a few pairs of matching points. Then I pick one of them and draw a little arrow to represent dx. Then I draw a matching little (but slightly bigger) arrow for dy on the second line. And I point to these two arrows and discuss how big one is relative to the other. For the two-variable function, I draw the x-line and the y-line next to each other, and below in the middle I draw the z-line. Then the dx arrow and the dy arrow each produce a separate arrow on the z-line, which are arranged head-to-tail to make the whole dz arrow. The students I have used this with one-on-one have seemed to really like this, especially for the two-variable case. 
In the future I would like to actually make a table of x's, y's, dx's and dy's so that they can see how the relationship between dx and dy can be different for different x's and therefore be a formula rather than a single number. • The chain rule does relate directly to slopes drawn on the graph. What else could it relate to? Oct 2 '14 at 20:26 • Can you show me please? I would really like to see how the three slopes are geometrically related on a graph, because it really is not obvious to me. Oct 2 '14 at 21:49 • To clarify: I have seen descriptions where you have the outside function graphed, and the composed function graphed, and you see how an adjustment of the x-axis described by the inside function changes the slope of the graph. What's missing is a graph with the slope of the inside function. This is what I mean by it not directly relating to slopes on a graph. One of them seems to be missing! Oct 2 '14 at 22:00 • I cannot produce a picture currently, but I will describe one. We attempt to determine the derivative of $(g \circ f)$ at $x=a$. To do so introduce a 3 dimensional coordinate system $x,y,z$. Graph $f$ in the $xy$ plane, $g$ in the $yz$ plane, and $(g \circ f)$ in the $xz$ plane. Construct the graph of $g \circ f$ by first bouncing from $x$ to $f(x)$, then from $f(x)$ to $g(f(x))$. We want to see what happens at $a$ to the slopes. So increment $a$ by a small amount $\Delta x$. First $f$ stretches this to $f'(a)\Delta x$. Then $g$ stretches that to $g'(f(a))f'(a)\Delta x$. Oct 2 '14 at 22:06 • Very nice! But it's still not your ordinary graph is it? It's still at least one step away from your ordinary slope as students usually think of it. That's what I mean by "not directly related". Oct 2 '14 at 22:40
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8626706004142761, "perplexity": 517.6106533975553}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-43/segments/1634323585265.67/warc/CC-MAIN-20211019105138-20211019135138-00445.warc.gz"}
http://nrich.maths.org/1011/note
### Exploring Wild & Wonderful Number Patterns EWWNP means Exploring Wild and Wonderful Number Patterns Created by Yourself! Investigate what happens if we create number patterns using some simple rules. ### I'm Eight Find a great variety of ways of asking questions which make 8. ### Dice and Spinner Numbers If you had any number of ordinary dice, what are the possible ways of making their totals 6? What would the product of the dice be each time? # Abundant Numbers ## Abundant Numbers To find the factors of a number, you have to find all the pairs of numbers that multiply together to give that number. The factors of $48$ are: $1$ and $48$ $2$ and $24$ $3$ and $16$ $4$ and $12$ $6$ and $8$ If we leave out the number we started with, $48$, and add all the other factors, we get $76$: $1 + 2 + 3 + 4 + 6 + 8 + 12 + 16 + 24 = 76$ So .... $48$ is called an abundant number because it is less than the sum of its factors (without itself). ($48$ is less than $76$.) See if you can find some more abundant numbers! ### Why do this problem? This activity helps to reinforce the ideas surrounding factors. It could be used to help pupils learn to pursue calculations for a longer period of time and you could decide to focus on working systematically. It offers a lot of engaging arithmetic work from a bvery briefly described starting point. Systematic recording of results and conclusions is helpful in tackling this problem. ### Possible approach Introduce the idea of abundant numbers using the problem and then work with the whole class to explore a couple of other examples. You could try $12$ which has the factors $1$ and $12$, $2$ and $6$, $3$ and $4$. If you add together $1$, $2$, $3$, $4$ and $6$ you get $10$ which is less than $12$ so $12$ is not abundant. Then try $18$ which is abundant. There are plenty of other examples you could use and the children could be encouraged to make suggestions. Once they have the idea, they can explore on their own. 
### Key questions What are the factors of...? Can you predict whether they will be abundant? How have you decided which numbers to choose? I see you seem to have a system for doing this, can you tell me about it? ### Possible extension Children could be encouraged to find all the abundant numbers below a certain target or to develop strategies for choosing numbers that may be abundant. ### Possible support A table square to $100$ may help to support some children in identifying multiples. They may need support in finding the pairs of factors by using cubes or counters to help them. They could be encouraged to try to find the factors of numbers to $20$ first.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.2555483877658844, "perplexity": 441.4176499381721}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-32/segments/1438042991019.80/warc/CC-MAIN-20150728002311-00175-ip-10-236-191-2.ec2.internal.warc.gz"}
https://link.springer.com/article/10.1007%2Fs00125-009-1547-9
Diabetologia , Volume 53, Issue 1, pp 144–152 # An empirical index of insulin sensitivity from short IVGTT: validation against the minimal model and glucose clamp indices in patients with different clinical characteristics • A. Tura • E. Succurro • L. Groop • G. Sesti • G. Pacini Article ## Abstract ### Aims/hypothesis Minimal model analysis for insulin sensitivity has been validated against the glucose clamp and is an accepted method for estimating insulin sensitivity from IVGTT. However minimal model analysis requires a 3 h test and relevant expertise to run the mathematical model. The aim of this study was to suggest a simple predictor of minimal model analysis index using only 1 h IVGTT. ### Methods We studied participants with different clinical characteristics who underwent 3 h regular (n = 336) or insulin-modified (n = 160) IVGTT, or 1 h IVGTT and euglycaemic–hyperinsulinaemic clamp (n = 247). Measures of insulin sensitivity were insulin sensitivity index estimated by minimal model analysis (SI) and the mean glucose infusion rate (clamp) (M). A calculated SI (CSI) predictor, $${\text{CS}}_{{\text{I}}} = {\text{ \alpha }} \times {K_{\text G} } \mathord{\left/ {\vphantom {{K_{G} } {{\left( {{\Delta {\text{AUC}}_{{{\text{INS}}}} } \mathord{\left/ {\vphantom {{\Delta {\text{AUC}}_{{{\text{INS}}}} } T}} \right. \kern-\nulldelimiterspace} T} \right)}}}} \right. \kern-\nulldelimiterspace} {{\left( {{\Delta {\text{AUC}}_{{{\text{INS}}}} } \mathord{\left/ {\vphantom {{\Delta {\text{AUC}}_{{{\text{INS}}}} } T}} \right. \kern-\nulldelimiterspace} T} \right)}}$$, was suggested, based on the calculation of the rate of glucose disappearance KG and the suprabasal AUC of insulin concentration ΔAUCINS over T = 40 min. For all the participants, α was assumed equal to the regression line slope between KG/(ΔAUCINS/T) and SI in control participants. 
### Results CSI and SI showed high correlation (R2 = 0.68–0.96) and regression line slopes of approximately one in the majority of groups. CSI tended to overestimate SI in type 2 diabetic participants, but results were more reliable when CSI was computed with insulin-modified rather than regular IVGTT. CSI showed behaviours similar to SI as regards relationships with BMI, acute insulin response and sex. CSI showed good correlation with M (R2 = 0.82). ### Conclusions/interpretation A short test can achieve a good approximation of minimal model analysis and clamp insulin sensitivity. The importance of a method such as CSI is that it allows analysis of IVGTT datasets with samples limited to 1 h. ### Keywords Glucose tolerance Insulin action Insulin resistance One hour intravenous glucose tolerance test ### Abbreviations AIRG Acute insulin response to glucose AUCINS AUC of insulin concentration CSI Calculated SI IGMCL Impaired glucose metabolism (participants subjected to clamp) IGT Impaired glucose tolerance INSMOD Insulin-modified 3 h frequently sampled IVGTT M Mean glucose infusion rate (clamp) NGT Normal glucose tolerance NGTCL NGT participants subjected to clamp SI Insulin sensitivity index estimated by minimal model analysis ## Introduction Insulin sensitivity is paramount for characterising metabolic states. The glucose clamp is the experimental procedure yielding the gold standard measurement of this variable. Nonetheless, minimal model analysis of IVGTT data, i.e. insulin sensitivity index estimated by minimal model analysis (SI), is also widely used to assess insulin sensitivity [1, 2]. However, despite some efforts to develop automatic procedures and lower the need for user intervention [3], the minimal model procedure requires sophisticated computer programming and relevant expertise to run the mathematical model properly. 
Furthermore, reliable results based on minimal model require many plasma insulin and glucose samples over a time interval of at least 3 h after glucose injection. The aims of this study were: (1) to propose an index able to predict minimal model insulin sensitivity values based on direct calculations from easily measurable simple variables and not requiring complex mathematical models, while using IVGTT data limited to 1 h or less, as often happened before introduction of the minimal model [4]; and (2) to validate the new index against minimal model SI and the glucose clamp, by assessing its performance in several groups of participants with different degree of glucose tolerance and specific clinical characteristics. ## Methods Participants analysed in this study are presented in the following sections. All participants gave their consent to the investigations, which were approved by the Local Ethics Committees. ### Participants, 3 h regular IVGTT We analysed 336 participants partially studied in previous investigations [5, 6, 7, 8]. Of these, 114 were control participants with normal glucose tolerance (NGT), 128 had impaired glucose tolerance (IGT) (22 of whom also had impaired fasting glucose) and 22 had type 2 diabetes (Table 1). The type 2 diabetes patients (diabetes duration 6.2 ± 0.4 years) were diet-controlled; none of them were taking oral hypoglycaemic agents or insulin. We also analysed 52 participants with chronic renal disease from diabetic nephropathy, nine patients with hyperparathyroidism before and after parathyroidectomy, and 11 patients who previously had type 1 diabetes (prior to kidney–pancreas transplantation) (Table 1). All participants underwent a regular 3 h frequently sampled IVGTT [9]. 
Table 1 Main characteristics and insulin sensitivity in study groups Study groups per protocol Participant characteristics Insulin sensitivity n Age (years) BMI (kg/m2) Gb (mmol/l) SI (×10−4 min−1 [μU/ml]−1)a CSI (×10−4 min−1 [μU/ml]−1)a p value M (mg min−1 kg−1)b 3 h regular IVGTT Control 114 34.4 ± 1.6 23.6 ± 0.5 4.6 ± 0.05 5.55 ± 0.25 5.81 ± 0.28 0.22 IGT 128 42.8 ± 1.4 27.9 ± 0.6 4.7 ± 0.09 2.58 ± 0.17 2.68 ± 0.20 0.49 Type 2 diabetes 22 41.2 ± 4.8 23.8 ± 0.5 6.3 ± 0.20 2.31 ± 0.29 4.68 ± 0.69 0.0013 Renal disease 52 44.3 ± 2.9 25.7 ± 1.0 5.0 ± 0.08 4.71 ± 0.32 4.34 ± 0.33 0.20 Hyperparathyroidism, pre 9 66.0 ± 3.0 25.1 ± 2.5 5.2 ± 0.22 3.18 ± 0.53 3.87 ± 0.58 0.26 Hyperparathyroidism, post 9 66.0 ± 3.0 24.4 ± 2.3 5.2 ± 0.26 5.34 ± 0.67 6.66 ± 0.96 0.043 Former type 1 diabetes 11 40.0 ± 3.0 26.6 ± 2.0 5.2 ± 0.17 3.39 ± 0.63 2.70 ± 0.49 0.11 3 h INSMOD Type 2 diabetes INSMOD 160 51.2 ± 1.4 29.7 ± 0.4 10.3 ± 0.29 1.23 ± 0.08 1.32 ± 0.08 0.05 1 h IVGTT and clamp NGTCL 171 41.3 ± 1.0 27.4 ± 0.4 4.9 ± 0.05 5.87 ± 0.25 7.02 ± 0.23 IGMCL 55 46.1 ± 1.6 29.1 ± 0.7 5.4 ± 0.08 4.16 ± 0.39 6.08 ± 0.32 Type 2 diabetes clamp 21 52.4 ± 3.0 35.9 ± 3.0 6.7 ± 0.46 3.63 ± 0.54 4.22 ± 0.54 Values are mean ± SE aTo convert values for SI and CSI to SI units (× 10−4 min−1 [pmol/l]−1), multiply by 0.1667 bTo convert values for M to SI units (mmol min−1 kg−1), multiply by 0.005551 Gb, basal glucose ### Type 2 diabetic participants, 3 h insulin-modified IVGTT We analysed from previous studies [10, 11, 12] 160 type 2 diabetic participants who had undergone an insulin-modified, 3 h, frequently sampled IVGTT (INSMOD) with exogenous intravenous infusion of insulin (0.03 or 0.05 U/kg) at 20 min [9] (Table 1). Some of these participants were under pharmacological treatment, with gemfibrozil [10], sulfonylurea or biguanide preparations [11]. ### Participants, 1 h IVGTT and clamp We analysed 247 participants from the Botnia study [13], the EUGENE2 study [14] and another study [15]. 
All these participants underwent IVGTT (for at least 1 h) and 2 h euglycaemic–hyperinsulinaemic glucose clamp. Among participants undergoing the clamp, 171 had NGT (NGTCL), 55 had impaired glucose metabolism (IGMCL), i.e. either impaired fasting glucose or IGT or both, and 21 had type 2 diabetes (Table 1). Seven participants in the type 2 diabetes clamp group had severe obesity and subsequently underwent bariatric surgery (here we only report data before surgery). ### Calculation of insulin sensitivity In the participants with regular and INSMOD data, insulin sensitivity index was estimated by minimal model analysis (SI). In the participants with the clamp, insulin sensitivity was calculated as the mean glucose infusion rate (M) over the last 40 min of the test. For all participants, we calculated a surrogate index of SI, called calculated SI (CSI), with an expression similar to that originally proposed by Galvin et al. [16]. Justification of the difference between our approach and that of Galvin et al. [16] is discussed later. For the participants with regular IVGTT the expression for CSI was: $$\text{CS}_{\text{I}} = \alpha \times \frac{K_{\text{G}}}{\Delta\text{AUC}_{\text{INS}}/T}$$ (1) where α is a constant (scaling factor), KG is the rate of glucose disappearance (slope of log glucose), ΔAUCINS is the AUC of insulin concentration above basal value and T is the time interval between 10 and 50 min (=40 min) when KG and ΔAUCINS are computed. Initial time interval was not zero to avoid possible confounding effects due to mixing. The α constant was assumed equal to the slope of the regression line between the factor KG/(ΔAUCINS/T) and SI in the control group, i.e. α = 0.276. This value was used to calculate CSI in all the participants analysed in this study, including those undergoing INSMOD rather than regular IVGTT or clamp. 
For the participants with INSMOD the expression for CSI was: $$\text{CS}_{\text{I}} = \alpha \times \frac{\text{Average}\left(K_{\text{G1}}, K_{\text{G2}}\right)}{\text{Average}\left(\Delta\text{AUC}_{\text{INS1}}, \Delta\text{AUC}_{\text{INS2}}\right)/T}$$ (2) It is well known that the action of exogenous insulin on glucose disappearance is delayed [17], and hence we assumed a 5 min delay. Since insulin was injected at 20 min, KG1 and ΔAUCINS1 were computed between 10 and 25 min, whereas KG2 and ΔAUCINS2 were computed between 25 and 50 min. ### Statistical analysis Relationships between SI and CSI were investigated by linear regression analysis with no intercept. Difference between the mean value of SI and CSI in each of the different groups of participants was assessed through the paired t test. The same test was used to assess difference in insulin sensitivity in the hyperparathyroidism group before and after surgery. Difference in the mean value of each index among different groups was assessed through ANOVA. Similarly, we analysed the relationship between CSI and M by linear regression and used ANOVA to assess differences of both indices among different groups. Relationships between some variables were also investigated by accounting for measurement errors for both variables in the regression [18]. Normality of distributions was assessed before testing for possible differences in insulin sensitivity indices. In case of non-normal distributions, tests were performed on logarithmically transformed values (this applied to the majority of cases, except hyperparathyroidism and former type 1 diabetes groups). p < 0.05 was considered statistically significant. Values are reported as mean ± SE. 
## Results ### Minimal model and CSI analyses of regular IVGTT Strong correlation between SI and CSI was found in the following groups: control (R2 = 0.89, p < 0.0001, slope = 1.00, 95% CI 0.93–1.07), IGT (R2 = 0.79, p < 0.0001, slope = 0.97, 95% CI 0.89–1.06), renal disease (R2 = 0.85, p < 0.0001, slope = 0.86, 95% CI 0.76-0.97), former type 1 diabetes patients (after kidney pancreas transplantation) (R2 = 0.89, p < 0.0001, slope = 0.75, 95% CI 0.57-0.93) and hyperparathyroidism (R2 = 0.83, p < 0.0001, slope = 1.09, 95% CI 0.68-1.49 before surgery; R2 = 0.96, p < 0.0001, slope = 1.24, 95% CI 1.02–1.46 after surgery) (Fig. 1). In the type 2 diabetes group the correlation between SI and CSI, though weaker than in the other groups, was still significant (R2 = 0.68, p < 0.0001), despite the fact that CSI overestimated SI (slope = 1.75, 95% CI 1.21-2.29). When the participants were considered all together, the correlation remained highly significant (R2 = 0.84, p < 0.0001, slope = 0.99, 95% CI 0.94-1.04). In each group, mean values of SI and CSI (Table 1) were not different except for the type 2 diabetes and hyperparathyroidism after surgery groups, which showed a slight difference as shown by p values (Table 1). Bland–Altman plot for all the participants (Fig. 2a) proved substantial equivalence between the two measurements. The absolute difference between SI and CSI in relation to the AUC of insulin in the time interval T (Fig. 2b) showed that only at low insulin levels did SI and CSI tend to diverge. ANOVA showed that SI was different between control and all the other groups (p < 0.03) except for the hyperparathyroidism after surgery group. Significant differences were also found in the renal disease vs IGT and type 2 diabetes groups (p < 0.0001), and in the hyperparathyroidism after surgery vs IGT and type 2 diabetes groups (p < 0.0006). 
Similar results were found for CSI (p value range: p < 0.0001 to p = 0.0313), except for comparisons of type 2 diabetes with the other groups. It is worth noting the difference in insulin sensitivity between hyperparathyroidism before and after surgery: as expected, SI was increased after surgery (p = 0.021) (Table 1) and similar results were found with CSI (p = 0.008). In the control group, we calculated the relationships between insulin sensitivity and acute insulin response to glucose (AIRG) (mean insulin value above basal in the 0 to 8 min period of the IVGTT). Both SI and CSI showed with AIRG a weak but significant nonlinear inverse relationship, which was better appreciated after performing linear regression analysis on logarithmically transformed values (R2 = 0.19, p = 0.0002 for SI; R2 = 0.09, p = 0.0009 for CSI) (Fig. 3). According to ordinary least-squares regression analysis, the relationship was not strictly hyperbolic, but it was similar with both indices. However, when the analysis was carried out through a regression method accounting for measurement errors in both variables, the relationship turned out to be hyperbolic, as the 95% CI for the slope included −1 for SI (slope: −1.33, 95% CI −2.08, −0.59) and CSI (slope: −1.25, 95% CI −2.15, −0.35). In all the participants, we also analysed insulin sensitivity with respect to BMI. As expected, SI showed an inverse relationship with BMI; in fact, after log-log transformation, a weak but significant linear regression was observed (R2 = 0.19, p < 0.0001), although the relationship was not hyperbolic (according to both regression methods). Similar results were found for CSI (R2 = 0.18, p < 0.0001). Participants were then classified as lean or overweight according to their BMI (threshold 25 kg/m2). 
Both SI and CSI showed significant differences in insulin sensitivity between the two groups (SI = 4.65 ± 0.32 × 10−4 min−1 [μU/ml]−1 lean; 3.09 ± 0.28 overweight; p = 0.0003; CSI = 5.03 ± 0.38 × 10−4 min−1 [μU/ml]−1 lean; 2.99 ± 0.28 overweight; p < 0.0001; to convert values for SI and CSI to SI units (× 10−4 min−1 [pmol/l]−1), multiply by 0.1667). We also studied possible differences in insulin sensitivity due to sex: neither SI nor CSI were different: SI = 3.87 ± 0.17 × 10−4 min−1 (μU/ml)−1 men; 4.09 ± 0.28 women; p > 0.4; CSI = 4.11 ± 0.20 × 10−4 min−1 (μU/ml)−1 men; 4.77 ± 0.33 women; p > 0.07. ### Minimal model and CSI analyses of insulin-modified IVGTT In the type 2 diabetes INSMOD group, SI and CSI showed strong significant correlation, with the slope of the regression line virtually equal to 1 (R2 = 0.85, p < 0.0001, slope = 0.96, 95% CI 0.89-1.02) (Fig. 4a). Bland–Altman plot showed that only a few samples were outside the limits for equivalence (Fig. 4b). The paired t test showed a borderline p value (Table 1). We also classified the participants as obese or non-obese. Since BMI was quite high on average (BMI = 29.7 ± 0.4 kg/m2), we assumed 27.5 as threshold between the two groups. As expected, SI was higher in the non-obese group, with similar results found for CSI (SI = 1.77 ± 0.18 × 10−4 min−1 [μU/ml]−1 non-obese; 0.94 ± 0.08 obese; p < 0.0001; CSI = 1.73 ± 0.17 × 10−4 min−1 [μU/ml]−1 non-obese; 1.10 ± 0.08 obese; p = 0.0002). Neither SI nor CSI were different between men and women (SI = 1.19 ± 0.11 × 10−4 min−1 [μU/ml]−1 men; 1.04 ± 0.14 women; p > 0.5; CSI = 1.30 ± 0.10 × 10−4 min−1 [μU/ml]−1 men; 1.03 ± 0.11 women; p > 0.2). ### Glucose clamp and CSI analyses In all the participants grouped together, M and CSI showed good correlation (R2 = 0.82, p < 0.0001) (Fig. 5a). 
When analysing the participants divided according to glucose tolerance, correlation remained significant (NGTCL: R2 = 0.84, p < 0.0001; IGMCL: R2 = 0.74, p < 0.0001; type 2 diabetes clamp: R2 = 0.81, p < 0.0001) (Fig. 5b). M (Table 1) was different in each group (p < 0.03). Similar differences (p < 0.002) were found with CSI (Table 1) except in the IGMCL and type 2 diabetes clamp groups, where statistical significance was not reached. M was higher in lean than in overweight participants (8.72 ± 0.33 and 5.58 ± 0.18 mg min−1 kg−1, p < 0.0001; to convert values for M to SI units [mmol min−1 kg−1], multiply by 0.005551), as was the case for CSI (7.56 ± 0.38 and 4.26 ± 0.20 × 10−4 min−1 [μU/ml]−1, p < 0.0001). As regards possible differences related to sex, neither M nor CSI showed any difference (p > 0.07). In a subgroup of participants, we corrected M for the steady-state insulin level, but results did not change significantly (not shown). It is worth noting that in the small group of type 2 diabetes clamp participants with severe obesity, 3 h IVGTT data were available, thus SI was computed. As expected, we found agreement between SI and M, with regression coefficient value (R2 = 0.63, p = 0.018) similar to those observed between CSI and M, as reported above. In this specific subgroup, CSI showed a very strong relationship with M (R2 = 0.95, p < 0.0001). ## Discussion The simple index of insulin sensitivity introduced and validated here (CSI) was revealed to be a good surrogate of that from the well accepted and widely used minimal model (SI). To our knowledge, only the study of Galvin et al. [16] suggested a simple index for the assessment of insulin sensitivity from IVGTT limited to 1 h. CSI reflects similar concepts, i.e. the quantification of glucose disappearance rate per changes of insulin, but it overcomes some limitations of that study. In fact, Galvin et al. 
[16] studied the correlation of their index with SI (and also with insulin sensitivity by the glucose clamp), but they did not seek to obtain indices really comparable, their units being different. In addition, they did not present any strategy to correct their index and make it comparable with SI derived from insulin-modified IVGTT. In contrast, CSI includes a time (T) factor (see Eq. 1) yielding the same units as SI and was adapted to be used also with the insulin-modified test (Eq. 2). Furthermore, in Galvin et al. [16], the slopes of the regression lines were far from one and different in every group. Moreover, only small groups of participants were studied (with no diabetic patients) and it was not shown whether their index has abilities, similar to SI, to discriminate between groups or clinical conditions with different degrees of insulin resistance. The Galvin index [16] was then used by Anderson et al. [19], but with essentially the same limitations, which probably prevented its diffusion. Prior to this study, we used calculations similar to those for CSI to compute a sensitivity index in mice [20], although not with exactly the same formula and without comparison with the clamp. After correcting our index with a factor derived from regression analysis of the control group (quite a large group, with a wide range of insulin sensitivity), several other groups of participants with different degrees of glucose tolerance and heterogeneous clinical characteristics were analysed. In the majority of groups, we found a good correlation between SI and CSI, and also CSI values similar to SI, as mirrored both by the slope of the regression lines, which were not (or only slightly) different from 1 (see 95% CI), and by the not significantly different mean values. The correction factor α included in the CSI expression was introduced to scale the values of our new index to those calculated with the minimal model. 
Thus, the interpretation of results obtained by CSI will be facilitated, given the previous wide experience with SI. This correction factor does not have a specific physiological meaning, similarly to the variables included in other empirical methods for the calculation of insulin sensitivity, such as HOMA-insulin resistance (IR) [21] or Stumvoll’s index [22]. The relevant aspect of the scaling operation was that the same value of the correction factor (α = 0.276) was proved to be appropriate in every group of participants (except type 2 diabetes, as discussed below). In fact, all the results were obtained by using the same correction factor in each group that underwent the regular IVGTT. The same α value was also proved correct in those groups of participants who underwent INSMOD (type 2 diabetes INSMOD) or the clamp (NGTCL, IGMCL, type 2 diabetes clamp). The comparison between SI and CSI was not completely satisfactory in type 2 diabetes (regular IVGTT). The fact that in situations of high insulin resistance CSI tended to overestimate SI is an important issue and should be discussed within the frame of basic questions, such as: how reliable is a low SI? This has been much debated among investigators using IVGTT [23, 24]. Thus, we acknowledge that, in situations of low insulin sensitivity, CSI may suffer from inaccuracy, but SI may also exhibit inaccuracy in those conditions [24, 25]. As regards our data, insulin levels in the type 2 diabetes group were usually low, but tended to remain higher than the fasting value: i.e. insulin levels did not return to the basal value during the whole 3 h IVGTT time interval. Thus, in the minimal model approach, the analysis of the last part of the IVGTT tended to decrease the SI value. Since the last part of the complete test is not accounted for by CSI, some discrepancy between the two indices may occur. 
On the other hand, the finding that in the majority of groups CSI behaves similarly to SI suggests that the information provided by the last part of the IVGTT is usually consistent with that provided by the first part, where CSI is calculated. Due to the unsatisfactory results in the type 2 diabetes group, we adapted the CSI expression to make it usable with data from the insulin-modified IVGTT as recommended in conditions of poor insulin response [26]. We analysed a large group of type 2 diabetic patients subjected to INSMOD where, as expected, CSI and SI showed low values of insulin sensitivity. They also exhibited a strong correlation with regression slope almost identical with 1, confirming that when dealing with low insulin sensitivity it is recommended to carry out the insulin-modified test even with the short 1 h protocol. We also analysed 208 insulin-modified IVGTT from 146 women with a history of gestational diabetes, who were non-diabetic at the time of examination [27]. We found strong relationship between SI and CSI, with R2 = 0.93 and slope of the regression almost equal to 1 (not shown). However, in non-diabetic participants the regular IVGTT has proven adequate for calculating CSI with sufficient accuracy; hence the insulin-modified protocol is not strictly necessary in these participants. It should be noted that other possible expressions were tested for the calculation of CSI with the insulin-modified IVGTT, such as the average between 
$$\frac{\alpha \times K_{\text{G1}}}{\Delta\text{AUC}_{\text{INS1}}/T_{1}}$$ and $$\frac{\alpha \times K_{\text{G2}}}{\Delta\text{AUC}_{\text{INS2}}/T_{2}}$$, with T1 = 15 and T2 = 25 min, and also the second expression alone (i.e. only post-injection information). However, the best results in diabetic and non-diabetic participants were obtained by combining pre- and post-injection information as in Equation (2). CSI was able to reproduce known findings related to insulin sensitivity. The existence of nonlinear inverse (hyperbolic) relationship between insulin sensitivity and insulin release was postulated some years ago [28] and several subsequent studies [29] have confirmed this finding, although it has recently been suggested that the hyperbola may not be evident in some groups of participants [30, 31, 32]. Our control group exhibited a weak, but still significant inverse relationship between insulin sensitivity and AIRG. According to traditional regression analysis, the relationship was not strictly hyperbolic, but when a more refined regression model was used the hyperbolic relationship emerged. It is worth noting that SI and CSI provided similar results in both cases. Insulin sensitivity was higher in lean than in overweight or obese participants with both indices, which also showed a nonlinear inverse relationship (though weak) with BMI, in agreement with previous studies [33]. As regards the effect of sex on insulin sensitivity, results from SI and CSI were again similar and in agreement with previous studies [34]. 
Even though a good agreement was found between SI and CSI, we aimed to validate CSI against the measurement obtained from the glucose clamp. CSI exhibited a good degree of correlation with M and a similar ability to discriminate between participants with different glucose tolerance, as well as between lean and overweight participants. This agreement with the clamp further strengthened the ability of CSI to describe insulin sensitivity in different metabolic conditions. In this study we included three groups of type 2 diabetic patients. As regards the type 2 diabetes and type 2 diabetes INSMOD groups, it must be noted (Table 1) that both SI and CSI were higher in the former than the latter (p < 0.0001 by ANOVA). This possible inconsistency warrants further comment. First, it cannot be excluded that this difference in insulin sensitivity was real, since type 2 diabetic populations may be significantly heterogeneous [35]. On the other hand, as already pointed out, SI may be inaccurate in participants with low insulin values, and CSI exhibits similar limitations in those conditions. Another confounding factor may be the fact that the type 2 diabetes and type 2 diabetes INSMOD groups were studied in different laboratories, probably using different insulin assays: this remains a problem known to be a possible source of error [36]. In any case, we believe that the lower insulin sensitivity in the type 2 diabetes INSMOD than in the type 2 diabetes group may not be an artefact: in fact, HOMA-IR was also clearly higher in the former (7.85 vs 3.47 [non-dimensional], p < 0.007), possibly also due the much higher BMI (Table 1). Similar comments hold for the significant difference in CSI values (p < 0.0001) between IGT and IGMCL. In conclusion, although the minimal model analysis remains the reference method to assess insulin sensitivity from the 3 h IVGTT, the proposed simple, empirical index CSI generally proved to be a reliable index. 
In the condition of low insulin sensitivity, quite common in type 2 diabetes, analysis of insulin-modified rather than regular IVGTT data should be performed to obtain more reliable estimations, although it is known that in such conditions the assessment of insulin sensitivity becomes intrinsically more uncertain and possibly inaccurate. The great advantage of CSI is that it allows assessment of insulin sensitivity from IVGTT data limited to 1 h, which cannot be analysed with the minimal model. The possibility of analysing less expensive short IVGTTs makes performance of the test easier and less of a burden for participants and investigators, allowing in larger populations the simultaneous assessment of insulin sensitivity and beta cell function (e.g. AIRG variable) with a simple approach. CSI also allows retrospective studies on all the short IVGTTs commonly performed before the introduction of the minimal model. ## Acknowledgements The Botnia study was supported by a grant from the Sigrid Juselius Foundation. The EUGENE2 study was supported by the European Community (EUGENE2, n. LSHM-CT-2004-512013). We would like to thank A. Mari (ISIB-CNR, Padova, Italy), G. Mingrone (Università Cattolica Sacro Cuore, Rome, Italy), S. Salinari (University of Rome La Sapienza, Rome, Italy) and A. Kautzky-Willer (Medical University of Vienna, Vienna, Austria) for their help and suggestions. Preliminary results were presented at the EASD 2008 Annual Meeting in Rome, Italy. ### Duality of interest L. Groop has been a consultant for and served on advisory boards for sanofi-aventis, GSK, Novartis, Merck, Tethys Bioscience and Xoma, and received lecture fees from Lilly and Novartis. G. Pacini is currently consultant for Novo-Nordisk. All other authors declare that there is no duality of interest associated with this manuscript. • A. Tura • 1 • 1 • E. Succurro • 2 • L. Groop • 3 • 4 • G. Sesti • 2 • G. Pacini • 1
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 2, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7915416359901428, "perplexity": 1968.2417978356111}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-30/segments/1500549425766.58/warc/CC-MAIN-20170726042247-20170726062247-00184.warc.gz"}
http://mathhelpforum.com/discrete-math/182720-number-possibilities.html
# Math Help - Number of possibilities? 1. ## Number of possibilities? A class of 20 is selecting a new president, with 5 candidates. How many different ways can the tally come out? 2. Originally Posted by TeaWithoutMussolini A class of 20 is selecting a new president, with 5 candidates. How many different ways can the tally come out? Is that $\binom{20+5-1}{20}~?$
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 1, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6463851928710938, "perplexity": 2122.932339809594}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-15/segments/1398223211700.16/warc/CC-MAIN-20140423032011-00593-ip-10-147-4-33.ec2.internal.warc.gz"}
http://math.stackexchange.com/questions/491372/fourier-coefficients-frequency-shifting
# Fourier Coefficients : Frequency Shifting The FS coefficients of a signal $x(t)$ is given by $$x(t) \longrightarrow C_k$$ The frequency shift property says that: if we multiply a signal $x(t)$ by $e^{j m\omega_0 t}$ the fourier series coefficients are now defined by $C_{k-m}$. $$e^{j m \omega_0 t}x(t) \longrightarrow C_{k-m}$$ Now consider a signal $x(t) = \sin \dfrac{\pi}{4}t$ The fourier series coefficients are $C_1=\dfrac{1}{2j},C_{-1}=\dfrac{-1}{2j}$. Now consider a signal $y(t) = e^{j\frac{\pi}{4} t}x(t)=e^{j\frac{\pi}{4} t}\sin \dfrac{\pi}{4} t$ The fourier series coefficients according to the property should be $(m=1)$: $$C_{1-1}=C_0=\dfrac{1}{2j}$$ $$C_{-1-1}=C_{-2}=-\dfrac{1}{2j}$$ But if we expand the $e^{j\frac{\pi}{4} t}\sin \dfrac{\pi}{4} t$ using the formula for $\sin t = \dfrac{e^{jt}-e^{-jt}}{2j}$ we get the FS Coefficients as $$C_0 = -\dfrac{1}{2j}$$ $$C_2 = \dfrac{1}{2j}$$ Can somebody explain this behavior? Am I using the property correctly or not ? Thanks -
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9980466961860657, "perplexity": 309.5667784006167}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-06/segments/1422121785385.35/warc/CC-MAIN-20150124174945-00198-ip-10-180-212-252.ec2.internal.warc.gz"}
http://eprint.iacr.org/2011/131/20110318:122933
## Cryptology ePrint Archive: Report 2011/131 Trapdoor oneway functions associated with exponentiation Virendra Sule Abstract: This paper shows that if exponentiation $b=X^{k}$ in groups of finite field units or $B=[k]X$ in elliptic curves is considered as encryption of $X$ with exponent $k$ treated as symmetric key, then the decryption or the computation of $X$ from $b$ (respectively $B$) can be achieved in polynomial time with a high probability under random choice of $k$. Since given $X$ and $b$ or $B$ the problem of computing the discrete log $k$ is not known to have a polynomial time solution, the exponentiation has a trapdoor property associated with it. This paper makes this property precise. Further the decryption problem is a special case of a general problem of solving equations in groups. Such equations lead to more such trapdoor one way functions when solvable in polynomial time. The paper considers single and two variable equations on above groups and determines their solvability. Category / Keywords: secret-key cryptography / Exponential function, elliptic curves, division polynomials Date: received 15 Mar 2011 Contact author: vrs at ee iitb ac in Available format(s): PDF | BibTeX Citation Short URL: ia.cr/2011/131 [ Cryptology ePrint archive ]
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8000411987304688, "perplexity": 1243.7969601249254}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-07/segments/1454701154221.36/warc/CC-MAIN-20160205193914-00107-ip-10-236-182-209.ec2.internal.warc.gz"}
https://gmatclub.com/forum/if-a-and-b-are-positive-integers-is-3a-2-b-divisible-by-210448.html
GMAT Question of the Day - Daily to your Mailbox; hard ones only It is currently 24 Apr 2019, 01:43 ### GMAT Club Daily Prep #### Thank you for using the timer - this advanced tool can estimate your performance and suggest more practice questions. We have subscribed you to Daily Prep Questions via email. Customized for You we will pick new questions that match your level based on your Timer History Track every week, we’ll send you an estimated GMAT score based on your performance Practice Pays we will pick new questions that match your level based on your Timer History # If a and b are positive integers, is 3a^2*b divisible by 60? Author Message TAGS: ### Hide Tags Math Expert Joined: 02 Sep 2009 Posts: 54493 If a and b are positive integers, is 3a^2*b divisible by 60?  [#permalink] ### Show Tags 20 Dec 2015, 06:00 00:00 Difficulty: 35% (medium) Question Stats: 70% (01:30) correct 30% (01:35) wrong based on 225 sessions ### HideShow timer Statistics If a and b are positive integers, is 3a^2*b divisible by 60? (1) a is divisible by 10. (2) b is divisible by 18. _________________ Senior Manager Joined: 10 Mar 2013 Posts: 495 Location: Germany Concentration: Finance, Entrepreneurship GMAT 1: 580 Q46 V24 GPA: 3.88 WE: Information Technology (Consulting) Re: If a and b are positive integers, is 3a^2*b divisible by 60?  [#permalink] ### Show Tags 20 Dec 2015, 06:25 1 Bunuel wrote: If a and b are positive integers, is 3a^2*b divisible by 60? (1) a is divisible by 10. (2) b is divisible by 18. Question: $$\frac{3b*a^2}{60}$$ (1) $$\frac{3b*(10k)^2}{60}$$ = $$\frac{b*(10k)^2}{20}$$ a is a multiple of 10 and >0 means even if it's only equal to 10 it's divisible by 20 in the last expression. Sufficient (2) Clearly not sufficient _________________ When you’re up, your friends know who you are. When you’re down, you know who your friends are. 
800Score ONLY QUANT CAT1 51, CAT2 50, CAT3 50 GMAT PREP 670 MGMAT CAT 630 KAPLAN CAT 660 Intern Joined: 08 Nov 2015 Posts: 31 Schools: Pepperdine '19 Re: If a and b are positive integers, is 3a^2*b divisible by 60?  [#permalink] ### Show Tags 30 Jan 2016, 14:53 1 Some people may choose c because they forget to take into account that the question stem has a^2 in it. So, if a is divisible by 10 = 5*2. a^2 will have at least two 5s and two 2s. Thats what the question stem is indirectly asking. Does a^2b have two 2s and one 5. Thus, 1 is sufficient. Current Student Joined: 12 Aug 2015 Posts: 2613 Schools: Boston U '20 (M) GRE 1: Q169 V154 Re: If a and b are positive integers, is 3a^2*b divisible by 60?  [#permalink] ### Show Tags 16 Mar 2016, 05:32 For a moment i was about to mark C then realised that 10 *10 which is the least value of A^2 will be divisible by 20 hence choose A nice question _________________ Manager Joined: 27 Dec 2016 Posts: 232 Concentration: Marketing, Social Entrepreneurship GPA: 3.65 WE: Marketing (Education) Re: If a and b are positive integers, is 3*a^2*b  [#permalink] ### Show Tags 05 Sep 2017, 07:59 If a and b are positive integers, is 3*a^2*b divisible by 60? 1) a is divisible by 10. 2) b is divisible by 18. Kudos if it helps. Is $$3a^2b$$ divisible by 60? - We can change 60 into $$2^2*3*5$$. - Because we already have 3 in the numerator, so our job is to make sure whether a or b has $$2^2$$ and 5 as its factor. #1 - a divisible by 10 or $$2^5$$. Since our numerator change a into $$a^2$$, so we MUST HAVE $$2^2*5^2$$ in our numerator. - Whatever value of b, $$3a^2b$$ divisible by 60. SUFFICIENT. #2 - b divisible by 18 or $$2*3^2$$, Since we still need to have 5 as factor, we do not know whether a have this factor. - Divisibility of $$3a^2b$$ by 60 depends solely on the a value - which we don't know here. INSUFFICIENT. A. _________________ There's an app for that - Steve Jobs. 
Re: If a and b are positive integers, is 3*a^2*b   [#permalink] 05 Sep 2017, 07:59 Display posts from previous: Sort by
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5398282408714294, "perplexity": 3691.9624710458093}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-18/segments/1555578636101.74/warc/CC-MAIN-20190424074540-20190424100540-00413.warc.gz"}
https://spacedonkey.de/date/2015/07/?post_type=post&cat=43
## Test 1 ``````<h7>Test Header</h7> <strong>Strong Text</strong> This is a bit of text that goes on for awhile just to create the illusion that there is something important to say. After some time we might want to create a little teaser for [expand title="Read more" swaptitle="Read Less" trigclass="arrowright" targpos="inline" targtag="span" trigpos="below"]this text that was hidden but now is not. [/expand] `````` Strong Text This is a bit of text that goes on for awhile just to create the illusion that there is something important to say. After some time we might want to create a little teaser for this text that was hidden but now is not. What if we placed all of this inside a LI element: Strong Text This is a bit of text that goes on for awhile just to create the illusion that there is something important to say. After some time we might want to create a little teaser for this text that was hidden but now is not. ## Paragraphs If the shortcode needs to be placed inside a paragraph, it’s best to use the roll-your-own method like so: ``````This is a paragraph of text that we just are going to throw in here. You might not like the text, as it really just rambles on and on, but it does fill the purpose of filler text. this is the <div id="target-paratest" class="collapseomatic_content_inline colomat-inline span_fix" style="display: inline;">last text in the first paragraph. <p>This is the next paragraph of text</p> <p>This is the final paragraph of text</p></div> <span class="collapseomatic span_fix noarrow" id="paratest" title="Read more..." tabindex="0">Read more...</span> <span id="swap-paratest" class="colomat-swap" style="display:none;">Read less...</span> <p>Now let's see what happens.</p> `````` This is a paragraph of text that we just are going to throw in here. You might not like the text, as it really just rambles on and on, but it does fill the purpose of filler text. this is the last text in the first paragraph. 
This is the next paragraph of text This is the final paragraph of text
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8376098275184631, "perplexity": 1509.5030081131727}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-43/segments/1570987781397.63/warc/CC-MAIN-20191021171509-20191021195009-00515.warc.gz"}
https://mail.scipy.org/pipermail/numpy-tickets/2008-January/001604.html
# [Numpy-tickets] [NumPy] #638: var should take absolute value for complex numbers. NumPy numpy-tickets@scipy.... Sat Jan 12 02:45:05 CST 2008 #638: var should take absolute value for complex numbers. -------------------------+-------------------------------------------------- Reporter: akumar | Owner: somebody Type: defect | Status: new Priority: normal | Milestone: 1.0.5 Component: numpy.core | Version: none Severity: normal | Resolution: Keywords: var complex | -------------------------+-------------------------------------------------- Comment (by cdavid): I though a bit about it since the discussion on the numpy ML, and I have to say I disagree with Robert on this one. I don't think the only meaningful definition is to treat C as R^2. Variance is a special case of covariance, and for complex random variables, covariance of X and Y, assuming they are centered, is E[X conj(Y)] with conj(Y) the conjugate of Y. This is the definition used in statistical signal processing (at least the one I have always seen) When considering complex random variables, it is often assumed some kind of properties of the real part and the complex part (such as they have the same variance, for example). For example, if you use complex Gaussian random variables, by definition, Z = X + jY, with X and Y independent Gaussian and same variance \sigma, Z have a variance equal to 2 * \sigma variance, that is the trace of the covariance matrix of the real random vector (X, Y), also obtained using the definition \sigma_Z \triangleq \mathbb{E}[Z \bar{Z}]. With Robert's definition, even for scalar complex random variables, the density of a complex normal would involve matrices: having a definition using only scalar is more appealing IMHO. Those 2 arguments, variance as a special case of the covariance of two variable, and staying scalar for complex random variables seem pretty strong to me. 
-- Ticket URL: <http://scipy.org/scipy/numpy/ticket/638#comment:11> NumPy <http://projects.scipy.org/scipy/numpy> The fundamental package needed for scientific computing with Python.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.93289715051651, "perplexity": 2888.690713608488}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-48/segments/1448398474527.16/warc/CC-MAIN-20151124205434-00288-ip-10-71-132-137.ec2.internal.warc.gz"}
http://astrokode.wordpress.com/2012/11/18/how-to-make-random-point-or-direction-at-spherical-coordinate/
# How to make random point/direction in spherical coordinate If we want to pick a random point on the surface of a sphere, it is incorrect to select spherical coordinates $\theta$ and $\phi$ from uniform distributions $0 \leq \phi \leq 2\pi$ and $0 \leq \theta \leq \pi$, since the area element $\sin(\theta) d\theta d\phi$ is a function of $\theta$, and hence points picked in this way will be “bunched” near the poles (top). To obtain points such that any small area on the sphere is expected to contain the same number of points, choose $r_{1}$ and $r_{2}$ to be random variates on (0,1). Then $\phi = 2 \pi r_{1}$ $\theta = arccos(1 - 2 r_{2} )$ gives the spherical coordinates for a set of points which are uniformly distributed over $S^{2}$ (bottom). not uniform distribution uniform distribution // Copyleft (c) 2012. Ridlo W. Wibowo #include<iostream> #include<stdlib.h> #include<math.h> #include<stdio.h> #include<time.h> #include<fstream> #define _USE_MATH_DEFINES using namespace std; double unirand(){return (double) rand()/(double) RAND_MAX;} int main(){ double l = 0.1, phi, theta; double x, y, z; int n=10000; srand(time(NULL)); ofstream out("bola.txt"); for (int i=0;i<n;i++){ phi = 2.*M_PI*unirand(); theta = acos(1. - 2.*unirand()); //theta = M_PI*unirand(); x = l*sin(theta)*cos(phi); y = l*sin(theta)*sin(phi); z = l*cos(theta); out << x << " " << y << " " << z << " " << endl; } out.close(); return 0; }
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 11, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8401361107826233, "perplexity": 1074.391652129621}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-35/segments/1409535921869.7/warc/CC-MAIN-20140901014521-00381-ip-10-180-136-8.ec2.internal.warc.gz"}
http://tex.stackexchange.com/questions/3983/can-i-use-pgfplots-to-make-a-boxplot?answertab=oldest
# Can I use pgfplots to make a boxplot My current plotting tool for my papers is pgfplots for nice consistent plots. Now I would like to add a boxplot. Is this possible with help of pgfplots, or any helper package? - I am not aware of any easy-to-use packages for producing box plots in pgfplots but this TeXample example and this topic at LaTeX Community show that it's not too hard to produce one. - PGFPlots supports boxplots natively as of version 1.8 See Boxplot in latex for an example. The remainder of this answer should be considered obsolete. There is a much improved version of this code at Simpler boxplots in pgfplots - is this possible?. It allows creating box plots with a single command, and adds much more flexibility to the data format and the plot styles: Not out of the box, and you'd have to do the quantile calculations outside of PGFplots, but then you can draw box plots with a bit of style trickery. This code \begin{axis} [enlarge x limits=0.5,xtick=data] \addplot [box plot median] table {testdata.dat}; \addplot [box plot box] table {testdata.dat}; \addplot [box plot top whisker] table {testdata.dat}; \addplot [box plot bottom whisker] table {testdata.dat}; \end{axis} can generate this plot if testdata.dat is of the form index median box_top box_bottom whisker_top whisker_bottom Here's a full compilable example: \documentclass{article} \usepackage{pgfplots} \usepackage{filecontents} \begin{filecontents}{testdata.dat} 0 1 1.2 0.4 1.5 0.2 1 2 2.3 1.5 2.7 1 2 0.7 1.4 0.5 1.9 0.1 \end{filecontents} \pgfplotsset{ box plot/.style={ /pgfplots/.cd, black, only marks, mark=-, mark size=1em, /pgfplots/error bars/.cd, y dir=plus, y explicit, }, box plot box/.style={ /pgfplots/error bars/draw error bar/.code 2 args={% \draw ##1 -- ++(1em,0pt) |- ##2 -- ++(-1em,0pt) |- ##1 -- cycle; }, /pgfplots/table/.cd, y index=2, y error expr={\thisrowno{3}-\thisrowno{2}}, /pgfplots/box plot }, box plot top whisker/.style={ /pgfplots/error bars/draw error bar/.code 2 args={% 
\pgfkeysgetvalue{/pgfplots/error bars/error mark}% {\pgfplotserrorbarsmark}% \pgfkeysgetvalue{/pgfplots/error bars/error mark options}% {\pgfplotserrorbarsmarkopts}% \path ##1 -- ##2; }, /pgfplots/table/.cd, y index=4, y error expr={\thisrowno{2}-\thisrowno{4}}, /pgfplots/box plot }, box plot bottom whisker/.style={ /pgfplots/error bars/draw error bar/.code 2 args={% \pgfkeysgetvalue{/pgfplots/error bars/error mark}% {\pgfplotserrorbarsmark}% \pgfkeysgetvalue{/pgfplots/error bars/error mark options}% {\pgfplotserrorbarsmarkopts}% \path ##1 -- ##2; }, /pgfplots/table/.cd, y index=5, y error expr={\thisrowno{3}-\thisrowno{5}}, /pgfplots/box plot }, box plot median/.style={ /pgfplots/box plot } } \begin{document} \begin{tikzpicture} \begin{axis} [enlarge x limits=0.5,xtick=data] \addplot [box plot median] table {testdata.dat}; \addplot [box plot box] table {testdata.dat}; \addplot [box plot top whisker] table {testdata.dat}; \addplot [box plot bottom whisker] table {testdata.dat}; \end{axis} \end{tikzpicture} \end{document} - I'm having problems getting this to work with symbolic x coords. Is there something special I have to set? –  knittl Jun 27 '12 at 16:41 @knittl: It works fine for me. Could you open a new question with an example where it doesn't work? –  Jake Jun 27 '12 at 16:48 I've created a new question: tex.stackexchange.com/q/61446/15110 –  knittl Jun 27 '12 at 16:57 I'm trying to plot the real data values behind the boxes. I already tried on background layer (from backgrounds library) for the data points and fill=white for the boxes to no avail. Is there something about error bars, I'm missing (always drawn "in the back")? –  knittl Jul 25 '12 at 17:44 @knittl: If you put the \addplot command for your data before the box plot commands, the lines should be plotted behind the boxplots. Markers will always be drawn on top, if you want to avoid that, you'll have to use a second axis environment that coincides with the first one. 
You might want to open a new question if this doesn't work. –  Jake Jul 25 '12 at 18:28 For every point more than 3/2 times the interquartile range from the end of a box, is a dot. The only special optional arguments, beside all other which are valid for drawing lines and filling areas, are IQLfactor, barwidth, and arrowlength, where the latter is a factor which is multiplied with the barwidth for the line ends. The IQLfactor, preset to 1.5, defines the area for the outliers. Run it with xelatex \documentclass{article} \usepackage{pst-plot} \begin{document} \begin{pspicture}(-1,-1)(12,14) \psset{yunit=0.1,fillstyle=solid} \psaxes[dy=1cm,Dy=10,ticksize=4pt 0,axesstyle=frame](0,0)(12,130) \rput(1,0){\psBoxplot[fillcolor=red!30]{ 100 90 120 115 120 110 100 110 100 90 100 100 120 120 120}}\rput(1,105){2001} \rput(3,0){\psBoxplot[arrowlength=0.5,fillcolor=blue!30]{ 90 120 115 116 115 110 90 130 120 120 120 85 100 130 130}}\rput(3,107){2008} \rput(5,0){\psBoxplot[barwidth=40pt,arrowlength=1.2,fillcolor=red!30]{ 35 70 90 60 100 60 60 80 80 60 50 55 90 70 70}}\rput(5,65){2001} \rput(7,0){\psBoxplot[barwidth=40pt,fillcolor=blue!30]{ 60 65 60 75 75 60 50 90 95 60 65 45 45 60 90}}\rput(7,65){2008} \rput(9,0){\psBoxplot[fillcolor=red!30]{ 20 20 25 20 15 20 20 25 30 20 20 20 30 30 30}}\rput(9,22){2001} \rput(11,0){\psBoxplot[fillcolor=blue!30,linestyle=dashed]{ 20 30 20 35 35 20 20 60 50 20 35 15 30 20 40}}\rput(11,25){2008} \end{pspicture} \end{document} - This is so cool! I want to re-purpose this for generating a performance plot. How would I display dates instead of numbers along the x axis as well as add a description of what the x and y axis are? Sorry, I am rather new to LaTeX. –  The Dude May 8 '12 at 22:55 @TheDude: can you give an example (maybe a graphic) of what you really whant to show –  Herbert May 23 '12 at 12:19 If you use R, you can output the boxplot using tikzDevice. 
Here's an example: library(tikzDevice) tikz('normal.tex', standAlone = TRUE, width=5, height=5) boxplot(mpg~cyl,data=mtcars, main="Car Milage Data", + xlab="Number of Cylinders", ylab="Miles Per Gallon") dev.off() - QtiPlot is a free program that can take your spreadsheet data and create a box plot with a consistent look. You can change the title, x-axis, etc., and then export the image into a .tex file thereby generating all the code from \begin{tikzpicture} to \end{tikzpicture}. Copy that code into your LaTeX document. Here is what I generated from some sample data. -
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5934740900993347, "perplexity": 2900.5684807455527}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-35/segments/1440644064160.12/warc/CC-MAIN-20150827025424-00318-ip-10-171-96-226.ec2.internal.warc.gz"}
https://www.physicsforums.com/threads/calculues-questions.377738/
# Calculues Questions 1. Feb 12, 2010 ### TheForumLord 1. The problem statement, all variables and given/known data A. Let $$g(x)= \sigma\frac{1}{sqrt(n)}(x^{2n}-x^{2n+1})$$ Prove g(x) is continous in [0,1]. B. Let f be a function such as f(0)=1 and there's a neighouhood of x=0 in which : $$f ' (x)= 1+(f(x))^{10}$$ . Find the MacLorin Polynom of degree 3 of f(x). 2. Relevant equations 3. The attempt at a solution 2. Feb 12, 2010 ### Staff: Mentor For A you need to show that g is continuous at each point in [0, 1]. What's the definition of continuity of a function at a point? Do you have to do this by using the definition of continuity or can you use the fact that this is a polynomial and all polynomials are everywhere continuous? For B you need f(0), f'(0), f''(0), and f'''(0). You already are given that f(0) = 1, and you have f'(x), which you can evaluate at x = 0. To find f''(0), you need to find f''(x), which you can do by differentiating f'(x), and then evaluate f''(x) at x = 0. To find f'''(0), differentiate f''(x), and then evaluate at x = 0. 3. Feb 12, 2010 ### LCKurtz That's "Maclaurin Polynomial". Well, don't you just need f(0), f'(0), f''(0) and f''(0) to calculate that series? You are given formulas for f(0) and f'(x). You must need a couple more derivatives. You might need the chain rule. Show us your derivatives. 4. Feb 13, 2010 ### TheForumLord Well, B is completely understandable... About A->I need to show it's continous using power-series theorems...If I'll prove that the given power-series is convergeing uniformly to g - I'll be done...I've no idea about it... I'll be delighted to get some help Thanks! 5. Feb 13, 2010 ### HallsofIvy Power series? g(x) is a polynomial! You don't need to worry about any power series or convergence! A is not asking about a limit as n goes to infinity is it? It is just about a single polynomial for a fixed value of n. Or was that $\sigma$ supposed to be $\Sigma$? That is, is this a sum over all n? 
In that case, because it is a power series, it converges uniformly inside its radius of convergence. You need only show that its radius of convergence includes [0, 1]. Last edited by a moderator: Feb 13, 2010 6. Feb 13, 2010 ### TheForumLord That's excatly what I can't understand....how can I find the eadius of con. in this specific case? tnx 7. Feb 13, 2010 ### Staff: Mentor TheForumLord, A fair amount of time has been wasted because we didn't understand what you were trying to communicate. It has now come to light that your first problem problem is a summation. The Greek alphabet has upper case letters and lower case letters. In particular, upper case sigma, $\Sigma$, is used to represent summations. Lower case sigma, $\sigma$, is used in statistics to represent the population standard deviation. I interpreted $\sigma$ in this problem as a constant. It didn't occur to me that you really meant a summation. Also, at this stage of your mathematical education, you really ought to learn how to spell "calculus." It's clear to me that you're not likely to be in the finals of a math spelling bee, but at least get calculus right. 8. Feb 13, 2010 ### TheForumLord Dear Mark44... My english is pretty lame indeed but in this particular case, writing calculus in a wrong way was just a typing mistake - which can occure to anyone.... I didn't know how to write Upper case sigma in Latex so plz don't judge me... 9. Feb 13, 2010 ### Staff: Mentor Sure, anyone can make a typo, but you can eliminate at least some of them by looking over what you've written before you hit the submit button. [ tex] \sigma[/tex] or [ itex] \sigma[/itex] (without the leading space) produces $\sigma$. [ tex] \Sigma[/tex] or [ itex] \Sigma[/itex] (without the leading space) produces $\Sigma$. Same for all the rest of the Greek letters. 10. Feb 13, 2010 ### TheForumLord Thanks 11. Feb 14, 2010 ### HallsofIvy Better is \sum : $$\sum$$. 
By the way, just clicking on a formula in any post will show you the LaTex code used for it. Your series can be written by separating the even and odd powers- it is $\sum a_mx^m$ with $$a_m= \sqrt{\frac{2}{m}}$$ if m is even and $$a_m= \sqrt{\frac{2}{m-1}}$$ if m is odd. As for finding the radius of convergence, using the ratio test gives $$|x|< \sqrt{\frac{m+1}{m}}$$ if n= 2m and $$|x|< \sqrt{\frac{m}{m-1}}$$ if n= 2m+1 What is the limit of those as n goes to infinity? Of course, you will need to check if the sum converges at x= 1 but that is easy.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9479246139526367, "perplexity": 1089.348299833782}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257647406.46/warc/CC-MAIN-20180320111412-20180320131412-00639.warc.gz"}
https://gateoverflow.in/661/gate2000-2-14
2.2k views Consider the values of $A$ = 2.0 x 10$^{30}$, $B$ = -2.0 x 10$^{30}$, $C$ = 1.0, and the sequence X:= A + B Y:= A + C X:= X + C Y:= Y + B executed on a computer where floating point numbers are represented with $32$ bits. The values for $X$ and $Y$ will be 1. $X = 1.0, Y = 1.0$ 2. $X = 1.0, Y = 0.0$ 3. $X = 0.0, Y = 1.0$ 4. $X = 0.0, Y = 0.0$ edited | 2.2k views Given 32 bits representation. So, the maximum precision can be 32 bits (In 32-bit IEEE representation, maximum precision is 24 bits but we take best case here). This means approximately 10 digits. A = 2.0 * 10^30, C = 1.0 So, A + C should make the 31st digit to 1, which is surely outside the precision level of A (it is 31st digit and not 31st bit). So, this addition will just return the value of A which will be assigned to Y. So, Y + B will return 0.0 while X + C will return 1.0. B choice. Sample program if any one wants to try: #include<stdio.h> int main() { float a = 2.0e30; float b = -2.0e30; float c = 1.0; float y = a+c; printf("a = %0.25f y = %0.25f\n",a, y); y = y + b; float x = a + b; printf("x = %0.25f\n",x); x = x + c; printf("x = %0.25f\n",x); } selected by +1 @Arjun sir Thanks a lot for clarifying But, one small doubt : precision decreases from 32 bits possible to 24 bits. Shouldn't it be 23 bits Out of 32 : 1 bit for sign, 8 for Biased exponent and remaining 23 for Mantissa 0 Why precision is 10 digits? Shouldn't it be 8 digits? Maximum number in 24 bits is $2^{24}-1$ Now $2^{24}$=$10^x$ => x = log($2^{24}$)base 10 =>log($10^{8}$)base 10 ( $2^{10}$=$10^{3}$) x=8,so maximum precision should be approx 8.Please verify once +1 Given 32 bits representation. So, the maximum precision can be 32 bits (In 32-bit IEEE representation, maximum precision is 24 bits but we take best case here). This means approximately 10 digits. 0 @RamSharma1 ,is is answer to ,my above comment? I didnt get you. 
@Bikaram Sir,Arjun Sir:- A = 2.0 * 1030, C = 1.0 When we add 1 ,then it will set Least significant digit as 1 so we will need only 30 digits only.Why adding one number will increase digits by 1,why will number of digits increases from 30 to 31? If i say 2*10^3 + 1 = 2001. Number of digits are same as 2*10^3. Can you clarify? +1 @rahul first of all we are not considering here IEEE representation so we are using all 32 bits instead of 24 bits to represent the mantissa. and 2*1030 is 31 bits ... 2 followed by 30 zeros.... 0 ok .adding one i will set least significant digit to 1.I am not adding any extra digit. So it means that the given A itself will not be represent with precision of 31 digits,because if A can be represented then surely A+C can also be represented? 0 @rahul read above comments by me and by arjun sir ..ur doubt will be cleared 0 thanx a lot Bikram sir. Very cogent solution. 0 @vs can you give a small versionn of example it wil clarify more. 0 that means 32 bits are equal to 10 decimal digits . So, this has nothing to do with the context of the question, since it s not mentioned that it is IEEE 754 representation. Am I right ? It is given in the question that " floating point numbers are represented with 32 bits " so from 32 bits we can get  232 = 4, 294 , 967 , 296 = total 10 digits in decimal . that means 32 bits are equal to 10 decimal digits . A = 2.0 * 1030    this represents 31 digits and C = 1.0 this is 1 digit , so A+C =  total (31+1) = 31 digits.( addition in decimal ) A is one 2 followed by thirty 0's = 31 digits and C is 1 digit . this 31st digit is outside the precision level of A . as we need to do Y = A + C , so it does not take the value of C .( Y = A is assigned and at max it takes 10 digits and rest are overflow that's why this addition only return value of A, one extra digit it can not take ) this addition will  return the value of A which will be assigned to Y. 
so Y = A+C = A and Y = Y + B = $(2.0 \times 10^{30}) + (-2.0 \times 10^{30}) = 0.0$ X = A+B = $(2.0 \times 10^{30}) + (-2.0 \times 10^{30}) = 0.0$ and X = X+C = 0.0 + 1.0 = 1.0 1 2
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.45955121517181396, "perplexity": 1718.972685698054}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-47/segments/1542039742685.33/warc/CC-MAIN-20181115120507-20181115142507-00139.warc.gz"}
https://bedops.readthedocs.io/en/latest/content/reference/file-management/compression/starchcat.html
# 6.3.2.3. starchcat¶ The starchcat utility efficiently merges per-chromosome records contained within one or more BEDOPS Starch-formatted archives. This is an equivalent operation to bedops --everything or bedops -u (a multiset union), but inputs are starch archives rather than uncompressed BED files. As a further advantage to using this over bedops, in the case where a starch input contains BED elements exclusive to one chromosome, this utility will directly and quickly copy over compressed elements to a new archive, avoiding the need for costly and wasteful extraction and re-compression. In the general case, where two or more starch inputs contain BED elements from the same chromosome, a sorted merge is performed and the stream reprocessed into a Starch-formatted archive. ## 6.3.2.3.1. Parallelization¶ Those with access to a computational cluster such as an Oracle/Sun Grid Engine or a group of hosts running SSH services should find starchcat highly useful, as this facilitates: To demonstrate the first application of this utility, we have packaged a helper script with the BEDOPS suite called starchcluster, which archives data much faster than starch alone. By distributing work across the nodes of a computational cluster, the upper bound on compression time is almost entirely determined by the largest chromosome, reducing compression time by an order of magnitude. ## 6.3.2.3.2. Inputs and outputs¶ ### 6.3.2.3.2.1. Input¶ The input to starchcat consists of one or more BEDOPS Starch-formatted archive files. Note If a single archive is provided as input, it may be reprocessed with specified options. When two or more archives are specified, the output will be the equivalent of a multiset union of the inputs. Note This utility does not accept standard input. ### 6.3.2.3.2.2. Output¶ The starchcat tool outputs a starch -formatted archive to standard output, which is usually redirected to a file. 
Additionally, an optional compression flag specifies if the final starch output should be compressed with either the bzip2 or gzip method (the default being bzip2). Note If starch inputs use a different backend compression method, the input stream is re-compressed before integrated into the larger archive. This will incur extra processing overhead. ## 6.3.2.3.3. Usage¶ Use the --help option to list all options: starchcat citation: http://bioinformatics.oxfordjournals.org/content/28/14/1919.abstract version: 2.4.36 (typical) authors: Alex Reynolds and Shane Neph USAGE: starchcat [ --note="..." ] [ --bzip2 | --gzip ] [ --omit-signature ] [ --report-progress=N ] <starch-file-1> [<starch-file-2> ...] * At least one lexicographically-sorted, headerless starch archive is required. * While two or more inputs make sense for a multiset union operation, you can starchcat one file in order to update its metadata, recompress it with a different backend method, or add a note annotation. * Compressed data are sent to standard output. Use the '>' operator to redirect to a file. Process Flags -------------------------------------------------------------------------- --note="foo bar..." Append note to output archive metadata (optional). --bzip2 | --gzip Specify backend compression type (optional, default is bzip2). --omit-signature Skip generating per-chromosome data integrity signature (optional, default is to generate signature). --report-progress=N Report compression progress every N elements per chromosome to standard error stream (optional) --version Show binary version. --help Show this usage message. ### 6.3.2.3.3.1. Per-chromosome data integrity signature¶ By default, a data integrity signature is generated for each chromosome. This can be used to verify if chromosome streams from two or more Starch archives are identical, or used to test the integrity of a chromosome, to identify potential data corruption. 
Generating this signature adds to the computational cost of compression, or an integrity signature may not be useful for all archives. Add the --omit-signature option, if the compression time is too high or the data integrity signature is not needed. ### 6.3.2.3.3.2. Example¶ Let’s say we have a set of 23 starch archives, one for each chromosome of the human genome: chr1.starch, chr2.starch, and so on, to chrY.starch. (To simplify this example, we leave out mitochondrial, random, pseudo- and other chromosomes.) We would like to build a new starch archive from these 23 separate files: \$ starchcat chr1.starch chr2.starch ... chrY.starch > humanGenome.starch The starchcat utility parses the metadata from each of the 23 inputs, determines what data to either simple copy or reprocess, and then it performs the merge. Cleanup is performed afterwards, as necessary, and the output is a brand new starch file, written to humanGenome.starch. Note No filtering or processing is performed on extracted BED elements, before they are written to the final output. Thus, it is possible for duplicate BED elements to occur. It would be easy to use the --signature option to validate the expected content of a new Starch archive. However, the final archive is sorted per sort-bed ordering, so that data extracted from this archive will be ready for use with BEDOPS utilities. Note When input archives contain data on disjoint chromosomes, use of starchcat is very efficient as data are simply copied, instead of extracted and re-compressed.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.2220519632101059, "perplexity": 7581.107806712047}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-22/segments/1558232256163.40/warc/CC-MAIN-20190520222102-20190521004102-00174.warc.gz"}
https://www.nag.com/numeric/cl/nagdoc_cl26.2/html/f01/f01vbc.html
# NAG C Library Function Document ## 1Purpose nag_ztrttp (f01vbc) copies a complex triangular matrix, stored in a full format array, to a packed format array. ## 2Specification #include #include void nag_ztrttp (Nag_OrderType order, Nag_UploType uplo, Integer n, const Complex a[], Integer pda, Complex ap[], NagError *fail) ## 3Description nag_ztrttp (f01vbc) packs a complex $n$ by $n$ triangular matrix $A$, stored conventionally in a full format array, into an array of length $n\left(n+1\right)/2$. The matrix is packed by rows or columns depending on order. This function is intended for possible use in conjunction with functions from Chapters f06, f07, f08 and f16 where some functions use triangular matrices stored in the packed form. Packed storage format is described in Section 3.3.2 in the f07 Chapter Introduction. None. ## 5Arguments 1:    $\mathbf{order}$Nag_OrderTypeInput On entry: the order argument specifies the two-dimensional storage scheme being used, i.e., row-major ordering or column-major ordering. C language defined storage is specified by ${\mathbf{order}}=\mathrm{Nag_RowMajor}$. See Section 3.3.1.3 in How to Use the NAG Library and its Documentation for a more detailed explanation of the use of this argument. Constraint: ${\mathbf{order}}=\mathrm{Nag_RowMajor}$ or $\mathrm{Nag_ColMajor}$. 2:    $\mathbf{uplo}$Nag_UploTypeInput On entry: specifies whether $A$ is upper or lower triangular. ${\mathbf{uplo}}=\mathrm{Nag_Upper}$ $A$ is upper triangular. ${\mathbf{uplo}}=\mathrm{Nag_Lower}$ $A$ is lower triangular. Constraint: ${\mathbf{uplo}}=\mathrm{Nag_Upper}$ or $\mathrm{Nag_Lower}$. 3:    $\mathbf{n}$IntegerInput On entry: $n$, the order of the matrix $A$. Constraint: ${\mathbf{n}}\ge 0$. 4:    $\mathbf{a}\left[\mathit{dim}\right]$const ComplexInput Note: the dimension, dim, of the array a must be at least ${\mathbf{pda}}×{\mathbf{n}}$. On entry: the triangular matrix $A$. 
If ${\mathbf{order}}=\mathrm{Nag_ColMajor}$, ${A}_{ij}$ is stored in ${\mathbf{a}}\left[\left(j-1\right)×{\mathbf{pda}}+i-1\right]$. If ${\mathbf{order}}=\mathrm{Nag_RowMajor}$, ${A}_{ij}$ is stored in ${\mathbf{a}}\left[\left(i-1\right)×{\mathbf{pda}}+j-1\right]$. If ${\mathbf{uplo}}=\mathrm{Nag_Upper}$, the upper triangular part of $A$ must be stored and the elements of the array below the diagonal are not referenced. If ${\mathbf{uplo}}=\mathrm{Nag_Lower}$, the lower triangular part of $A$ must be stored and the elements of the array above the diagonal are not referenced. 5:    $\mathbf{pda}$IntegerInput On entry: the stride separating row or column elements (depending on the value of order) of the matrix $A$ in the array a. Constraint: ${\mathbf{pda}}\ge \mathrm{max}\phantom{\rule{0.125em}{0ex}}\left(1,{\mathbf{n}}\right)$. 6:    $\mathbf{ap}\left[\mathit{dim}\right]$ComplexOutput Note: the dimension, dim, of the array ap must be at least ${\mathbf{n}}×\left({\mathbf{n}}+1\right)/2$. On exit: the $n$ by $n$ triangular matrix $A$, packed by rows or columns depending on order. The storage of elements ${A}_{ij}$ depends on the order and uplo arguments as follows: • if ${\mathbf{order}}=\mathrm{Nag_ColMajor}$ and ${\mathbf{uplo}}=\mathrm{Nag_Upper}$, ${A}_{ij}$ is stored in ${\mathbf{ap}}\left[\left(j-1\right)×j/2+i-1\right]$, for $i\le j$; • if ${\mathbf{order}}=\mathrm{Nag_ColMajor}$ and ${\mathbf{uplo}}=\mathrm{Nag_Lower}$, ${A}_{ij}$ is stored in ${\mathbf{ap}}\left[\left(2n-j\right)×\left(j-1\right)/2+i-1\right]$, for $i\ge j$; • if ${\mathbf{order}}=\mathrm{Nag_RowMajor}$ and ${\mathbf{uplo}}=\mathrm{Nag_Upper}$, ${A}_{ij}$ is stored in ${\mathbf{ap}}\left[\left(2n-i\right)×\left(i-1\right)/2+j-1\right]$, for $i\le j$; • if ${\mathbf{order}}=\mathrm{Nag_RowMajor}$ and ${\mathbf{uplo}}=\mathrm{Nag_Lower}$, ${A}_{ij}$ is stored in ${\mathbf{ap}}\left[\left(i-1\right)×i/2+j-1\right]$, for $i\ge j$. 
7:    $\mathbf{fail}$NagError *Input/Output The NAG error argument (see Section 3.7 in How to Use the NAG Library and its Documentation). ## 6Error Indicators and Warnings NE_ALLOC_FAIL Dynamic memory allocation failed. See Section 2.3.1.2 in How to Use the NAG Library and its Documentation for further information. On entry, argument $〈\mathit{\text{value}}〉$ had an illegal value. NE_INT On entry, ${\mathbf{n}}=〈\mathit{\text{value}}〉$. Constraint: ${\mathbf{n}}\ge 0$. NE_INT_2 On entry, ${\mathbf{pda}}=〈\mathit{\text{value}}〉$ and ${\mathbf{n}}=〈\mathit{\text{value}}〉$. Constraint: ${\mathbf{pda}}\ge \mathrm{max}\phantom{\rule{0.125em}{0ex}}\left(1,{\mathbf{n}}\right)$. NE_INTERNAL_ERROR An internal error has occurred in this function. Check the function call and any array sizes. If the call is correct then please contact NAG for assistance. See Section 2.7.6 in How to Use the NAG Library and its Documentation for further information. NE_NO_LICENCE Your licence key may have expired or may not have been installed correctly. See Section 2.7.5 in How to Use the NAG Library and its Documentation for further information. Not applicable. ## 8Parallelism and Performance nag_ztrttp (f01vbc) is not threaded in any implementation. None. ## 10Example This example reads in a triangular matrix and copies it to packed format. ### 10.1Program Text Program Text (f01vbce.c) ### 10.2Program Data Program Data (f01vbce.d) ### 10.3Program Results Program Results (f01vbce.r)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 69, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.931211531162262, "perplexity": 2562.6664276331276}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-25/segments/1623487611641.26/warc/CC-MAIN-20210614074543-20210614104543-00309.warc.gz"}
https://www.physicsoverflow.org/39937/physics-application-of-%24so-8-%24-and-spin-8-triality
# Physics application of $SO(8)$ and Spin(8) triality + 3 like - 0 dislike 464 views Triality is a relationship among three vector spaces. It describes those special features of the Dynkin diagram D4 and the associated Lie group Spin(8), the double cover of 8-dimensional rotation group SO(8). SO(8) is unique among the simple Lie groups in that its Dynkin diagram (below) (D4 under the Dynkin classification) possesses a three-fold symmetry. This gives rise to a surprising feature of Spin(8) known as triality. Related to this is the fact that the two spinor representations, as well as the fundamental vector representation, of Spin(8) are all eight-dimensional (for all other spin groups the spinor representation is either smaller or larger than the vector representation). The triality automorphism of Spin(8) lives in the outer automorphism group of Spin(8) which is isomorphic to the symmetric group $S_3$ that permutes these three representations. What are physics applications of $SO(8)$ and Spin(8) triality? For example, one of physics applications of $SO(8)$ and Spin(8) triality is that, in the classifications of interacting fermionic topological phases protected by global symmetries, the 1+1D BDI Time-Reversal invariant Topological Superconductor and 2+1D $Z_2$-Ising-symmetric Topological Superconductor have $\mathbb{Z}_8$ classifications (see a related post here), that can be deduced from adding non-trivial four-fermion interaction terms respect the $SO(8)$ and Spin(8) triality, see for example the Appendix A of this web version (free access). Are there other examples, other applications in physics? This post imported from StackExchange Physics at 2017-09-30 21:54 (UTC), posted by SE-user wonderich Please use answers only to (at least partly) answer questions. To comment, discuss, or ask for clarification, leave a comment instead. To mask links under text, please type your text, highlight it, and click the "link" button. You can then enter your link URL. 
Please consult the FAQ for as to how to format your post. This is the answer box; if you want to write a comment instead, please use the 'add comment' button. Live preview (may slow down editor)   Preview Your name to display (optional): Email me at this address if my answer is selected or commented on: Privacy: Your email address will only be used for sending these notifications. Anti-spam verification: If you are a human please identify the position of the character covered by the symbol $\varnothing$ in the following word:p$\hbar\varnothing$sicsOverflowThen drag the red bullet below over the corresponding character of our banner. When you drop it there, the bullet changes to green (on slow internet connections after a few seconds). To avoid this verification in future, please log in or register.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5969114303588867, "perplexity": 1404.3905651043228}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-17/segments/1618038057142.4/warc/CC-MAIN-20210410134715-20210410164715-00345.warc.gz"}
https://awsh.org/nixie-tubes/
# nixie tubes I finally received a batch of IN-14 nixie tubes that I purchased from ebay for a future clock project. The tubes need a power supply that can supply 170 volts DC, which I didn’t have, so I built one. I came across a few switching designs online and I settled on the one from here, mainly because I had most of the parts on hand. I made a few changes from the schematic which I’ll detail below. My changes (based mostly on parts that I had available): L1 = 150uH D1 = BAV21 T1 = IRF610 RSENSE = 1 ohm C4 = 10uF C6 = 470nF VR1 = 10k ohms I built the circuit on a piece of perf board with screw terminals for the inputs/outputs. It worked pretty well from the start. I was getting 181.5V DC during my first run. From 3.6 volts in, I was getting the full voltage. I will run it on 12 volts during actual use. (Ignore the messy desk.) I plugged one of the tubes into a breadboard and just hardwired the power supply to the anode through a resistor and one of the cathodes and tried it out. It works! Now, to give it some logic. I wired up an MPSA42 transistor for each cathode and connected them to a bcd to decimal converter. I then put together a quick arduino sketch to scroll through the numbers. It was a bit overkill, but it’s what I had lying on the desk at the time.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.2816479802131653, "perplexity": 1991.214188263685}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-21/segments/1652662517485.8/warc/CC-MAIN-20220517130706-20220517160706-00594.warc.gz"}
https://www.physicsforums.com/threads/is-double-parton-scattering-useful.515218/
# Is double parton scattering useful? 1. Jul 19, 2011 ### petergreat Is double parton scattering merely something of curiosity, or is it useful in the production of interesting particles (Higgs, top, B meson etc.)? 2. Jul 19, 2011
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.960793673992157, "perplexity": 13008.567498422972}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-30/segments/1531676589251.7/warc/CC-MAIN-20180716095945-20180716115945-00494.warc.gz"}
https://publications.iitm.ac.in/publication/experimental-investigation-on-preconditioned-rate-induced-tipping
X Experimental investigation on preconditioned rate induced tipping in a thermoacoustic system J. Tony, S. Subarna, K. S. Syamkumar, G. Sudha, S. Akshay, Published in Nature Publishing Group 2017 PMID: 28710448 Volume: 7 Issue: 1 Abstract Many systems found in nature are susceptible to tipping, where they can shift from one stable dynamical state to another. This shift in dynamics can be unfavorable in systems found in various fields ranging from ecology to finance. Hence, it is important to identify the factors that can lead to tipping in a physical system. Tipping can mainly be brought about by a change in parameter or due to the influence of external fluctuations. Further, the rate at which the parameter is varied also determines the final state that the system attains. Here, we show preconditioned rate induced tipping in experiments and in a theoretical model of a thermoacoustic system. We provide a specific initial condition (preconditioning) and vary the parameter at a rate higher than a critical rate to observe tipping. We find that the critical rate is a function of the initial condition. Our study is highly relevant because the parameters that dictate the asymptotic behavior of many physical systems are temporally dynamic. © 2017 The Author(s).
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8217854499816895, "perplexity": 1093.3311430090437}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-06/segments/1674764499899.9/warc/CC-MAIN-20230201013650-20230201043650-00600.warc.gz"}
https://brilliant.org/problems/a-simple-double-integral/
A simple double integral. Calculus Level 5 Let : $I= \int_0^1 \int_0^1 \left\{ \frac{x}{y} \right\} \ \mathrm{d}x\ \mathrm{d}y$ Find $$\lfloor1000I\rfloor$$. $$\{x\}$$ denotes to the fractional part of $$x$$, and $$\lfloor x\rfloor$$ denotes to the floor function for $$x$$. Remark : Numerical integration using software can be wrong. ×
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9918755888938904, "perplexity": 726.1848492384092}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-22/segments/1495463608665.33/warc/CC-MAIN-20170526124958-20170526144958-00173.warc.gz"}
http://math.stackexchange.com/questions/304527/the-area-of-the-triangle-with-vertices-3-2-3-8-and-x-y-is-24-what-is
# The area of the triangle with vertices (3, 2), (3, 8), and (x, y) is 24. What is x? The area of the triangle with vertices (3, 2), (3, 8), and (x, y) is 24. A possible value for x is: a) 7 b) 9 c) 11 d) 13 e) 15 - is it a homework? –  Yimin Feb 15 '13 at 3:44 It's for an upcoming test. But I don't understand. –  user62366 Feb 15 '13 at 3:48 Graph it. It should be obvious. –  Tpofofn Feb 15 '13 at 4:12 One side of the triangle lies on the line $x = 3$, and is length $6$. Why?. Take that to be your base, $b$. The area of a triangle is given by $$\text{Area}\;=\;\dfrac 12 bh$$ where $h$ is the height of the triangle measured from the base (connecting the third point perpendicular to the base, so $$\dfrac12(6)h = 24 \iff h = 8$$ Now, height, h, is the perpendicular distance from the base, which is on $x = 3$, and the only possible choices for $x$ that are given are all positive. Hence $h = 8 \implies x = 3 + 8 = 11.$ - Thank you! This helped. But would this way only work with right angle triangles? –  user62366 Feb 15 '13 at 4:07 Ok.But what if the one side of the triangle did not lie on the same line? –  user62366 Feb 15 '13 at 4:21 It doesn't work only if the triangle is a right triangle: We don't know yet what y might be, or where the perpendicular line from $(x,y)$ intersects the line x=3, where the base lies (the perpendicular distance from the point (x, y) to the which is height), only that it must intersect the line $x = 3$. It would be a right triangle iff y = 2 or y = 8, $y$ being the value of the unknown point. –  amWhy Feb 15 '13 at 14:10 We only know that the unknown point $x, y$ is limited by a perpendicular distance of $8$ to the line $x = 3$, and that the point is somewhere to the right of the line $x = 3$, because the choices for the unknown x value are all positive and $> 3$. –  amWhy Feb 15 '13 at 14:13 Hinz Take the first twopoint as base line. It has length 6. Therefore the height must be 8. 
- We know the area of a triangle (Article#25) having vertices $(x_i,y_i)$ for $i=1,2,3$ is $$\frac12\det\begin{pmatrix} x_1 & y_1 & 1\\x_2&y_2&1\\ x_3 & y_3 &1\end{pmatrix}$$ Now, $$\det\begin{pmatrix}x & y &1\\ 3 & 2 & 1 \\ 3 & 8 & 1 \end{pmatrix}$$ $$=\det\begin{pmatrix}x-3 & y-2 &0\\ 3 & 2 & 1 \\ 0 & 6 & 0 \end{pmatrix}\text { (Applying } R_3'=R_3-R_2,R_1'=R_1-R_2)$$ $$=6(x-3)$$ As we take the area in absolute value, the area here will be $\frac12\cdot6|x-3|=3|x-3|$ If $x\ge 3, 3(x-3)=24\implies x=11$ If $x<3, 3(3-x)=24\implies x=-5$ -
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8419064283370972, "perplexity": 311.82987515203496}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-35/segments/1408500833115.44/warc/CC-MAIN-20140820021353-00008-ip-10-180-136-8.ec2.internal.warc.gz"}
http://alt-sci.ru/_en/wiki/index.php?title=Aether_theory&printable=yes
# Aether theory Corresponding Wikipedia article: Aether theories The aether is the intermediator between space and matter, which is available by a mental perception, and it appears indirectly in a form of the observed physical phenomena and the measured physical values. The existence of aether is denied due to its immateriality, and therefore its properties are shared between the properties of space and matter. The aether fills entire space, and it is the reason for the existence of matter and its laws. The aether is a primary matter or a “pramatter”. The material objects are the clots of condensed aether. The entire world energy is contained in the aether. Every type of energy is derived from the aetheric energy in the eternal cycle. The using of a clean (free) aetheric energy looks like the perpetual motion. The main aether manifestation in the everyday life is the forces of inertia and gravity. The aether presses the objects to the ground, and it resists to their acceleration and braking. The aether pressure is almost canceled during the free fall. The aether is a concept of the ancient Greek and European philosophy. In the Japanese philosophy, the aether corresponds to a supernatural power "mukoku". In the Chinese philosophy, the aether corresponds to the energy “Qi” ("chi"), which fills the empty space "Tai xu". In the Indian philosophy, the aether corresponds to the "Maya", "Prakriti", "prana" and "Akasha", although the last two concepts refer to the spiritual energy. The great scientists and inventors somehow believed in the aether existence: • R. Descartes (1596–1650) is a philosopher, one of the algebra founders, creator of the analytic geometry. He considered the light as a linear motion of some rarefied matter[1]. • C. Huygens (1629-1695) is an inventor and a physicist. He considered the light as the movements from one aetheric particle to another. • R. Hooke (1635–1703) is one of the founders of physics and mechanics. 
He calculated the gravity equation, the authorship of which is dedicated to Newton solely[2]. • M. Lomonosov (1711–1765) is the author of the kinetic theory, which became a basis of thermodynamics. He explained the gravity as the aetheric pressure[3]. • A.J. Fresnel (1788–1827) is a physicist, who founded the wave theory of light. He considered the aether as an elastic medium. • M. Faraday (1791–1867) is an inventor and a physicist. He is the discoverer of the electromagnetic induction law and other phenomena. He considered the electromagnetic fields as the special aether conditions. • H. Helmholtz (1821–1894). is a versatile scientist, who created theories of the physiological perception of color and sound. He considered the aether as an incompressible fluid. • D. Mendeleev (1834–1907) is a versatile scientist, chemist, economist. The author of the Periodic table. As a chemist, he considered the aether as an ideal gas (Newtonium). • J.C. Maxwell (1831–1879) is a physicist, who created the classical electrodynamics based on the mechanical analogies. However, he invented the non-existent "displacement current" to prove the electromagnetic waves of Hertz. • J.J. Thomson (1856-1940) is a physicist, who discovered the electron. The author of an idea about the equivalence of mass and electromagnetic energy. • Umov N. (1846-1915) is a physicist, who approximately determined the equation of mass and electromagnetic energy[4]. • O. Heaviside (1850–1925) is an engineer and a mathematician, who created the modern electric circuit theory. He is the author of the electromagnetic theory of aether and of the famous formula $$E=mc^2$$, the authorship of which was awarded to Einstein solely. • N. Tesla (1856–1943) is the great inventor of the various transformers, electrical machines (motors, generators), multi-phase AC power, as well as many less known devices. He is the author of the unpublished physical theories. 
He is a contemporary of Einstein, who argued with him about the aether existence. The belief in the aether existence was undermined by the following theoretical physicists despite their merits: • Newton, a co-author of several mathematical inventions (the Leibniz integral and the binomial theorem, also known to the ancient Indian mathematicians) and of Galileo's mechanics laws. He called an empirical gravity formula the fundamental law. • Einstein, an unemployed teacher of physics, who hardly became a patent clerk (an expert on the inventions of others). He is a co-author of the sensational theories of Lorentz, Minkowski etc., which complicatedly explain the obvious nature of the light aberration and of Sagnac's effect, but predict the questionable gravitational waves, black holes, wormholes etc. • Schrödinger and Dirac, the university teachers, the authors of the formalized theories, which allow existence of the particle’s discrete parameters independently of an interaction with other particles. The Schrödinger equation pretends to explain Mendeleev’s periodic law, but it does not explain this law without introducing an “Aufbau principle” known as the empirical Madelung rule. The modern published aetheric theories contain delusions denying the basic aether properties: • Aether is neither an ideal nor a real (as W.A. Atsukowsky believes) gas in the full sense of this word, having a temperature etc. In the ancient philosophers' views, the aether is a separate element in the same group with the gas (air), plasma (fire), liquid (water) and solid (ground). The aether itself is an empty space, which has neither temperature nor pressure etc. • Aether is not an elastic continuum, which distributes the mechanical waves like the acoustic waves. The light is a linear aether motion in the form of a wave. The real waves exist only in the matter, and they never propagate perfectly linearly within a free space, in contrast to the light rays. 
The gravitational waves also do not exist. • Aether provides the fundamental laws of electromagnetism as the nature of light, which differ from the mechanical laws of inertia and gravity. The modern aetheric theories reduce the electromagnetism to a mechanical pressure in various ways, forgetting about the difference between the electromagnetic and gravitational interactions. • The aetheric particles do not have their own inertial and gravitational mass, but they produce a material mass from the electromagnetic energy. This property is most consistent with Paramahamsa Tewari's space and with the torsion field particles of A. Akimov and G. Shipov. The new aether theory assumes the following aetheric materialistic world structure: Component Categories Objects Spacetime Geometry, kinematics, field Abstract bodies, rays, fields Aether (pramatter) Gravity, dynamics, magnetism, electricity, optics Electromagnetic and gravitational fields, vortices, light Matter Waves, reflection, refraction, dispersion, chemistry, thermodynamics, friction Atoms, molecules, crystals The list of physical phenomena explained by the new aether theory: ## References 1. Descartes, René (1637). Discourse on Method, Optics, Geometry, and Meteorology. 2. V.I. Arnol'd, Huygens and Barrow, Newton and Hooke: Pioneers in mathematical analysis and catastrophe theory from evolvents to quasicrystals. ISBN 3764323833, ISBN 978-3764323837 3. Ломоносов М. В. [Заметки о тяжести тел] / Пер. Я. М. Боровского // Ломоносов М. В. Полное собрание сочинений / АН СССР. — М.; Л., 1950—1983. Т. 1: Труды по физике и химии, 1738—1746. — М.; Л.: АН СССР, 1950. — С. 237—251. 4. Умов Н. А. Теория простых сред и её приложение к выводу основных законов электростатических и электродинамических взаимодействий. — Одесса, т. 9, 1873.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8789401650428772, "perplexity": 3667.2321053232095}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-49/segments/1669446710941.43/warc/CC-MAIN-20221203212026-20221204002026-00652.warc.gz"}
https://www.physicsforums.com/threads/moment-tension-problem.304965/
# Moment/Tension problem 1. Apr 4, 2009 ### fableblue 1. The problem statement, all variables and given/known data It is known that a force with a moment of 7840 lb in about D is required to straighten the fence post CD. If a=8in. ,b=52 in., and d=112 in., determine the tension that must be developed in the cable of winch puller AB to create the required moment about point D. 2. Relevant equations Find angle AED = arctan (52/120) = 23.43degrees Use M =D x F ; M=7840lb in, D=ED=112 in, F=unknown Substitute use algebra to find T(AB) 3. The attempt at a solution I have attached my attempt but i think I missed something because when i check it does not check. 176.05 x sin23.43 does NOT = 7840 (given) Now i am stuck Would someone please give me a little nudge/ where did i go wrong? What i am not seeing? Thanks #### Attached Files: • ###### Equilibrium 2001.jpg File size: 19.2 KB Views: 112 Last edited: Apr 4, 2009 2. Apr 5, 2009 ### tiny-tim Hi fableblue! I can't see your attachment yet, but I notice that your answer is out by a factor of 11.2. which looks suspiciously like d … does that help? 3. Apr 5, 2009 ### fableblue Not really but thanks, tiny-tim. May be when you are able to see the attachment you see what/where my problem lies. I am going to try it again later on with the use of the law of cosins. What is throwing me off is the pole not being straight. Can I use$$\angle$$EDC, which is at the base on the post that needs to be strightened? $$\angle$$EDC=~59$$\circ$$, I used the law of sines for that but it does not look right; $$\frac{112sin(23.43)}{52}$$ Can i use that angle? 4. Apr 5, 2009 ### Staff: Mentor When finding torque using T = rFsinθ, θ is the angle between r and F. Consider the angle ECD. 5. Apr 5, 2009 ### fableblue Thank you, I also was looking at that angle, actually i was looking at ALL the angles. But now i can see why. And i am going to assume, from your reply, that the angles are valid no matter if the post is leaning or straight. 
Now that i think about it the problem is looking for the intial tension in the cable. So we get r=52sin81.25, to get the $$\angle$$ of 81.25$$\circ$$ i used arctan(52/8) and oppisite interior angles are =, and the used algebra and my answer is F=152.544lbs. Does that look about right? 6. Apr 5, 2009 ### Staff: Mentor Not exactly. The way I look at it, r is the length of the post from D to C. Solve for that distance. The angle that you need is BCD. (Finding the angle of 81.25° is a useful step along the way.) 7. Apr 5, 2009 ### fableblue OK so r=$$\sqrt{(8^2 + 52^2)}$$=52.61 in the Mp=rFsin$$\Theta$$; substitute- 7840 lb in = 52.61sin81.25F; do algebra and F=147.29 lbs= tension in cable ABC. Is this correct? How do i go about checking it? 8. Apr 5, 2009 ### Staff: Mentor Your r is correct, but your angle is still wrong. Find angle BCD. 9. Apr 5, 2009 ### fableblue deep breath, ok i should have looked at my drawing because then i would have seen that $$\angle$$EDC and $$\angle$$ ECD were not oppisite interior angles. What i did this time was use 180-81.25 for $$\angle$$EDC=98.75 and then i used the law of cosins to find side EC =130.78 and from there i used the law of cosines again to find $$\angle$$ECD=57.83, then i used Mp=rFsin$$\theta$$ $$\rightarrow$$ F=7840/(52.61sin57.83) = 176.04:uhh: Is this correct? That is my intial result way back in the begining. I see that the angle should be 89.78 then F=149.02 and 7840~(52.61sin89.78) * 149.02 i found this through some algebra but now i am trying to see why i can not get it trough trig I really do apperciate the help you are giving me. Thank You Last edited: Apr 5, 2009 10. Apr 5, 2009 ### Staff: Mentor That looks right to me. I don't understand what you're doing here. Where does that angle of 89.78° come from? 11. Apr 5, 2009 ### fableblue I did some switching around of things to try and make it fit:shy: So are you saying that my intial results are correct? How do i go about checking this? 
I substitute 176.04 in for F and they do not = each other. 7840 = ....wait a minute... SHAZAM>>>> I was checking my solution the WRONG way not using the whole equation i was leaving out sin57.83. WOW So the way that i did it intially is that a correct way? r=112sin23.43 and TAB= F and i understood what i was doing so is the way i did W A Y back there correct? Thanks a bunch 12. Apr 5, 2009 ### Staff: Mentor I'd say that your initial method was unorthodox (at least to me), but correct. The "obvious" way to define rFsinθ in this problem is to have r be the distance DC, since we are finding the torque on the post. But there's nothing stopping you from measuring r from D to any point along the line of the force, since the torque will be the same. You used r = the distance DE and the associated angle. That works. 13. Apr 5, 2009 ### fableblue Yes, that was because i was told that a force/torque is the same anywhere in the post. It seemed to easy for me that is why I did not have faith in my result and all a long is was the way i was checking it. Thank You Last edited: Apr 5, 2009 Know someone interested in this topic? Share this thread via Reddit, Google+, Twitter, or Facebook Similar Discussions: Moment/Tension problem
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8155295252799988, "perplexity": 1673.3728156591023}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": false}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-34/segments/1502886104560.59/warc/CC-MAIN-20170818024629-20170818044629-00711.warc.gz"}
https://moodle.org/plugins/view.php?plugin=mod_scheduler&moodle_version=21
## Activities: Scheduler mod_scheduler Maintained by Henning Bostelmann An appointment scheduler for planning face-to-face meetings between teachers and students. Teachers can set up time slots, and students can choose one of these on Moodle. 2541 sites 60 fans The Scheduler module helps you in scheduling appointments with your students. Teachers specify time slots for meetings, students then choose one of them on Moodle. Teachers in turn can record the outcome of the meeting - and optionally a grade - within the scheduler. Group scheduling is supported; that is, each time slot can accomodate several students, and optionally it is possible to schedule appointments for entire groups at the same time. Please note that minor updates to this module will normally be released on github only. For help and discussion about the module, please use the Scheduler forum on moodle.org. For an earlier version supporting Moodle up to release 1.9, see the Scheduler 1.9 module. ### Awards • Wed, 4 Oct 2017, 5:40 PM Hi Henning, I've just submitted a PR on GitHub to propose an implementation of core_calendar notifications in Moodle 3.3's new core block_myoverview block. Details are in the PR. Would you mind having a look at it? Thanks • Fri, 6 Oct 2017, 7:39 PM Hi Nicolas, thanks for your patch (and rest assured that I do get notifications from github). I will have a look at it when I find the time. The next major release of Scheduler is not due before Moodle 3.5. • Mon, 13 Nov 2017, 4:45 AM Note to plugin users: I believe that version 3.3.0 works with Moodle 3.4, and have marked it as such. Some individual behat test cases seem to fail, but as far as I could find out, these are problems entirely on the side of the test tool, and not with the actual functionality. • Fri, 1 Dec 2017, 10:19 AM Hi All, Would like to report a bug since I haven't seen it reported here (as far as I checked). 
When I delete a Scheduler activity that contains appointment(s), as a result the appointment slot does not get removed from its table. Thus when create a new appointment in another scheduler activity in the same, if use identical date and time as the deleted one, I will get a conflict error and it doesn't get created unless ignore the conflict which is not preferable . Tested on 3.3 Cheers. • Fri, 1 Dec 2017, 10:51 AM lib.php function scheduler_delete_instance($id) { ...... • Fri, 8 Dec 2017, 7:22 PM Hi, I noted an issue... A teacher in my university uses the scheduler activity to ask students to register for an exam. There is only one slot and over 300 students. When he wants to edit the slot (url like scheduler/view.php?id=xxx&what=updateslot&subpage=allappointments&offset=-1) the edit form doesn't show (banner and course header are rendered but neither navigation drawer, activity content nor footer). Any hint? Cheers • Mon, 11 Dec 2017, 6:01 AM We are currently using version 3.1.2 (Moodle 3.1) and have found a minor issue. When a student deletes an appointment slot they previously booked it disappears from the list and the slot is freed up for another user however the user who deleted an appt (for tomorrow) over a week ago still got a reminder email of her appt with me this morning (reminder for tomorrows meeting). It seems the reminder info isn't purged on deleted slots maybe? Regards, Yvonne • Fri, 29 Dec 2017, 12:35 AM Hi Henning et al, Thank you for reading this message. I have a question, the current version of Scheduler can only add people to an appointment upon creation. In the future, will you guys implement a version that you can add people to recurring appointments upon creation? Thank you! • Tue, 9 Jan 2018, 2:34 PM Hi Henning, Our teachers love the scheduler plugin a lot. They use this plugin to let students to book online tutorials. There's one issue if you could help to solve, it will be really great. 
When teachers add slots, the system will automatically save the location when the teacher inputs at the first time. Then this location will be the default location for that teacher. But the location will be changed frequently. If the teacher forget to change the default location, students will go to the wrong room. The teachers prefer to have the location field be blank. Can you help with this? Many thanks. Best wishes, Lina • Tue, 23 Jan 2018, 2:33 AM Hi Henning, longtime no exchanged... how are you going ? i am asked by a customer to complete the scheduler with moodle WS, covering the essential crude questions about scheduler instances, slotsand appointement. Would you be ok for integrating the resulting externallib.php and db/services.php files in the scheduler distribution ? Cheers ! Valery • Wed, 7 Feb 2018, 2:33 AM Dear Sir, when i want install plugin i have this error how can i solve it please Validating repository_scheduler ... خطاء [Error] Unsupported syntax detected in version.php file [$module] Installation aborted due to validation failure • Fri, 23 Feb 2018, 7:00 PM Hi Henning, A little suggestion for next version maybe. It would be nice to be allowed to set preferred export format in site config. Many users export data to use them inside excel but don't change the default export format. When files contain accents, they get weird characters in Excel. Regards Jean-Roch • Tue, 27 Feb 2018, 1:29 AM Hi Henning, I have a little problem. When I put a file link in Booking form and student-supplied data, like instructions and other info, but students see this URL, http://site.com/mod/scheduler/@@PLUGINFILE@@/blablabla.pdf and it is no a vali link -> NOT FOUND. Any idea? Just happen there.... Thank you • Sun, 18 Mar 2018, 1:41 AM @Valery, long time no see! The WS API was something on my long-term to-do list. 
I am always happy to consider contributions, although I can't promise to integrate them (it really depends what the code looks like, whether it's of wider interest, etc.). If you like, open an issue in the tracker and link a github branch, once your work is finished. • Sun, 18 Mar 2018, 5:19 AM @David, thanks for mentioning this - there is indeed a bug, I'm tracking this as CONTRIB-7222 (see there foe updates).
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.24493734538555145, "perplexity": 3846.432284834928}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257647280.40/warc/CC-MAIN-20180320033158-20180320053158-00553.warc.gz"}
http://math.stackexchange.com/users/44660/pcepkin
# pcepkin less info reputation 9 bio website location age member for 2 years, 1 month seen Sep 30 '13 at 14:55 profile views 28 mathstudent # 15 Questions 4 Hilbert space and its dual 3 dynamical systems and invariant sets 3 a question on $O$- notation 3 Estimation of $\int \limits_{0}^{\infty}e^{-ax}\frac{1}{x}dx.$ 2 3d poisson's equation # 172 Reputation This user has no recent positive reputation changes 1 how to prove this statement # 21 Tags 1 proof-writing 0 integration × 2 1 elementary-set-theory 0 functional-analysis × 2 0 pde × 4 0 hilbert-spaces × 2 0 differential-equations × 3 0 distribution-theory 0 analysis × 3 0 fourier-analysis # 5 Accounts Mathematics 172 rep 9 Stack Overflow 29 rep 7 English Language Learners 23 rep 3 TeX - LaTeX 8 rep 3 English Language & Usage 1 rep 1
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8456162810325623, "perplexity": 7271.667561342747}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-49/segments/1416931014049.81/warc/CC-MAIN-20141125155654-00222-ip-10-235-23-156.ec2.internal.warc.gz"}
https://wiki.swarma.org/index.php/%E5%A4%9A%E9%87%8D%E5%88%86%E5%BD%A2%E7%B3%BB%E7%BB%9F
# 多重分形系统 Example of a multifractal electronic eigenstate at the Anderson localization transition in a system with 1367631 atoms. A multifractal system is a generalization of a fractal system in which a single exponent (the fractal dimension) is not enough to describe its dynamics; instead, a continuous spectrum of exponents (the so-called singularity spectrum) is needed.[1] A multifractal system is a generalization of a fractal system in which a single exponent (the fractal dimension) is not enough to describe its dynamics; instead, a continuous spectrum of exponents (the so-called singularity spectrum) is needed. Multifractal systems are common in nature. They include the length of coastlines, mountain topography,[2] fully developed turbulence, real-world scenes, heartbeat dynamics,[3] human gait[4]模板:Failed verification and activity,[5] human brain activity,[6][7][8][9][10][11][12] and natural luminosity time series.[13] Models have been proposed in various contexts ranging from turbulence in fluid dynamics to internet traffic, finance, image modeling, texture synthesis, meteorology, geophysics and more.[citation needed] The origin of multifractality in sequential (time series) data has been attributed to mathematical convergence effects related to the central limit theorem that have as foci of convergence the family of statistical distributions known as the Tweedie exponential dispersion models,[14] as well as the geometric Tweedie models.[15] The first convergence effect yields monofractal sequences, and the second convergence effect is responsible for variation in the fractal dimension of the monofractal sequences.[16] Multifractal systems are common in nature. They include the length of coastlines, mountain topography, fully developed turbulence, real-world scenes, heartbeat dynamics, human gait and activity, human brain activity, and natural luminosity time series. 
Models have been proposed in various contexts ranging from turbulence in fluid dynamics to internet traffic, finance, image modeling, texture synthesis, meteorology, geophysics and more. The origin of multifractality in sequential (time series) data has been attributed to mathematical convergence effects related to the central limit theorem that have as foci of convergence the family of statistical distributions known as the Tweedie exponential dispersion models, as well as the geometric Tweedie models. The first convergence effect yields monofractal sequences, and the second convergence effect is responsible for variation in the fractal dimension of the monofractal sequences. Multifractal analysis is used to investigate datasets, often in conjunction with other methods of fractal and lacunarity analysis. The technique entails distorting datasets extracted from patterns to generate multifractal spectra that illustrate how scaling varies over the dataset. Multifractal analysis techniques have been applied in a variety of practical situations, such as predicting earthquakes and interpreting medical images.[17][18][19] Multifractal analysis is used to investigate datasets, often in conjunction with other methods of fractal and lacunarity analysis. The technique entails distorting datasets extracted from patterns to generate multifractal spectra that illustrate how scaling varies over the dataset. Multifractal analysis techniques have been applied in a variety of practical situations, such as predicting earthquakes and interpreting medical images. ## Definition In a multifractal system $\displaystyle{ s }$, the behavior around any point is described by a local power law: In a multifractal system s, the behavior around any point is described by a local power law: = = 定义 = = 在多重分形系统中,任何点周围的行为都用局部幂定律来描述: $\displaystyle{ s(\vec{x}+\vec{a})-s(\vec{x}) \sim a^{h(\vec{x})}. }$ s(\vec{x}+\vec{a})-s(\vec{x}) \sim a^{h(\vec{x})}. 
S (vec { x } + vec { a })-s (vec { x }) sim a ^ { h (vec { x })}. The exponent $\displaystyle{ h(\vec{x}) }$ is called the singularity exponent, as it describes the local degree of singularity or regularity around the point $\displaystyle{ \vec{x} }$.[citation needed] The exponent h(\vec{x}) is called the singularity exponent, as it describes the local degree of singularity or regularity around the point \vec{x}. The ensemble formed by all the points that share the same singularity exponent is called the singularity manifold of exponent h, and is a fractal set of fractal dimension $\displaystyle{ D(h): }$ the singularity spectrum. The curve $\displaystyle{ D(h) }$ versus $\displaystyle{ h }$ is called the singularity spectrum and fully describes the statistical distribution of the variable $\displaystyle{ s }$.[citation needed] The ensemble formed by all the points that share the same singularity exponent is called the singularity manifold of exponent h, and is a fractal set of fractal dimension D(h): the singularity spectrum. The curve D(h) versus h is called the singularity spectrum and fully describes the statistical distribution of the variable s. In practice, the multifractal behaviour of a physical system $\displaystyle{ X }$ is not directly characterized by its singularity spectrum $\displaystyle{ D(h) }$. Rather, data analysis gives access to the multiscaling exponents $\displaystyle{ \zeta(q),\ q\in{\mathbb R} }$. Indeed, multifractal signals generally obey a scale invariance property that yields power-law behaviours for multiresolution quantities, depending on their scale $\displaystyle{ a }$. Depending on the object under study, these multiresolution quantities, denoted by $\displaystyle{ T_X(a) }$, can be local averages in boxes of size $\displaystyle{ a }$, gradients over distance $\displaystyle{ a }$, wavelet coefficients at scale $\displaystyle{ a }$, etc. 
For multifractal objects, one usually observes a global power-law scaling of the form:[citation needed] In practice, the multifractal behaviour of a physical system X is not directly characterized by its singularity spectrum D(h). Rather, data analysis gives access to the multiscaling exponents \zeta(q),\ q\in{\mathbb R}. Indeed, multifractal signals generally obey a scale invariance property that yields power-law behaviours for multiresolution quantities, depending on their scale a. Depending on the object under study, these multiresolution quantities, denoted by T_X(a), can be local averages in boxes of size a, gradients over distance a, wavelet coefficients at scale a, etc. For multifractal objects, one usually observes a global power-law scaling of the form: $\displaystyle{ \langle T_X(a)^q \rangle \sim a^{\zeta(q)}\ }$ \langle T_X(a)^q \rangle \sim a^{\zeta(q)}\ langle t _ x (a) ^ q rangle sim a ^ { zeta (q)} at least in some range of scales and for some range of orders $\displaystyle{ q }$. When such behaviour is observed, one talks of scale invariance, self-similarity, or multiscaling.[20] at least in some range of scales and for some range of orders q. When such behaviour is observed, one talks of scale invariance, self-similarity, or multiscaling. ## Estimation Using so-called multifractal formalism, it can be shown that, under some well-suited assumptions, there exists a correspondence between the singularity spectrum $\displaystyle{ D(h) }$ and the multi-scaling exponents $\displaystyle{ \zeta(q) }$ through a Legendre transform. While the determination of $\displaystyle{ D(h) }$ calls for some exhaustive local analysis of the data, which would result in difficult and numerically unstable calculations, the estimation of the $\displaystyle{ \zeta(q) }$ relies on the use of statistical averages and linear regressions in log-log diagrams. 
Once the $\displaystyle{ \zeta(q) }$ are known, one can deduce an estimate of $\displaystyle{ D(h), }$ thanks to a simple Legendre transform.[citation needed] Using so-called multifractal formalism, it can be shown that, under some well-suited assumptions, there exists a correspondence between the singularity spectrum D(h) and the multi-scaling exponents \zeta(q) through a Legendre transform. While the determination of D(h) calls for some exhaustive local analysis of the data, which would result in difficult and numerically unstable calculations, the estimation of the \zeta(q) relies on the use of statistical averages and linear regressions in log-log diagrams. Once the \zeta(q) are known, one can deduce an estimate of D(h), thanks to a simple Legendre transform. Multifractal systems are often modeled by stochastic processes such as multiplicative cascades. The $\displaystyle{ \zeta(q) }$ are statistically interpreted, as they characterize the evolution of the distributions of the $\displaystyle{ T_X(a) }$ as $\displaystyle{ a }$ goes from larger to smaller scales. This evolution is often called statistical intermittency and betrays a departure from Gaussian models.[citation needed] Multifractal systems are often modeled by stochastic processes such as multiplicative cascades. The \zeta(q) are statistically interpreted, as they characterize the evolution of the distributions of the T_X(a) as a goes from larger to smaller scales. This evolution is often called statistical intermittency and betrays a departure from Gaussian models. Modelling as a multiplicative cascade also leads to estimation of multifractal properties. This method works reasonably well, even for relatively small datasets. A maximum-likelihood fit of a multiplicative cascade to the dataset not only estimates the complete spectrum but also gives reasonable estimates of the errors.[21] Modelling as a multiplicative cascade also leads to estimation of multifractal properties. 
This method works reasonably well, even for relatively small datasets. A maximum likelihood fit of a multiplicative cascade to the dataset not only estimates the complete spectrum but also gives reasonable estimates of the errors. ## Estimating multifractal scaling from box counting Multifractal spectra can be determined from box counting on digital images. First, a box counting scan is done to determine how the pixels are distributed; then, this "mass distribution" becomes the basis for a series of calculations.[22][23][24] The chief idea is that for multifractals, the probability $\displaystyle{ P }$ of a number of pixels $\displaystyle{ m }$, appearing in a box $\displaystyle{ i }$, varies as box size $\displaystyle{ \epsilon }$, to some exponent $\displaystyle{ \alpha }$, which changes over the image, as in Eq.0.0 (NB: For monofractals, in contrast, the exponent does not change meaningfully over the set). $\displaystyle{ P }$ is calculated from the box-counting pixel distribution as in Eq.2.0. $\displaystyle{ P_{[i,\epsilon]} \varpropto \epsilon^{-\alpha_i} \therefore\alpha_i \varpropto \frac{\log{P_{[i,\epsilon]}}}{\log{\epsilon^{-1}}} }$ (Eq.0.0) Multifractal spectra can be determined from box counting on digital images. First, a box counting scan is done to determine how the pixels are distributed; then, this "mass distribution" becomes the basis for a series of calculations. The chief idea is that for multifractals, the probability P of a number of pixels m, appearing in a box i, varies as box size \epsilon, to some exponent \alpha, which changes over the image, as in (NB: For monofractals, in contrast, the exponent does not change meaningfully over the set). P is calculated from the box-counting pixel distribution as in . 
$\displaystyle{ \epsilon }$ = an arbitrary scale (box size in box counting) at which the set is examined \epsilon = an arbitrary scale (box size in box counting) at which the set is examined $\displaystyle{ i }$ = the index for each box laid over the set for an $\displaystyle{ \epsilon }$ i = the index for each box laid over the set for an \epsilon i = 在集合上放置的每个盒子的索引为 ε $\displaystyle{ m_{[i,\epsilon]} }$ = the number of pixels or mass in any box, $\displaystyle{ i }$, at size $\displaystyle{ \epsilon }$ m_{[i,\epsilon]} = the number of pixels or mass in any box, i, at size \epsilon m {[ i,epsilon ]} = 任何方框中像素或质量的数量,i,大小为 epsilon $\displaystyle{ N_\epsilon }$ = the total boxes that contained more than 0 pixels, for each $\displaystyle{ \epsilon }$ N_\epsilon = the total boxes that contained more than 0 pixels, for each \epsilon n epsilon = 每个 epsilon 包含超过0像素的总盒子 $\displaystyle{ M_\epsilon = \sum_{i=1}^{N_\epsilon}m_{[i,\epsilon]} = }$ the total mass or sum of pixels in all boxes for this $\displaystyle{ \epsilon }$ (Eq.1.0) $\displaystyle{ P_{[i,\epsilon]} = \frac{m_{[i,\epsilon]}}{M_\epsilon} = }$ the probability of this mass at $\displaystyle{ i }$ relative to the total mass for a box size (Eq.2.0) $\displaystyle{ P }$ is used to observe how the pixel distribution behaves when distorted in certain ways as in Eq.3.0 and Eq.3.1: P is used to observe how the pixel distribution behaves when distorted in certain ways as in and : P 用于观察像素分布在以下特定方式扭曲时的表现: $\displaystyle{ Q }$ = an arbitrary range of values to use as exponents for distorting the data set Q = an arbitrary range of values to use as exponents for distorting the data set q = 一个任意范围的值,用作扭曲数据集的指数 $\displaystyle{ I_{{(Q)}_{[\epsilon]}} = \sum_{i=1}^{N_\epsilon} {P_{[i,\epsilon]}^Q} = }$ the sum of all mass probabilities distorted by being raised to this Q, for this box size (Eq.3.0) • When $\displaystyle{ Q=1 }$, Eq.3.0 equals 1, the usual sum of all probabilities, and when $\displaystyle{ Q=0 }$, every term is 
equal to 1, so the sum is equal to the number of boxes counted, $\displaystyle{ N_\epsilon }$. $\displaystyle{ \mu_{{(Q)}_{[i,\epsilon]}} = \frac{P_{[i,\epsilon]}^Q}{I_{{(Q)}_{[\epsilon]}}} = }$ how the distorted mass probability at a box compares to the distorted sum over all boxes at this box size (Eq.3.1) • When Q=1, equals 1, the usual sum of all probabilities, and when Q=0, every term is equal to 1, so the sum is equal to the number of boxes counted, N_\epsilon. These distorting equations are further used to address how the set behaves when scaled or resolved or cut up into a series of $\displaystyle{ \epsilon }$-sized pieces and distorted by Q, to find different values for the dimension of the set, as in the following: These distorting equations are further used to address how the set behaves when scaled or resolved or cut up into a series of \epsilon-sized pieces and distorted by Q, to find different values for the dimension of the set, as in the following: • An important feature of Eq.3.0 is that it can also be seen to vary according to scale raised to the exponent $\displaystyle{ \tau }$ in Eq.4.0: $\displaystyle{ I_{{(Q)}_{[\epsilon]}} \varpropto \epsilon^{\tau_{(Q)}} }$ (Eq.4.0) • An important feature of is that it can also be seen to vary according to scale raised to the exponent \tau in : • 一个重要的特点是,它也可以被视为根据提高到指数 τ 的比例而变化: Thus, a series of values for $\displaystyle{ \tau_{(Q)} }$ can be found from the slopes of the regression line for the log of Eq.3.0 versus the log of $\displaystyle{ \epsilon }$ for each $\displaystyle{ Q }$, based on Eq.4.1: Thus, a series of values for \tau_{(Q)} can be found from the slopes of the regression line for the log of versus the log of \epsilon for each Q, based on : $\displaystyle{ \tau_{(Q)} = {\lim_{\epsilon\to0}{\left[ \frac {\log{I_{{(Q)}_{[\epsilon]}}}} {\log{\epsilon}} \right ]}} }$ (Eq.4.1) $\displaystyle{ D_{(Q)} = {\lim_{\epsilon\to0} { \left [ \frac{\log{I_{{(Q)}_{[\epsilon]}}}}{\log{\epsilon^{-1}}} \right 
]}} {(1-Q)^{-1}} }$ (Eq.5.0) $\displaystyle{ D_{(Q)} = \frac{\tau_{(Q)}}{Q-1} }$ (Eq.5.1) $\displaystyle{ \tau_{{(Q)}_{}} = D_{(Q)}\left(Q-1\right) }$ (Eq.5.2) $\displaystyle{ \tau_{(Q)} = \alpha_{(Q)}Q - f_{\left(\alpha_{(Q)}\right)} }$ (Eq.5.3) • For the generalized dimension: • 关于一般维度: • $\displaystyle{ \alpha_{(Q)} }$ is estimated as the slope of the regression line for log A$\displaystyle{ \epsilon }$,Q versus log $\displaystyle{ \epsilon }$ where: • \alpha_{(Q)} is estimated as the slope of the regression line for versus where: • alpha _ {(q)}估计为回归线的斜率,其中: $\displaystyle{ A_{\epsilon,Q} = \sum_{i=1}^{N_\epsilon}{\mu_{{i,\epsilon}_{Q}}{P_{{i,\epsilon}_{Q}}}} }$ (Eq.6.0) • Then $\displaystyle{ f_{\left(\alpha_{{(Q)}}\right)} }$ is found from Eq.5.3. • Then f_{\left(\alpha_模板:(Q)\right)} is found from . • 然后 f _ { left (alpha _ {{(q)} right)}从。 • The mean $\displaystyle{ \tau_{(Q)} }$ is estimated as the slope of the log-log regression line for $\displaystyle{ \tau_{{(Q)}_{[\epsilon]}} }$ versus $\displaystyle{ \epsilon }$, where: • The mean \tau_{(Q)} is estimated as the slope of the log-log regression line for \tau_{{(Q)}_{[\epsilon]}} versus \epsilon, where: • 平均 tau _ {(q)}被估计为 tau _ {{(q)} _ {[ epsilon ]}}相对于 epsilon 的对数对数回归线的斜率,其中: $\displaystyle{ \tau_{(Q)_{[\epsilon]}} = \frac{\sum_{i=1}^{N_\epsilon} {P_{[i,\epsilon]}^{Q-1}}} {N_\epsilon} }$ (Eq.6.1) In practice, the probability distribution depends on how the dataset is sampled, so optimizing algorithms have been developed to ensure adequate sampling.[22] In practice, the probability distribution depends on how the dataset is sampled, so optimizing algorithms have been developed to ensure adequate sampling. 
## Applications Multifractal analysis has been successfully used in many fields, including physical, information, and biological sciences.[25] For example, the quantification of residual crack patterns on the surface of reinforced concrete shear walls.[26] Multifractal analysis has been successfully used in many fields, including physical, information, and biological sciences. For example, the quantification of residual crack patterns on the surface of reinforced concrete shear walls. ### Dataset distortion analysis Multifractal analysis is analogous to viewing a dataset through a series of distorting lenses to home in on differences in scaling. The pattern shown is a Hénon map. Multifractal analysis has been used in several scientific fields to characterize various types of datasets.[27][5][8] In essence, multifractal analysis applies a distorting factor to datasets extracted from patterns, to compare how the data behave at each distortion. This is done using graphs known as multifractal spectra, analogous to viewing the dataset through a "distorting lens", as shown in the illustration.[22] Several types of multifractal spectra are used in practice. Multifractal analysis has been used in several scientific fields to characterize various types of datasets. In essence, multifractal analysis applies a distorting factor to datasets extracted from patterns, to compare how the data behave at each distortion. This is done using graphs known as multifractal spectra, analogous to viewing the dataset through a "distorting lens", as shown in the illustration. Several types of multifractal spectra are used in practice. #### DQ vs Q DQ vs Q spectra for a non-fractal circle (empirical box counting dimension = 1.0), mono-fractal Quadric Cross (empirical box counting dimension = 1.49), and multifractal Hénon map (empirical box counting dimension = 1.29). 
One practical multifractal spectrum is the graph of DQ vs Q, where DQ is the generalized dimension for a dataset and Q is an arbitrary set of exponents. The expression generalized dimension thus refers to a set of dimensions for a dataset (detailed calculations for determining the generalized dimension using box counting are described below). One practical multifractal spectrum is the graph of DQ vs Q, where DQ is the generalized dimension for a dataset and Q is an arbitrary set of exponents. The expression generalized dimension thus refers to a set of dimensions for a dataset (detailed calculations for determining the generalized dimension using box counting are described below). #### Dimensional ordering The general pattern of the graph of DQ vs Q can be used to assess the scaling in a pattern. The graph is generally decreasing, sigmoidal around Q=0, where D(Q=0) ≥ D(Q=1) ≥ D(Q=2). As illustrated in the figure, variation in this graphical spectrum can help distinguish patterns. The image shows D(Q) spectra from a multifractal analysis of binary images of non-, mono-, and multi-fractal sets. As is the case in the sample images, non- and mono-fractals tend to have flatter D(Q) spectra than multifractals. The general pattern of the graph of DQ vs Q can be used to assess the scaling in a pattern. The graph is generally decreasing, sigmoidal around Q=0, where D(Q=0) ≥ D(Q=1) ≥ D(Q=2). As illustrated in the figure, variation in this graphical spectrum can help distinguish patterns. The image shows D(Q) spectra from a multifractal analysis of binary images of non-, mono-, and multi-fractal sets. As is the case in the sample images, non- and mono-fractals tend to have flatter D(Q) spectra than multifractals. = = = 维序 = = = = DQ 与 q 图的一般模式可以用来评估一个模式中的标度。图一般是递减的,在 q = 0左右,其中 d (q = 0)≥ d (q = 1)≥ d (q = 2)。如图所示,这个图形光谱的变化可以帮助区分模式。图像显示了 d (q)谱从非,单分形和多分形集的二进制图像的多重分形分析。正如在样本图像的情况,非和单分形往往比多分形有更平坦的 d (q)光谱。 The generalized dimension also gives important specific information. 
D(Q=0) is equal to the capacity dimension, which—in the analysis shown in the figures here—is the box counting dimension. D(Q=1) is equal to the information dimension, and D(Q=2) to the correlation dimension. This relates to the "multi" in multifractal, where multifractals have multiple dimensions in the D(Q) versus Q spectra, but monofractals stay rather flat in that area.[22][23] The generalized dimension also gives important specific information. D(Q=0) is equal to the capacity dimension, which—in the analysis shown in the figures here—is the box counting dimension. D(Q=1) is equal to the information dimension, and D(Q=2) to the correlation dimension. This relates to the "multi" in multifractal, where multifractals have multiple dimensions in the D(Q) versus Q spectra, but monofractals stay rather flat in that area. #### $\displaystyle{ f(\alpha) }$ versus $\displaystyle{ \alpha }$ Another useful multifractal spectrum is the graph of $\displaystyle{ f(\alpha) }$ versus $\displaystyle{ \alpha }$ (see calculations). These graphs generally rise to a maximum that approximates the fractal dimension at Q=0, and then fall. Like DQ versus Q spectra, they also show typical patterns useful for comparing non-, mono-, and multi-fractal patterns. In particular, for these spectra, non- and mono-fractals converge on certain values, whereas the spectra from multifractal patterns typically form humps over a broader area. Another useful multifractal spectrum is the graph of f(\alpha) versus \alpha (see calculations). These graphs generally rise to a maximum that approximates the fractal dimension at Q=0, and then fall. Like DQ versus Q spectra, they also show typical patterns useful for comparing non-, mono-, and multi-fractal patterns. In particular, for these spectra, non- and mono-fractals converge on certain values, whereas the spectra from multifractal patterns typically form humps over a broader area. 
= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =.这些曲线通常会上升到最大值,接近于 q = 0的分形维数,然后下降。与 DQ 和 q 光谱一样,它们也显示了用于比较非分形、单分形和多分形模式的典型模式。特别是,对于这些光谱来说,非分形和单分形集中在某些特定的值上,而多分形图案的光谱通常在更广的区域形成峰。 ### Generalized dimensions of species abundance distributions in space One application of Dq versus Q in ecology is characterizing the distribution of species. Traditionally the relative species abundances is calculated for an area without taking into account the locations of the individuals. An equivalent representation of relative species abundances are species ranks, used to generate a surface called the species-rank surface,[28] which can be analyzed using generalized dimensions to detect different ecological mechanisms like the ones observed in the neutral theory of biodiversity, metacommunity dynamics, or niche theory.[28][29] One application of Dq versus Q in ecology is characterizing the distribution of species. Traditionally the relative species abundances is calculated for an area without taking into account the locations of the individuals. An equivalent representation of relative species abundances are species ranks, used to generate a surface called the species-rank surface, which can be analyzed using generalized dimensions to detect different ecological mechanisms like the ones observed in the neutral theory of biodiversity, metacommunity dynamics, or niche theory. = = = 空间物种多度分布的广义维数 = = = = Dq 与 q 在生态学中的一个应用是描述物种的分布。传统上计算一个地区的相对物种丰度时没有考虑个体的位置。相对物种丰度的等价表示是物种等级,用于生成一个称为物种等级面的表面,这个表面可以用广义维度来分析,以发现不同的生态机制,如生物多样性中性理论、协整动力学或生态位理论中观察到的机制。 • Fractional Brownian motion • Detrended fluctuation analysis • Tweedie distributions • Markov switching multifractal • Weighted planar stochastic lattice (WPSL) # = = • 分数布朗运动 • 去趋势涨落分析 • Tweedie 分布 • Markov 切换多重分形 • 加权平面随机晶格(WPSL) ## References 1. Harte, David (2001). Multifractals. London: Chapman & Hall. 
ISBN 978-1-58488-154-4. 2. Gerges, Firas; Geng, Xiaolong; Nassif, Hani; Boufadel, Michel C. (2021). "Anisotropic Multifractal Scaling of Mount Lebanon Topography: Approximate Conditioning". Fractals (in English). 29 (05): 2150112. doi:10.1142/S0218348X21501127. ISSN 0218-348X. 3. Ivanov, Plamen Ch.; Amaral, Luís A. Nunes; Goldberger, Ary L.; Havlin, Shlomo; Rosenblum, Michael G.; Struzik, Zbigniew R.; Stanley, H. Eugene (1999-06-03). "Multifractality in human heartbeat dynamics". Nature (in English). 399 (6735): 461–465. arXiv:cond-mat/9905329. doi:10.1038/20924. ISSN 0028-0836. PMID 10365957. Unknown parameter |s2cid= ignored (help) 4. Simon, Sheldon R.; Paul, Igor L.; Mansour, Joseph; Munro, Michael; Abernethy, Peter J.; Radin, Eric L. (January 1981). "Peak dynamic force in human gait". Journal of Biomechanics. 14 (12): 817–822. doi:10.1016/0021-9290(81)90009-9. PMID 7328088. 5. França, Lucas Gabriel Souza; Montoya, Pedro; Miranda, José Garcia Vivas (2019). "On multifractals: A non-linear study of actigraphy data". Physica A: Statistical Mechanics and Its Applications. 514: 612–619. arXiv:1702.03912. doi:10.1016/j.physa.2018.09.122. ISSN 0378-4371. Unknown parameter |s2cid= ignored (help) 6. Papo, David; Goñi, Joaquin; Buldú, Javier M. (2017). "Editorial: On the relation of dynamics and structure in brain networks". Chaos: An Interdisciplinary Journal of Nonlinear Science (in English). 27 (4): 047201. Bibcode:2017Chaos..27d7201P. doi:10.1063/1.4981391. ISSN 1054-1500. PMID 28456177. 7. Ciuciu, Philippe; Varoquaux, Gaël; Abry, Patrice; Sadaghiani, Sepideh; Kleinschmidt, Andreas (2012). "Scale-free and multifractal properties of fMRI signals during rest and task". Frontiers in Physiology (in English). 3: 186. doi:10.3389/fphys.2012.00186. ISSN 1664-042X. PMC 3375626. PMID 22715328. 8. França, Lucas G. Souza; Miranda, José G. Vivas; Leite, Marco; Sharma, Niraj K.; Walker, Matthew C.; Lemieux, Louis; Wang, Yujiang (2018). 
"Fractal and Multifractal Properties of Electrographic Recordings of Human Brain Activity: Toward Its Use as a Signal Feature for Machine Learning in Clinical Applications". Frontiers in Physiology (in English). 9: 1767. arXiv:1806.03889. Bibcode:2018arXiv180603889F. doi:10.3389/fphys.2018.01767. ISSN 1664-042X. PMC 6295567. PMID 30618789. 9. Ihlen, Espen A. F.; Vereijken, Beatrix (2010). "Interaction-dominant dynamics in human cognition: Beyond 1/ƒα fluctuation". Journal of Experimental Psychology: General (in English). 139 (3): 436–463. doi:10.1037/a0019098. ISSN 1939-2222. PMID 20677894. 10. Zhang, Yanli; Zhou, Weidong; Yuan, Shasha (2015). "Multifractal Analysis and Relevance Vector Machine-Based Automatic Seizure Detection in Intracranial EEG". International Journal of Neural Systems (in English). 25 (6): 1550020. doi:10.1142/s0129065715500203. ISSN 0129-0657. PMID 25986754. 11. Suckling, John; Wink, Alle Meije; Bernard, Frederic A.; Barnes, Anna; Bullmore, Edward (2008). "Endogenous multifractal brain dynamics are modulated by age, cholinergic blockade and cognitive performance". Journal of Neuroscience Methods. 174 (2): 292–300. doi:10.1016/j.jneumeth.2008.06.037. ISSN 0165-0270. PMC 2590659. PMID 18703089. 12. Zorick, Todd; Mandelkern, Mark A. (2013-07-03). "Multifractal Detrended Fluctuation Analysis of Human EEG: Preliminary Investigation and Comparison with the Wavelet Transform Modulus Maxima Technique". PLOS ONE (in English). 8 (7): e68360. Bibcode:2013PLoSO...868360Z. doi:10.1371/journal.pone.0068360. ISSN 1932-6203. PMC 3700954. PMID 23844189. 13. Gaston, Kevin J.; Richard Inger; Bennie, Jonathan; Davies, Thomas W. (2013-04-24). "Artificial light alters natural regimes of night-time sky brightness". Scientific Reports (in English). 3: 1722. Bibcode:2013NatSR...3E1722D. doi:10.1038/srep01722. ISSN 2045-2322. PMC 3634108. 14. Kendal, WS; Jørgensen, BR (2011). 
"Tweedie convergence: a mathematical basis for Taylor's power law, 1/f noise and multifractality". Phys. Rev. E. 84 (6 Pt 2): 066120. Bibcode:2011PhRvE..84f6120K. doi:10.1103/physreve.84.066120. PMID 22304168. 15. Jørgensen, B; Kokonendji, CC (2011). "Dispersion models for geometric sums". Braz J Probab Stat. 25 (3): 263–293. doi:10.1214/10-bjps136. 16. Kendal, WS (2014). "Multifractality attributed to dual central limit-like convergence effects". Physica A. 401: 22–33. Bibcode:2014PhyA..401...22K. doi:10.1016/j.physa.2014.01.022. 17. Lopes, R.; Betrouni, N. (2009). "Fractal and multifractal analysis: A review". Medical Image Analysis. 13 (4): 634–649. doi:10.1016/j.media.2009.05.003. PMID 19535282. 18. Moreno, P. A.; Vélez, P. E.; Martínez, E.; Garreta, L. E.; Díaz, N. S.; Amador, S.; Tischer, I.; Gutiérrez, J. M.; Naik, A. K.; Tobar, F. N.; García, F. (2011). "The human genome: A multifractal analysis". BMC Genomics. 12: 506. doi:10.1186/1471-2164-12-506. PMC 3277318. PMID 21999602. 19. Atupelage, C.; Nagahashi, H.; Yamaguchi, M.; Sakamoto, M.; Hashiguchi, A. (2012). "Multifractal feature descriptor for histopathology". Analytical Cellular Pathology. 35 (2): 123–126. doi:10.1155/2012/912956. PMC 4605731. PMID 22101185. 20. A.J. Roberts and A. Cronin (1996). "Unbiased estimation of multi-fractal dimensions of finite data sets". Physica A. 233 (3): 867–878. arXiv:chao-dyn/9601019. Bibcode:1996PhyA..233..867R. doi:10.1016/S0378-4371(96)00165-3. Unknown parameter |s2cid= ignored (help) 21. Roberts, A. J. (7 August 2014). "Multifractal estimation—maximum likelihood". University of Adelaide. Retrieved 4 June 2019. 22. Karperien, A (2002), What are Multifractals?, ImageJ, archived from the original on 2012-02-10, retrieved 2012-02-10 23. Chhabra, A.; Jensen, R. (1989). "Direct determination of the f(α) singularity spectrum". Physical Review Letters. 62 (12): 1327–1330. Bibcode:1989PhRvL..62.1327C. doi:10.1103/PhysRevLett.62.1327. PMID 10039645. 24. Posadas, A. N. 
D.; Giménez, D.; Bittelli, M.; Vaz, C. M. P.; Flury, M. (2001). "Multifractal Characterization of Soil Particle-Size Distributions". Soil Science Society of America Journal. 65 (5): 1361. Bibcode:2001SSASJ..65.1361P. doi:10.2136/sssaj2001.6551361x. 25. Lopes, R.; Betrouni, N. (2009). "Fractal and multifractal analysis: A review". Medical Image Analysis. 13 (4): 634–649. doi:10.1016/j.media.2009.05.003. PMID 19535282. 26. Ebrahimkhanlou, Arvin; Farhidzadeh, Alireza; Salamone, Salvatore (2016-01-01). "Multifractal analysis of crack patterns in reinforced concrete shear walls". Structural Health Monitoring (in English). 15 (1): 81–92. doi:10.1177/1475921715624502. ISSN 1475-9217. Unknown parameter |s2cid= ignored (help) 27. Trevino, J.; Liew, S. F.; Noh, H.; Cao, H.; Dal Negro, L. (2012). "Geometrical structure, multifractal spectra and localized optical modes of aperiodic Vogel spirals". Optics Express. 20 (3): 3015–33. Bibcode:2012OExpr..20.3015T. doi:10.1364/OE.20.003015. PMID 22330539. 28. Saravia, Leonardo A. (2015-08-01). "A new method to analyse species abundances in space using generalized dimensions". Methods in Ecology and Evolution. 6 (11): 1298–1310. doi:10.1111/2041-210X.12417. ISSN 2041-210X. 29. Saravia, Leonardo A. (2014-01-01). "mfSBA: Multifractal analysis of spatial patterns in ecological communities". F1000Research. 3: 14. doi:10.12688/f1000research.3-14.v2. PMC 4197745. PMID 25324962. 30. Hassan, M. K.; Hassan, M. Z.; Pavel, N. I. (2010). "Scale-free network topology and multifractality in a weighted planar stochastic lattice". New Journal of Physics. 12 (9): 093045. arXiv:1008.4994. Bibcode:2010NJPh...12i3045H. doi:10.1088/1367-2630/12/9/093045. Unknown parameter |s2cid= ignored (help)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8769224286079407, "perplexity": 3210.461139774334}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-05/segments/1642320303779.65/warc/CC-MAIN-20220122073422-20220122103422-00480.warc.gz"}
http://www.computer.org/csdl/trans/ts/1978/03/01702518-abs.html
The Community for Technology Leaders Subscribe Issue No.03 - May (1978 vol.4) pp: 169-178 U.R. Kodres , Department of Computer Science, Naval Postgraduate School ABSTRACT The concept of a data flowgraph is formalized as a bipartite directed graph. Each execution sequence of a computer program has a corresponding data flowgraph which describes functionally what happens to the data if that execution sequence is followed. INDEX TERMS program analysis, Bipartite graph, control complexity, data flowgraph, discrete systems, execution sequence, independence, parallel processes CITATION U.R. Kodres, "Analysis of Real-Time Systems by Data Flowgraphs", IEEE Transactions on Software Engineering, vol.4, no. 3, pp. 169-178, May 1978, doi:10.1109/TSE.1978.231495 FULL ARTICLE SEARCH 461 ms (Ver 2.0) Marketing Automation Platform
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8478227257728577, "perplexity": 10095.938387177623}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": false}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-22/segments/1432207927592.52/warc/CC-MAIN-20150521113207-00041-ip-10-180-206-219.ec2.internal.warc.gz"}
http://www.physicsforums.com/printthread.php?t=120727
Physics Forums (http://www.physicsforums.com/index.php) -   Calculus (http://www.physicsforums.com/forumdisplay.php?f=109) -   -   Integral: square root of tan x (http://www.physicsforums.com/showthread.php?t=120727) Jeremy May13-06 05:11 PM integral: square root of tan x My class, teacher included, cannot seem to figure out the integral of the square root of tan x. Maybe someone here can help? thanks, jeremy Emieno May13-06 06:03 PM why don't you volunteer to get acreditted ? Orion1 May13-06 11:05 PM Examine the given references listed in the archive. Reference: arildno May14-06 06:14 AM The simplest way is to set u=sqrt(tan(x)); you'll end up with a rational integrand that you may decompose with partial fractions. (Remember that sec^2(x)=tan^2(x)+1=u^4+1) Hootenanny May14-06 06:32 AM This may be a bit simplistic but why can't you simply do; $$\int \sqrt{\tan x} \;\; dx = \int \tan^{\frac{1}{2}}x \;\; dx$$ $$= \frac{3}{2}\tan^{\frac{3}{2}} x = \frac{3}{2}\tan x \sqrt{\tan x}$$ ~H arildno May14-06 07:15 AM Hmm..because it is wrong perhaps? (Differentiate your last expression and see if you get your integrand) arunbg May14-06 07:30 AM Well Hoot, what you have done is considered tan(x)=u and integrated u^1/2 du .But you haven't changed dx to du.You can do this as $$u=tan(x)^{\frac{1}{2}}$$ $$\frac{du}{dx}=\frac{sec^2(x)}{2\sqrt{tan(x)}}$$ and then find du and so the integrand changes. Just follow Orion's thread to see how it is done. We had the exact same question for our final board exams in India. It took me 10 mins of precious time and two pages of trial to finally get to the answer( a very big one mind you).And to think you did it all for 3 marks in a 100 mark paper.Phew! PS:Something wrong with latex? I just can't seem to edit them. PPS:Hoot, even if you are integrating u^(1/2) it would be 2/3u^3/2 Hootenanny May14-06 07:35 AM Ahh, dammit! I knew it was too simple. It's a repeat of my xmas exams when I did a very similar thing with secant! :frown: Sorry guys! 
~H dextercioby May16-06 05:22 AM It's much more interesting to consider $\int \sqrt{\sin x} \ dx$ Daniel. Orion1 May16-06 07:11 AM $\int \sqrt{\tan x} \;\; dx = \frac{1}{2 \sqrt{2}} [ 2 \tan^{-1} (1 - \sqrt{2} \sqrt{\tan x} ) + 2 \tan^{-1} ( \sqrt{2} \sqrt{\tan x} + 1 ) + ...$ $\ln (| - \tan (x) + \sqrt{2} \sqrt{\tan x} - 1 |) - \ln (| \tan x + \sqrt{2} \sqrt{\tan x} + 1 |)]$ Reference: nrqed May16-06 12:37 PM Quote: Quote by dextercioby It's much more interesting to consider $\int \sqrt{\sin x} \ dx$ Daniel. As helpful as usual... arildno May16-06 01:13 PM Quote: Quote by dextercioby It's much more interesting to consider $\int \sqrt{\sin x} \ dx$ Daniel. What's interesting about the integral: $$\int\frac{2u^{2}du}{\sqrt{1-u^{4}}}$$ :confused: Orion1 May16-06 10:19 PM Quote: Quote by arildo $$\int\frac{2u^{2}du}{\sqrt{1-u^{4}}}$$ $$\int\frac{2u^{2}du}{\sqrt{1-u^{4}}} = \frac{-2\,{\sqrt{1 - u^2}}\,{\sqrt{1 + u^2}}\,\left( -\text{EllipticE}(\sin^{-1} u,-1) + \text{EllipticF}(\sin^{-1} u,-1) \right) }{{\sqrt{1 - u^4}}}$$ Arildno, what are you suggesting for $$u$$? dx May17-06 12:36 AM while were on the topic of integrating expressions that contain square roots of trigonometric functions, I was having a hard time a while ago evaluating this $$\int{\frac{1}{\sqrt{sin x}}dx$$ Curious3141 May17-06 01:20 AM Quote: Quote by dx while were on the topic of integrating expressions that contain square roots of trigonometric functions, I was having a hard time a while ago evaluating this $$\int{\frac{1}{\sqrt{sin x}}dx$$ It is always a good idea when confronted with an unfamiliar integral, to verify that it can be done before expending effort to figure out how. 
Mathematica is a good tool, or you use the free WebMathematica equivalent at http://integrals.wolfram.com/index.jsp Orion1 May17-06 01:26 AM $$F(z|m) = \text{EllipticF}[z,m] = \int_0^z \frac{1}{\sqrt{1 - m \sin^2 t}} dt$$ $$\int{\frac{1}{\sqrt{\sin x}}dx = \int_0^{\frac{1}{2} \left( \frac{\pi}{2} - x \right)} \frac{1}{\sqrt{1 - 2 \sin^2 t}} dt = -2\text{EllipticF} \left[ \frac{1}{2} \left( \frac{\pi}{2} - x \right), 2 \right]$$ Reference: http://functions.wolfram.com/Ellipti.../EllipticF/02/ arildno May17-06 05:13 AM Given a function f with domain D, the function $$G(x)=\int_{x_{0}}^{x}f(y)dy, x_{0}, y, x\in{D}$$ is seen to have no larger domain than f. Since the definite integral can't generate any singularities on its own (integration is a "smoothing" process), it is seen that G doesn't have a less domain than f. Thus, G has the same domain as f. Orion1 May17-06 06:27 AM Given that $\sqrt{\tan x}$ is valid in Quadrants I,III then the specific domains for this function are: $$D: \left[ 0, \frac{\pi}{2} \right) \; \; \; I$$ $$D: \left[ \pi, \frac{3 \pi}{2} \right) \; \; \; III$$ The third equation component in post #10 is: $$\ln ( - \tan x + \sqrt{2} \sqrt{\tan x} - 1 )$$ Placing the component in a point within its own domain produces: $$\ln \left( - \tan \frac{\pi}{4} + \sqrt{2} \sqrt{\tan \frac{\pi}{4}} - 1 \right) = \ln ( - 1 + \sqrt{2} \sqrt{1} - 1) = \ln ( \sqrt{2} - 2)$$ Taking the 'sign' of internal component $\ln [sgn(\sqrt{2} - 2)]$ yields: $$\ln (-1)$$ Reference: http://mathworld.wolfram.com/Singularity.html http://www.physicsforums.com/showpos...7&postcount=10 All times are GMT -5. The time now is 10:37 PM.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8978778123855591, "perplexity": 4354.059195061082}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-41/segments/1410657120974.20/warc/CC-MAIN-20140914011200-00246-ip-10-196-40-205.us-west-1.compute.internal.warc.gz"}
https://keras.rstudio.com/reference/application_xception.html
Xception V1 model for Keras. application_xception(include_top = TRUE, weights = "imagenet", input_tensor = NULL, input_shape = NULL, pooling = NULL, classes = 1000) xception_preprocess_input(x) ## Arguments include_top whether to include the fully-connected layer at the top of the network. NULL (random initialization), imagenet (ImageNet weights), or the path to the weights file to be loaded. optional Keras tensor to use as image input for the model. optional shape list, only to be specified if include_top is FALSE (otherwise the input shape has to be (299, 299, 3)). It should have exactly 3 input channels, and width and height should be no smaller than 75. E.g. (150, 150, 3) would be one valid value. Optional pooling mode for feature extraction when include_top is FALSE. NULL means that the output of the model will be the 4D tensor output of the last convolutional layer. avg means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. max means that global max pooling will be applied. optional number of classes to classify images into, only to be specified if include_top is TRUE, and if no weights argument is specified. Input tensor for preprocessing ## Value A Keras model instance. ## Details On ImageNet, this model gets to a top-1 validation accuracy of 0.790 and a top-5 validation accuracy of 0.945. Do note that the input image format for this model is different than for the VGG16 and ResNet models (299x299 instead of 224x224). The xception_preprocess_input() function should be used for image preprocessing. This application is only available when using the TensorFlow back-end.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.1675073206424713, "perplexity": 1797.2001022037405}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-18/segments/1555578526923.39/warc/CC-MAIN-20190419001419-20190419022547-00036.warc.gz"}
https://www.padheye.com/2015/09/13/1-mark-questions-of-physical-world-and-measurement/
Very short answer type questions, (1 mark question) Q1. State one law that holds good in all natural processes. Ans. One such law is Newton's law of gravitation. According to this law, every body in nature attracts every other body with a force of attraction that is directly proportional to the product of their masses and inversely proportional to the square of the distance between them. Q2: Among which type of elementary particles does the electromagnetic force act? Ans: The electromagnetic force acts on all electrically charged particles. Q3. Name the forces having the longest and shortest range of operation. Ans: The gravitational force has the longest range and the nuclear force has the shortest range. Q4. If 'slap' times speed equals power, what will be the dimensional equation for 'slap'? Ans. Slap x speed = power Or slap = power/speed = $[MLT^{-2}]$ Q5. If the units of force and length each are doubled, then how many times the unit of energy would be affected? Ans: Energy = Work done = Force x length So when the units are doubled, the unit of energy will increase four times. Q6. Can a quantity have dimensions but still have no units? Ans: No, a quantity having dimensions must have some unit of measurement. Q7. Justify L + L = L and L − L = L. Ans: When we add or subtract a length from a length we get a length, so L + L = L and L − L = L are justified. Q8. Can there be a physical quantity that has no unit and no dimensions? Ans: Yes, for example strain. Q9. Given relative error in the measurement of length is 0.02, what is the percentage error? Ans: percentage error = 2 %
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.958317756652832, "perplexity": 1099.2662204671556}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-47/segments/1573496668534.60/warc/CC-MAIN-20191114182304-20191114210304-00267.warc.gz"}
http://www.perlmonks.org/index.pl?node_id=118207
We don't bite newbies here... much PerlMonks ### Re: Re: Re: Perl under Win 2000 & IIS 5.0 by earthboundmisfit (Chaplain) on Oct 11, 2001 at 17:27 UTC ( #118207=note: print w/ replies, xml ) Need Help?? in reply to Re: Re: Perl under Win 2000 & IIS 5.0 in thread Perl under Win 2000 & IIS 5.0 Contents of your PATH env var? E:\Perl\bin\ needs to be in there. You say that the script mapping is configured a certain way, but have you checked to make sure it is defined both for the top level and the server instance below? The top level in the IIS Manager is the machine name. Server instances run under it in the tree display of the Manager and are named 'WWW Server' by default. Perhaps you have the top level script mapping defined, but there is an anomaly in the server instance. Try perl -V and check to make sure the @INC paths are right. Comment on Re: Re: Re: Perl under Win 2000 & IIS 5.0 Create A New User Node Status? node history Node Type: note [id://118207] help Chatterbox? and the web crawler heard nothing... How do I use this? | Other CB clients Other Users? Others pondering the Monastery: (13) As of 2014-09-30 19:45 GMT Sections? Information? Find Nodes? Leftovers? Voting Booth?
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9546681642532349, "perplexity": 9505.782798030667}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-41/segments/1412037663060.18/warc/CC-MAIN-20140930004103-00346-ip-10-234-18-248.ec2.internal.warc.gz"}
https://aacrjournals.org/cebp/article/27/4/503/71569/Hepatitis-C-Virus-Screening-Trends-Serial-Cross
Background: Rates of hepatitis C virus (HCV) infection are markedly higher for baby boomers compared with other birth cohorts, and they are now recommended for universal one-time screening. This study examines HCV screening rates and predictors for four birth cohorts [born <1945, born 1945–1965 (baby boomers), born 1966–1985, and born >1985] of a nationally representative sample over time. Methods: We used data from the 2013–2015 National Health Interview Surveys, an annual weighted survey of the U.S. civilian noninstitutionalized population. We assessed HCV screening prevalence stratified by birth cohort with bivariate and multivariable logistic regression analyses. Results: There were 15,100 participants born <1945, 28,725 baby boomers, 28,089 born 1966–1985, and 13,296 born >1985 in the final analytic sample. Screening was 11.5%–12.8% for baby boomers. The second youngest birth cohort was similar to baby boomers (13.7%–14.9%), whereas the older birth cohort was screened less. After excluding participants who typically have higher rates of HCV screening than the general population, we developed a multivariable model of the general population. In the final model for baby boomers the odds of HCV screening increased significantly with each subsequent year (OR=1.20; 95% CI=1.05–1.38 and OR=1.31; 95% CI=1.13–1.52). HCV screening was also significantly associated with age, gender, and race/ethnicity in baby boomers. Conclusions: While HCV screening is increasing over time, these increases are minimal and there is substantial room for improvement. Impact: Future research should develop interventions to increase HCV screening with special focus on groups demonstrating significantly lower screening rates, such as Hispanics and females. Cancer Epidemiol Biomarkers Prev; 27(4); 503–13. ©2018 AACR. Hepatocellular carcinoma (HCC) is one of the few cancers that have increased in incidence and mortality over the last decade in the United States (1).
Death from HCC increased by 56% from 2003 to 2012 (2). The strongest single predictor of HCC in the general population in the United States is chronic hepatitis C virus (HCV) infection, which accounts for approximately half of all HCC incidence (2, 3). Approximately 2.7 to 3.9 million people in the United States are currently chronically HCV infected, according to the Centers for Disease Control and Prevention (CDC; ref. 4). Until 2012, national guidelines recommended risk-based HCV screening for those who ever injected illegal drugs, had selected medical conditions (e.g., persistently abnormal alanine aminotransferase levels), received blood or organ donation before July 1992, had a possible occupational exposure, or were born to an HCV-positive mother. In 2012, in light of new research demonstrating three out of four people with HCV were born between 1945 and 1965 (baby boomers; ref. 5), the CDC augmented their risk-based recommendations to also include a one-time HCV screening for all baby boomers (6). The U.S. Preventive Services Task Force (USPSTF) issued a similar recommendation in 2013 (7). Risk-based screening alone fails to identify more than 50% of people currently living with a chronic HCV infection, and 75% of those missed would be identified through universal age-based screening for the 1945–1965 birth cohort (8). Cost-effectiveness analyses demonstrate one-time testing in this birth cohort produces an incremental cost-effectiveness ratio observed in other cancer screenings (9, 10). Despite these recommendations and potential benefits, an analysis of the 2013 National Health Interview Survey (NHIS) showed only 12% of individuals born between 1945–1965 reported ever being screened for HCV (11). Prior to 2014, treatment for HCV infection consisted of interferon and ribavirin which are associated with significant side effects, poor tolerability, and low cure rates (30%–40%; refs. 12, 13), leading to very limited treatment uptake (14). 
However, in October 2014 the Food and Drug Administration approved new oral and well-tolerated direct-acting antiviral treatments with exceptionally high cure rates (>90%; refs. 15–18). It is unknown whether the national screening guidelines and the availability of highly curable and tolerable treatment have influenced the rates of HCV screening, as HCV screening trends have not been assessed across the years following these changes. Without intervention, the United States is unlikely to meet its Healthy People 2020 HCV screening goal of having at least 60% of those infected with HCV aware of their infection (19). To assess whether HCV screening has changed in the overall U.S. population or by risk group (e.g., baby boomer, etc.), we conducted an analysis of NHIS HCV screening data for the period 2013–2015, the years immediately following the changed national recommendations for screening and availability of tolerable curative therapies. Specifically we (i) report serial cross-sectional HCV screening rates for four birth cohorts of a nationally representative sample from 2013 to 2015 and (ii) evaluate factors associated with HCV screening by birth cohort. Design, setting, and participants The NHIS is an annual, serial, cross-sectional national survey conducted in-person through a computer-assisted household interview. A nationally representative sample of the civilian noninstitutionalized population in the United States is generated using a stratified, multistage, cluster sample design (20). A detailed survey description and questionnaires can be accessed at http://www.cdc.gov/nchs/nhis/index.htm. For these analyses, we included the total sample from the 2013, 2014, and 2015 NHIS (the years in which complete data are available). 
The sample was divided by birth cohort to compare those who are recommended for age-based screening (baby boomers), those born before 1945 (older than the baby boomers), those born 1966–1985 (second youngest group), and those born after 1985 (youngest group) to assess whether screening prevalence is increasing in the baby boomer population compared with other birth cohorts during the same timeframe. Measures and data analysis Starting in the year 2013, every adult respondent was asked the question: Have you ever had a blood test for hepatitis C? Response options included: “yes,” “no,” “refuse,” “I don't know,” and “not ascertained.” A binary variable for the outcome was created such that “no,” “refused,” “I don't know,” and “not ascertained” were compared with “yes” in response to the question. Any participant with missing data regarding the outcome variable was excluded from the study. Descriptive statistics were generated for the entire sample by survey year and birth cohort and included the following: demographic variables, variables related to factors that would increase the probability of HCV screening, and indicators of participants' use of preventive health services. Variables assessed that are known to be related to an increased probability that the participant had been screened for HCV include: working in healthcare (21), former, regular alcohol consumption (22), lived with someone with hepatitis, or a personal history of liver cancer, hepatitis, any liver condition, or a chronic liver condition (6). Indicators of participants' use of preventive health services included whether a participant had: seen a healthcare provider in last 12 months, health insurance, ever been tested for HIV, a blood pressure or cholesterol check in the last 12 months, and a colon cancer test in last 12 months. For all other analyses, the sample was weighted using the standard NHIS-based approach applying the 2010 decennial census to best obtain population estimates. 
Each participant was assigned a sampling weight equal to the inverse probability of the participant being selected and was adjusted for survey nonresponse. We assessed HCV screening prevalence by year and birth cohort as well as prevalence of the respective reasons for receiving HCV screening, among those screened. There are some groups of people who have higher prevalence of HCV infection or that are more likely to have been screened for HCV as compared with the general population. These special populations include participants with a personal history of liver cancer (23), a personal history of hepatitis (of any type; ref. 24), a chronic liver condition (24), any liver condition (5), lived with someone who had hepatitis (2), reported previous high alcohol use (“former, regular drinker”; ref. 8), or reported that they work in healthcare (4). As our primary goal was to assess HCV screening among the healthy general U.S. population subsequent analyses excluded these special populations. Bivariate logistic regression analyses stratified by birth cohort were conducted to assess factors associated with HCV screening using the data combined across the three years of study. Finally, a multivariable logistic regression model per birth cohort was developed using backward elimination approach with a P value of 0.05 required to stay in the model. Some of the preventive health services we assessed are not recommended for all of the included birth cohorts and analyzing a variable in the model that is not routinely recommended would not necessarily be an indicator of preventive health services use, but rather a reflection of a more serious health issue for that individual. Therefore, based on USPSTF recommendations for screening (25–27), in the youngest two birth cohorts we removed cholesterol check in the last 12 months, and colon cancer test in the last 12 months from the model. 
In addition, we also removed blood pressure check in the last 12 months from the youngest age group. All statistical analyses were conducted with SAS, version 9.4 and SAS-callable SUDAAN, version 11.0.1 in 2017. Sample description Survey data included 88,744 participants from 2013 (n = 29,275), 2014 (n = 31,128), and 2015 (n = 28,341). After excluding 3,534 participants for whom birth year was unknown, the final analyses included 15,100 who were born before 1945 (older), 28,725 baby boomers, 28,089 born 1966–1985 (second youngest), and born after 1985 (youngest) for a total analytic sample size of 85,210. Demographic characteristics varied between birth cohorts. For example, 61.0% of those born after 1985 were non-Hispanic white while this group comprised 73.5% of baby boomers, and 79.9% of those born before 1945. For a full sample description, see Table 1. Table 1. Sociodemographic characteristics of participants in the 2013-2015 National Health Interview Surveysa Born post-1985b (n = 13,296)Born 1966–1985b (n = 28,089)Born 1945–1965b (n = 28,725)Born pre-1945b (n = 15,100) Population characteristics% (95%CI)% (95%CI)% (95%CI)% (95%CI) Demographic characteristics Region Northeast 14.4 (12.9–16.1) 15.8 (15.0–16.7) 17.8 (17.0–18.5) 19.6 (18.3–21.0) Midwest 24.8 (22.4–27.4) 23.4 (22.3–24.5) 23.5 (22.7–24.3) 24.1 (22.8–25.5) South 38.5 (35.9–41.1) 38.1 (36.9–39.2) 38.6 (37.6–39.6) 37.1 (35.6–38.7) West 22.3 (20.3–24.3) 22.7 (21.8–23.7) 20.1 (19.3–21.0) 19.1 (18.0–20.2) Age (Mean; 95% CI) 23.6 (23.5–23.8) 38.2 (38.1–38.3) 58.8 (58.7–58.9) 77.6 (77.5–77.8) Race/Ethnicity Non-Hispanic white 61.0 (59.4–62.6) 61.2 (60.3–62.2) 73.5 (72.6–74.4) 79.9 (78.9–80.9) Non-Hispanic black 14.3 (13.2–15.5) 13.6 (12.9–14.2) 12.5 (11.8–13.1) 9.5 (8.7–10.3) Non-Hispanic Asian 3.7 (3.3–4.2) 3.9 (3.6–4.3) 2.1 (1.9–2.3) 1.9 (1.6–2.2) Non-Hispanic Other 3.1 (2.8–3.6) 3.1 (2.8–3.4) 2.3 (1.9–2.3) 1.9 (1.7–2.2) Hispanic 17.8 (16.7–19.0) 18.2 (17.4–19.0) 9.6 (9.1–10.1) 6.8 (6.3–7.3) Gender Male 
50.5 (49.3–51.6) 50.4 (49.6–51.2) 48.6 (47.9–49.3) 40.0 (39.0–40.9) Female 49.5 (48.4–50.7) 49.6 (48.8–50.4) 51.4 (50.7–52.1) 60.0 (59.1–61.0) Education Less than high school graduate 11.0 (10.2–11.9) 12.0 (11.5–12.6) 12.2 (11.7–12.7) 21.4 (20.4–22.4) High school graduate or GED 25.4 (24.1–26.8) 21.9 (21.2–22.5) 26.9 (26.2–27.7) 31.7 (30.7–32.8) Some college/associate's degree 40.7 (38.8–42.7) 28.9 (28.2–29.7) 29.6 (28.9–30.2) 23.6 (22.7–24.5) Bachelor's degree or higher 22.6 (21.3–24.1) 36.9 (36.0–37.9) 30.9 (30.1–31.7) 22.5 (21.5–23.6) Marital status Married/living with partner 28.1 (26.7–29.6) 60.2 (59.4–61.0) 55.0 (54.2–55.8) 39.6 (38.5–40.7) Not currently married (includes divorced, separated, and widowed) 2.3 (2.0–2.7) 15.3 (14.8–15.9) 32.5 (31.8–33.2) 55.5 (54.4–56.6) Never married 69.6 (68.0–71.0) 24.4 (23.7–25.2) 12.5 (11.9–13.1) 4.9 (4.4–5.4) Income <$35,000 52.0 (49.9–54.2) 30.6 (29.7–31.4) 34.2 (33.3–35.1) 53.3 (52.0–54.7)$35,000–$74,999 28.8 (27.4–30.3) 31.5 (30.7–32.2) 30.5 (29.8–31.3) 30.8 (29.7–31.9)$75,000–$99,999 8.0 (7.4–8.7) 13.4 (12.8–13.9) 11.7 (11.2–12.2) 6.9 (6.4–7.5)$100,000+ 11.1 (10.2–12.1) 24.6 (23.8–25.4) 23.6 (22.8–24.4) 9.0 (8.4–9.6) Risk factors Alcohol use Lifetime abstainer 25.7 (24.5–27.0) 16.1 (15.5–16.6) 16.5 (15.9–17.1) 27.8 (26.8–28.8) Former 4.7 (4.3–5.1) 9.8 (9.4–10.3) 18.3 (17.7–19.0) 26.0 (25.1–26.9) Current infrequent/light/unknown frequency 43.2 (42.2–44.3) 48.8 (48.0–49.6) 41.9 (41.1–42.6) 30.8 (29.8–31.8) Current moderate/heavy 25.0 (23.7–26.4) 23.7 (23.0–24.5) 21.7 (21.1–22.4) 14.1 (13.3–14.9) Drinking status unknown 1.4 (1.1–1.6) 1.6 (1.4–1.8) 1.6 (1.4–1.8) 1.4 (1.2–1.7) Health care factors Saw/talked to HCP in last 12 months No 44.5 (44.3–46.6) 38.5 (37.7–39.3) 24.0 (23.3–24.7) 12.6 (11.8–13.4) Yes 53.0 (51.9–54.1) 59.9 (59.1–60.7) 74.5 (73.8–75.2) 86.2 (85.4–87.0) Don't know/Refused/Not ascertained 1.5 (1.2–1.8) 1.6 (1.4–1.8) 1.5 (1.3–1.7) 1.2 (1.0–1.4) Have health insurance coverage No 19.7 (18.7–20.8) 18.7 
(18.0–19.3) 10.3 (9.9–10.8) 0.4 (0.3–0.6) Yes 79.4 (78.2–80.4) 81.0 (80.3–81.7) 89.4 (88.9–89.8) 99.5 (99.3–99.6) Don't know/Refused/Not ascertained 0.9 (0.7–1.2) 0.3 (0.3–0.4) 0.3 (0.2–0.4) 0.1 (0.1–0.2) Other health screenings Ever been tested for HIV No 60.3 (58.9–61.6) 45.9 (45.0–46.7) 65.0 (64.2–65.8) 82.5 (81.7–83.2) Yes 35.4 (34.0–36.7) 49.3 (48.5–50.1) 29.8 (29.0–30.5) 11.4 (10.8–12.0) Don't know/Refused/Not ascertained 4.4 (3.9–4.9) 4.8 (4.5–5.2) 5.2 (4.8–5.6) 6.1 (5.6–6.6) Blood pressure check, last 12 mo No 20.6 (19.8–21.5) 16.3 (15.7–16.9) 9.1 (8.7–9.6) 4.0 (3.6–4.4) Yes 59.6 (58.4–60.8) 63.2 (62.5–63.9) 70.1 (69.5–70.7) 76.7 (75.8–77.5) Don't know/Refused/Not ascertained 19.7 (18.9–20.6) 20.5 (19.9–21.1) 20.8 (20.2–21.3) 19.4 (18.6–20.2) Cholesterol checked last 12 mo No 46.8 (45.7–47.9) 32.7 (31.9–33.4) 17.6 (17.1–18.2) 8.7 (8.1–9.3) Yes 30.0 (29.0–30.9) 45.1 (44.4–45.9) 60.7 (60.1–61.4) 70.0 (69.0–71.0) Don't know/Refused/Not ascertained 23.2 (22.3–24.2) 22.2 (21.6–22.8) 21.7 (21.1–22.2) 21.3 (20.5–22.2) Had colon cancer test in last 12 months No — 37.1 (36.4–37.8) 76.5 (75.8–77.1) 77.5 (76.7–78.3) Yes — 2.6 (2.3–2.8) 20.9 (20.3–21.6) 19.8 (19.1–20.6) Don't know/Refused/Not ascertained — 60.4 (59.6–61.1) 2.6 (2.4–2.9) 2.7 (2.4–3.0) Born post-1985b (n = 13,296)Born 1966–1985b (n = 28,089)Born 1945–1965b (n = 28,725)Born pre-1945b (n = 15,100) Population characteristics% (95%CI)% (95%CI)% (95%CI)% (95%CI) Demographic characteristics Region Northeast 14.4 (12.9–16.1) 15.8 (15.0–16.7) 17.8 (17.0–18.5) 19.6 (18.3–21.0) Midwest 24.8 (22.4–27.4) 23.4 (22.3–24.5) 23.5 (22.7–24.3) 24.1 (22.8–25.5) South 38.5 (35.9–41.1) 38.1 (36.9–39.2) 38.6 (37.6–39.6) 37.1 (35.6–38.7) West 22.3 (20.3–24.3) 22.7 (21.8–23.7) 20.1 (19.3–21.0) 19.1 (18.0–20.2) Age (Mean; 95% CI) 23.6 (23.5–23.8) 38.2 (38.1–38.3) 58.8 (58.7–58.9) 77.6 (77.5–77.8) Race/Ethnicity Non-Hispanic white 61.0 (59.4–62.6) 61.2 (60.3–62.2) 73.5 (72.6–74.4) 79.9 (78.9–80.9) Non-Hispanic black 14.3 
(13.2–15.5) 13.6 (12.9–14.2) 12.5 (11.8–13.1) 9.5 (8.7–10.3) Non-Hispanic Asian 3.7 (3.3–4.2) 3.9 (3.6–4.3) 2.1 (1.9–2.3) 1.9 (1.6–2.2) Non-Hispanic Other 3.1 (2.8–3.6) 3.1 (2.8–3.4) 2.3 (1.9–2.3) 1.9 (1.7–2.2) Hispanic 17.8 (16.7–19.0) 18.2 (17.4–19.0) 9.6 (9.1–10.1) 6.8 (6.3–7.3) Gender Male 50.5 (49.3–51.6) 50.4 (49.6–51.2) 48.6 (47.9–49.3) 40.0 (39.0–40.9) Female 49.5 (48.4–50.7) 49.6 (48.8–50.4) 51.4 (50.7–52.1) 60.0 (59.1–61.0) Education Less than high school graduate 11.0 (10.2–11.9) 12.0 (11.5–12.6) 12.2 (11.7–12.7) 21.4 (20.4–22.4) High school graduate or GED 25.4 (24.1–26.8) 21.9 (21.2–22.5) 26.9 (26.2–27.7) 31.7 (30.7–32.8) Some college/associate's degree 40.7 (38.8–42.7) 28.9 (28.2–29.7) 29.6 (28.9–30.2) 23.6 (22.7–24.5) Bachelor's degree or higher 22.6 (21.3–24.1) 36.9 (36.0–37.9) 30.9 (30.1–31.7) 22.5 (21.5–23.6) Marital status Married/living with partner 28.1 (26.7–29.6) 60.2 (59.4–61.0) 55.0 (54.2–55.8) 39.6 (38.5–40.7) Not currently married (includes divorced, separated, and widowed) 2.3 (2.0–2.7) 15.3 (14.8–15.9) 32.5 (31.8–33.2) 55.5 (54.4–56.6) Never married 69.6 (68.0–71.0) 24.4 (23.7–25.2) 12.5 (11.9–13.1) 4.9 (4.4–5.4) Income <$35,000 52.0 (49.9–54.2) 30.6 (29.7–31.4) 34.2 (33.3–35.1) 53.3 (52.0–54.7)$35,000–$74,999 28.8 (27.4–30.3) 31.5 (30.7–32.2) 30.5 (29.8–31.3) 30.8 (29.7–31.9)$75,000–$99,999 8.0 (7.4–8.7) 13.4 (12.8–13.9) 11.7 (11.2–12.2) 6.9 (6.4–7.5)$100,000+ 11.1 (10.2–12.1) 24.6 (23.8–25.4) 23.6 (22.8–24.4) 9.0 (8.4–9.6) Risk factors Alcohol use Lifetime abstainer 25.7 (24.5–27.0) 16.1 (15.5–16.6) 16.5 (15.9–17.1) 27.8 (26.8–28.8) Former 4.7 (4.3–5.1) 9.8 (9.4–10.3) 18.3 (17.7–19.0) 26.0 (25.1–26.9) Current infrequent/light/unknown frequency 43.2 (42.2–44.3) 48.8 (48.0–49.6) 41.9 (41.1–42.6) 30.8 (29.8–31.8) Current moderate/heavy 25.0 (23.7–26.4) 23.7 (23.0–24.5) 21.7 (21.1–22.4) 14.1 (13.3–14.9) Drinking status unknown 1.4 (1.1–1.6) 1.6 (1.4–1.8) 1.6 (1.4–1.8) 1.4 (1.2–1.7) Health care factors Saw/talked to HCP in last 12 months 
No 44.5 (44.3–46.6) 38.5 (37.7–39.3) 24.0 (23.3–24.7) 12.6 (11.8–13.4) Yes 53.0 (51.9–54.1) 59.9 (59.1–60.7) 74.5 (73.8–75.2) 86.2 (85.4–87.0) Don't know/Refused/Not ascertained 1.5 (1.2–1.8) 1.6 (1.4–1.8) 1.5 (1.3–1.7) 1.2 (1.0–1.4) Have health insurance coverage No 19.7 (18.7–20.8) 18.7 (18.0–19.3) 10.3 (9.9–10.8) 0.4 (0.3–0.6) Yes 79.4 (78.2–80.4) 81.0 (80.3–81.7) 89.4 (88.9–89.8) 99.5 (99.3–99.6) Don't know/Refused/Not ascertained 0.9 (0.7–1.2) 0.3 (0.3–0.4) 0.3 (0.2–0.4) 0.1 (0.1–0.2) Other health screenings Ever been tested for HIV No 60.3 (58.9–61.6) 45.9 (45.0–46.7) 65.0 (64.2–65.8) 82.5 (81.7–83.2) Yes 35.4 (34.0–36.7) 49.3 (48.5–50.1) 29.8 (29.0–30.5) 11.4 (10.8–12.0) Don't know/Refused/Not ascertained 4.4 (3.9–4.9) 4.8 (4.5–5.2) 5.2 (4.8–5.6) 6.1 (5.6–6.6) Blood pressure check, last 12 mo No 20.6 (19.8–21.5) 16.3 (15.7–16.9) 9.1 (8.7–9.6) 4.0 (3.6–4.4) Yes 59.6 (58.4–60.8) 63.2 (62.5–63.9) 70.1 (69.5–70.7) 76.7 (75.8–77.5) Don't know/Refused/Not ascertained 19.7 (18.9–20.6) 20.5 (19.9–21.1) 20.8 (20.2–21.3) 19.4 (18.6–20.2) Cholesterol checked last 12 mo No 46.8 (45.7–47.9) 32.7 (31.9–33.4) 17.6 (17.1–18.2) 8.7 (8.1–9.3) Yes 30.0 (29.0–30.9) 45.1 (44.4–45.9) 60.7 (60.1–61.4) 70.0 (69.0–71.0) Don't know/Refused/Not ascertained 23.2 (22.3–24.2) 22.2 (21.6–22.8) 21.7 (21.1–22.2) 21.3 (20.5–22.2) Had colon cancer test in last 12 months No — 37.1 (36.4–37.8) 76.5 (75.8–77.1) 77.5 (76.7–78.3) Yes — 2.6 (2.3–2.8) 20.9 (20.3–21.6) 19.8 (19.1–20.6) Don't know/Refused/Not ascertained — 60.4 (59.6–61.1) 2.6 (2.4–2.9) 2.7 (2.4–3.0) aThis is table includes both low- and high-risk participants (work in healthcare, former, regular alcohol consumption, lived with someone with hepatitis, or a personal history of: liver cancer, hepatitis, any liver condition, or a chronic liver condition). bAll variables listed were significantly different between age groups (P < 0.0001). 
HCV screening prevalence Weighted analyses indicated screening was between 11.5 and 12.8% across the three survey years for baby boomers (Table 2). Screening prevalence in the second youngest birth cohort was similar to that of baby boomers (13.7%–14.9%), whereas the older birth cohort was screened between 3.9 and 4.5%. Among the response options available to select reporting the reason for HCV screening (i.e., their doctor thought they were at risk because they experienced symptoms, they were born between 1945 and 1965, they were at risk due to exposure, other, refused, not ascertained, or don't know) “other” was the most frequently reported response regardless of age or study year. Of the baby boomers screened, less than 20% indicated the reason was their age. Table 2. Proportion screened for HCV by age group and yeara Born post-1985Born 1966–1985Born 1945–1965Born pre-1945 2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI)2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI)2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI)2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI) HCV Screening 9.2 (8.2–10.2) 10.6 (9.5–11.9) 9.8 (8.7–10.9) 13.8 (13.0–14.6) 13.7 (12.9–14.5) 14.9 (14.0–15.8) 11.9 (11.2–12.7) 11.5 (10.8–12.3) 12.8 (12.0–13.7) 3.9 (3.3–4.5) 4.3 (3.7–4.9) 4.5 (3.9–5.2) Reasons for having HCV screen (of those screened) HCP thought at risk due to blood test or symptoms like fatigue, nausea, stomach pain, yellowing of the eyes or skin 9.2 (6.6–12.6) 8.0 (5.8–10.9) 9.3 (6.7–12.6) 10.7 (9.1–12.6) 11.5 (9.6–13.7) 11.9 (10.0–14.2) 16.9 (14.9–19.1) 13.3 (11.4–15.5) 18.6 (16.1–21.3) 12.9 (8.8–18.5) 14.5 (9.8–20.9) 17.4 (12.5–23.8) Born 1945-1965 — — — — — — 17.4 (15.0–20.1) 18.5 (16.3–20.9) 14.4 (12.2–16.9) HCV exposure to blood on the job, injection drug use, or recipient of transfusion before 1992 12.9 (9.5–17.3) 14.6 (11.4–18.6) 12.9 (10.0–16.5) 20.9 (18.5–23.5) 20.5 (17.8–23.4) 19.1 (16.6–22.0) 23.6 (21.0–26.4) 21.5 (18.9–24.3) 20.4 (17.9–23.2) 15.4 (11.1–21.0) 28.6 
(22.2–35.8) 21.6 (15.8–28.8) Other reason 76.2 (71.2–80.7) 76.0 (71.4–80.1) 76.0 (71.3–80.2) 66.8 (64.1–69.4) 66.2 (63.1–69.1) 66.5 (63.4–69.5) 40.7 (37.7–43.8) 45.2 (41.9–48.6) 45.4 (42.4–48.5) 64.2 (56.9–71.0) 50.3 (42.9–57.7) 54.7 (46.5–62.6) Don't know/Not Ascertained 1.4 (0.6–3.3) 1.4 (0.7–2.8) 1.2 (0.5–3.2) 0.8 (0.5–1.5) 1.5 (0.9–2.4) 2.0 (1.3–3.2) 1.3 (0.8–2.2) 1.5 (0.9–2.6) 1.3 (0.7–2.3) 3.2 (1.5–6.6) 4.2 (2.0–8.6) 2.4 (0.8–7.3) Born post-1985Born 1966–1985Born 1945–1965Born pre-1945 2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI)2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI)2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI)2013 (%) (95%CI)2014 (%) (95%CI)2015 (%) (95%CI) HCV Screening 9.2 (8.2–10.2) 10.6 (9.5–11.9) 9.8 (8.7–10.9) 13.8 (13.0–14.6) 13.7 (12.9–14.5) 14.9 (14.0–15.8) 11.9 (11.2–12.7) 11.5 (10.8–12.3) 12.8 (12.0–13.7) 3.9 (3.3–4.5) 4.3 (3.7–4.9) 4.5 (3.9–5.2) Reasons for having HCV screen (of those screened) HCP thought at risk due to blood test or symptoms like fatigue, nausea, stomach pain, yellowing of the eyes or skin 9.2 (6.6–12.6) 8.0 (5.8–10.9) 9.3 (6.7–12.6) 10.7 (9.1–12.6) 11.5 (9.6–13.7) 11.9 (10.0–14.2) 16.9 (14.9–19.1) 13.3 (11.4–15.5) 18.6 (16.1–21.3) 12.9 (8.8–18.5) 14.5 (9.8–20.9) 17.4 (12.5–23.8) Born 1945-1965 — — — — — — 17.4 (15.0–20.1) 18.5 (16.3–20.9) 14.4 (12.2–16.9) HCV exposure to blood on the job, injection drug use, or recipient of transfusion before 1992 12.9 (9.5–17.3) 14.6 (11.4–18.6) 12.9 (10.0–16.5) 20.9 (18.5–23.5) 20.5 (17.8–23.4) 19.1 (16.6–22.0) 23.6 (21.0–26.4) 21.5 (18.9–24.3) 20.4 (17.9–23.2) 15.4 (11.1–21.0) 28.6 (22.2–35.8) 21.6 (15.8–28.8) Other reason 76.2 (71.2–80.7) 76.0 (71.4–80.1) 76.0 (71.3–80.2) 66.8 (64.1–69.4) 66.2 (63.1–69.1) 66.5 (63.4–69.5) 40.7 (37.7–43.8) 45.2 (41.9–48.6) 45.4 (42.4–48.5) 64.2 (56.9–71.0) 50.3 (42.9–57.7) 54.7 (46.5–62.6) Don't know/Not Ascertained 1.4 (0.6–3.3) 1.4 (0.7–2.8) 1.2 (0.5–3.2) 0.8 (0.5–1.5) 1.5 (0.9–2.4) 2.0 (1.3–3.2) 1.3 (0.8–2.2) 1.5 (0.9–2.6) 1.3 (0.7–2.3) 
3.2 (1.5–6.6) 4.2 (2.0–8.6) 2.4 (0.8–7.3) aThis is table includes both low- and high-risk participants (work in healthcare, former, regular alcohol consumption, lived with someone with hepatitis, or a personal history of: liver cancer, hepatitis, any liver condition, or a chronic liver condition). Regression models: factors independently associated with HCV screening Bivariate and multivariable models of factors associated with HCV screening by birth cohort are presented in Table 3. Table 3. Factors associated with HCV screening by age group for the population at average risk of HCVa Univariate OR (95% CI)Multivariable aOR (95% CI) Born post-1985 Population characteristics Year 2013 (ref.) — — 2014 1.20 (0.98–1.48) 2015 1.06 (0.87–1.30) Demographic characteristics Region Northeast (ref.) — — Midwest 0.97 (0.73–1.27) South 1.09 (0.85–1.40) West 1.06 (0.82–1.37) Age (continuous, 5-year increments) 1.70 (1.501.94) 1.04 (1.001.07) Race/ethnicity Non-Hispanic white (ref.) — — Non-Hispanic black 0.93 (0.74–1.16) Non-Hispanic Asian 0.84 (0.54–1.29) Non-Hispanic Other 0.81 (0.56–1.17) Hispanic 0.75 (0.610.92) Gender Male (ref.) — — Female 1.04 (0.88–1.23) 0.79 (0.660.95) Education Less than high school graduate (ref.) — — High school graduate or GED 1.62 (1.182.23) 1.58 (1.152.17) Some college/associate's degree 1.83 (1.362.46) 1.78 (1.332.38) Bachelor's degree or higher 1.58 (1.152.17) 1.32 (0.94–1.87) Don't know 2.42 (0.80–7.30) 3.61 (1.2510.45) Marital status Married/living with partner (ref.) — — Not currently married (divorced, separated, and widowed) 1.66 (1.132.45) 1.45 (0.97–2.17) Never married 0.68 (0.570.81) 0.85 (0.70–1.03) Income <$35,000 (ref.) — —$35,000–$74,999 0.93 (0.78–1.12)$75,000–$99,999 0.78 (0.57–1.05)$100,000+ 0.75 (0.53–1.05) Risk factors Alcohol use Lifetime abstainer (ref.) 
— — Former infrequent/unknown 3.15 (2.194.55) 1.94 (1.332.84) Current infrequent/light/unknown frequency 2.11 (1.702.62) 1.50 (1.191.89) Current moderate/heavy 2.40 (1.913.03) 1.65 (1.282.14) Drinking status unknown 0.55 (0.22–1.43) 0.50 (0.19–1.31) Health care factors Saw/talked to HCP in last 12 months No (ref.) — — Yes 1.33 (1.131.57) Have health insurance coverage No (ref.) — — Yes 1.10 (0.93–1.31) 1.24 (1.031.50) Don't know/Refused/Not ascertained 0.17 (0.040.67) 0.17 (0.040.68) Other health screenings Ever been tested for HIV No (ref.) — — Yes 6.05 (5.067.24) 5.72 (4.756.90) Don't know/Refused/Not ascertained 1.27 (0.74–2.17) 1.42 (0.81–2.49) Univariate OR (95% CI) Multivariable aOR (95% CI) Born 1966–1985 Population characteristics Year 2013 (ref.) — — 2014 1.02 (0.91–1.14) 1.03 (0.92–1.16) 2015 1.15 (1.031.28) 1.20 (1.061.35) Demographic characteristics Region Northeast (ref.) — — Midwest 0.79 (0.670.94) 0.78 (0.660.94) South 0.97 (0.85–1.12) 1.00 (0.86–1.16) West 1.07 (0.93–1.24) 1.07 (0.92–1.24) Age (Continuous, 5-year increments) 0.93 (0.90–0.97) 0.98 (0.980.99) Race/Ethnicity Non-Hispanic white (ref.) — — Non-Hispanic black 1.00 (0.89–1.13) 0.70 (0.620.80) Non-Hispanic Asian 0.56 (0.430.72) 0.64 (0.500.83) Non-Hispanic Other 0.96 (0.76–1.21) 0.95 (0.74–1.22) Hispanic 0.68 (0.610.76) 0.68 (0.590.78) Gender Male (ref.) — — Female 0.87 (0.790.96) 0.66 (0.600.74) Education Less than high school graduate (ref.) — — High school graduate or GED 1.36 (1.141.62) 1.18 (0.97–1.42) Some college/Associates degree 1.81 (1.532.14) 1.39 (1.151.69) Bachelor's degree or higher 1.52 (1.291.80) 1.22 (0.99–1.51) Don't know 0.77 (0.24–2.45) 0.84 (0.27–2.62) Marital status Married/Living with partner (ref.) — — Not currently married (divorced, separated, and widowed) 1.52 (1.331.73) 1.40 (1.211.61) Never married 1.31 (1.191.44) 1.20 (1.071.34) Income <$35,000 (ref.) 
— —$35,000–$74,999 0.88 (0.790.99) 0.82 (0.730.93)$75,000–$99,999 0.97 (0.83–1.13) 0.91 (0.77–1.09)$100,000+ 0.90 (0.78–1.02) 0.83 (0.700.99) Risk Factors Alcohol use Lifetime abstainer (ref.) — — Former infrequent/unknown 2.05 (1.702.49) 1.46 (1.191.78) Current infrequent/light/unknown frequency 1.67 (1.441.94) 1.20 (1.031.40) Current moderate/heavy 1.65 (1.411.94) 1.07 (0.90–1.26) Drinking status unknown 0.40 (0.190.84) 0.40 (0.190.88) Health care factors Saw/talked to HCP in last 12 months No (ref.) — — Yes 1.41 (1.281.55) Have health insurance coverage No (ref.) — — Yes 1.32 (1.171.49) 1.19 (1.031.37) Don't know/Refused/Not ascertained 0.93 (0.38–2.30) 0.70 (0.27–1.78) Other health screenings Ever been tested for HIV No (ref.) — — Yes 4.65 (4.145.22) 4.59 (4.065.18) Don't know/Refused/Not ascertained 1.24 (0.88–1.73) 1.45 (1.012.08) Blood pressure check, last 12 mo No (ref.) — — Yes 1.97 (1.682.31) 1.64 (1.381.94) Don't know/Refused/Not ascertained 1.43 (1.181.73) 1.27 (1.031.56) Univariate OR (95% CI) Multivariable aOR (95% CI) Born 1945–1965 Population characteristics Year 2013 (ref.) — — 2014 1.11 (0.97–1.27) 1.20 (1.051.38) 2015 1.22 (1.061.39) 1.31 (1.131.52) Demographic characteristics Region Northeast (ref.) — — Midwest 0.82 (0.670.99) 0.88 (0.72–1.07) South 1.22 (1.061.41) 1.22 (1.051.42) West 1.56 (1.301.86) 1.45 (1.201.76) Age (Continuous, 5-year increments) 0.88 (0.840.92) 0.90 (0.850.94) Race/Ethnicity Non-Hispanic white (ref.) — — Non-Hispanic black 1.10 (0.95–1.27) 0.81 (0.690.94) Non-Hispanic Asian 0.81 (0.57–1.14) 0.85 (0.60–1.22) Non-Hispanic Other 1.14 (0.86–1.51) 1.03 (0.76–1.40) Hispanic 0.85 (0.721.00) 0.79 (0.660.95) Gender Male (ref.) — — Female 0.68 (0.610.76) 0.71 (0.630.79) Education Less than high school graduate (ref.) 
— — High school graduate or GED 1.01 (0.83–1.23) 1.05 (0.85–1.29) Some college/Associates degree 1.76 (1.482.10) 1.60 (1.331.94) Bachelor's degree or higher 1.67 (1.402.00) 1.56 (1.281.91) Don't know 0.57 (0.19–1.72) 0.76 (0.24–2.43) Marital Status Married/Living with partner (ref.) — — Not currently married (divorced, separated, and widowed) 1.28 (1.141.43) 1.15 (1.011.31) Never married 1.09 (0.92–1.29) 0.97 (0.82–1.16) Income <$35,000 (ref.) — —$35,000–$74,999 0.85 (0.740.98) 0.84 (0.730.97)$75,000–$99,999 0.85 (0.71–1.02) 0.77 (0.630.94)$100,000+ 1.10 (0.96–1.26) 0.89 (0.75–1.06) Risk factors Alcohol use Lifetime abstainer (ref.) — — Former infrequent/unknown 2.11 (1.752.54) 1.62 (1.331.97) Current infrequent/light/unknown frequency 1.73 (1.482.02) 1.30 (1.101.54) Current moderate/heavy 1.80 (1.482.18) 1.29 (1.061.56) Drinking status unknown 0.52 (0.26–1.02) 0.89 (0.44–1.79) Health care factors Saw/talked to HCP in last 12 months No (ref.) — — Yes 1.46 (1.291.67) 1.27 (1.111.47) Have health insurance coverage No (ref.) — Yes 1.16 (0.99–1.37) Don't know/Refused/Not ascertained 0.85 (0.34–2.14) Other health screenings Ever been tested for HIV No (ref.) — — Yes 4.79 (4.275.37) 4.17 (3.704.70) Don't know/Refused/Not ascertained 1.04 (0.78–1.39) 1.80 (1.312.47) Blood pressure check, last 12 mo No (ref.) — Yes 1.72 (1.402.12) Don't know/Refused/Not ascertained 1.35 (1.071.71) Cholesterol checked last 12 mo No (ref.) — — Yes 1.61 (1.401.85) 1.34 (1.151.57) Don't know/Refused/Not ascertained 1.19 (1.001.42) 1.11 (0.92–1.33) Had colon cancer test in last 12 months No (ref.) — — Yes 1.75 (1.561.96) 1.43 (1.251.62) Don't know/Refused/Not ascertained 0.09 (0.040.20) 0.26 (0.110.61) Univariate OR (95% CI) Multivariable aOR (95% CI) Born pre-1945 Population characteristics Year 2013 (ref.) — 2014 1.03 (0.79–1.36) 2015 1.11 (0.84–1.46) Demographic characteristics Region Northeast (ref.) 
— — Midwest 1.01 (0.67–1.54) 1.03 (0.68–1.56) South 1.31 (0.90–1.89) 1.19 (0.82–1.73) West 2.28 (1.593.27) 2.00 (1.392.88) Age (Continuous, 5-year increments) 0.93 (0.910.95) 0.85 (0.760.95) Race/Ethnicity Non-Hispanic white (ref.) — Non-Hispanic black 1.44 (1.081.91) Non-Hispanic Asian 2.06 (1.223.49) Non-Hispanic Other 1.79 (0.94–3.42) Hispanic 1.03 (0.71–1.48) Gender Male (ref.) — — Female 0.58 (0.470.72) 0.71 (0.570.89) Education Less than high school graduate (ref.) — High school graduate or GED 0.93 (0.67–1.29) Some college/Associates degree 1.39 (1.041.86) Bachelor's degree or higher 1.59 (1.162.17) Don't know 0.53 (0.15–1.93) Marital status Married/Living with partner (ref.) — Not currently married (divorced, separated, and widowed) 0.76 (0.600.95) Never married 0.79 (0.46–1.37) Income <$35,000 (ref.) —$35,000–$74,999 1.28 (0.99–1.64)$75,000–$99,999 1.56 (1.042.33)$100,000+ 1.96 (1.402.75) Risk factors Alcohol use Lifetime abstainer (ref.) — Former infrequent/unknown 1.57 (1.142.15) Current infrequent/light/unknown frequency 1.41 (1.041.92) Current moderate/heavy 1.28 (0.87–1.87) Drinking status unknown 0.16 (0.02–1.12) Health care factors Saw/talked to HCP in last 12 months No (ref.) — Yes 0.89 (0.64–1.22) Have health insurance coverage No (ref.) — Yes 0.50 (0.18–1.37) Don't know/Refused/Not ascertained — Other health screenings Ever been tested for HIV No (ref.) — — Yes 6.53 (5.198.21) 5.37 (4.196.89) Don't know/Refused/Not ascertained 1.28 (0.81–2.03) 1.64 (1.042.58) Blood pressure check, last 12 mo No (ref.) — Yes 1.36 (0.69–2.69) Don't know/Refused/Not ascertained 1.06 (0.53–2.14) Cholesterol checked last 12 mo No (ref.) — Yes 1.42 (0.88–2.27) Don't know/Refused/Not ascertained 1.05 (0.64–1.74) Had colon cancer test in last 12 months No (ref.) 
— — Yes 1.91 (1.512.43) 1.56 (1.212.00) Don't know/Refused/Not ascertained 0.37 (0.10–1.31) 0.35 (0.09–1.26) Univariate OR (95% CI)Multivariable aOR (95% CI) Born post-1985 Population characteristics Year 2013 (ref.) — — 2014 1.20 (0.98–1.48) 2015 1.06 (0.87–1.30) Demographic characteristics Region Northeast (ref.) — — Midwest 0.97 (0.73–1.27) South 1.09 (0.85–1.40) West 1.06 (0.82–1.37) Age (continuous, 5-year increments) 1.70 (1.501.94) 1.04 (1.001.07) Race/ethnicity Non-Hispanic white (ref.) — — Non-Hispanic black 0.93 (0.74–1.16) Non-Hispanic Asian 0.84 (0.54–1.29) Non-Hispanic Other 0.81 (0.56–1.17) Hispanic 0.75 (0.610.92) Gender Male (ref.) — — Female 1.04 (0.88–1.23) 0.79 (0.660.95) Education Less than high school graduate (ref.) — — High school graduate or GED 1.62 (1.182.23) 1.58 (1.152.17) Some college/associate's degree 1.83 (1.362.46) 1.78 (1.332.38) Bachelor's degree or higher 1.58 (1.152.17) 1.32 (0.94–1.87) Don't know 2.42 (0.80–7.30) 3.61 (1.2510.45) Marital status Married/living with partner (ref.) — — Not currently married (divorced, separated, and widowed) 1.66 (1.132.45) 1.45 (0.97–2.17) Never married 0.68 (0.570.81) 0.85 (0.70–1.03) Income <$35,000 (ref.) — —$35,000–$74,999 0.93 (0.78–1.12)$75,000–$99,999 0.78 (0.57–1.05)$100,000+ 0.75 (0.53–1.05) Risk factors Alcohol use Lifetime abstainer (ref.) — — Former infrequent/unknown 3.15 (2.194.55) 1.94 (1.332.84) Current infrequent/light/unknown frequency 2.11 (1.702.62) 1.50 (1.191.89) Current moderate/heavy 2.40 (1.913.03) 1.65 (1.282.14) Drinking status unknown 0.55 (0.22–1.43) 0.50 (0.19–1.31) Health care factors Saw/talked to HCP in last 12 months No (ref.) — — Yes 1.33 (1.131.57) Have health insurance coverage No (ref.) — — Yes 1.10 (0.93–1.31) 1.24 (1.031.50) Don't know/Refused/Not ascertained 0.17 (0.040.67) 0.17 (0.040.68) Other health screenings Ever been tested for HIV No (ref.) 
— — Yes 6.05 (5.067.24) 5.72 (4.756.90) Don't know/Refused/Not ascertained 1.27 (0.74–2.17) 1.42 (0.81–2.49) Univariate OR (95% CI) Multivariable aOR (95% CI) Born 1966–1985 Population characteristics Year 2013 (ref.) — — 2014 1.02 (0.91–1.14) 1.03 (0.92–1.16) 2015 1.15 (1.031.28) 1.20 (1.061.35) Demographic characteristics Region Northeast (ref.) — — Midwest 0.79 (0.670.94) 0.78 (0.660.94) South 0.97 (0.85–1.12) 1.00 (0.86–1.16) West 1.07 (0.93–1.24) 1.07 (0.92–1.24) Age (Continuous, 5-year increments) 0.93 (0.90–0.97) 0.98 (0.980.99) Race/Ethnicity Non-Hispanic white (ref.) — — Non-Hispanic black 1.00 (0.89–1.13) 0.70 (0.620.80) Non-Hispanic Asian 0.56 (0.430.72) 0.64 (0.500.83) Non-Hispanic Other 0.96 (0.76–1.21) 0.95 (0.74–1.22) Hispanic 0.68 (0.610.76) 0.68 (0.590.78) Gender Male (ref.) — — Female 0.87 (0.790.96) 0.66 (0.600.74) Education Less than high school graduate (ref.) — — High school graduate or GED 1.36 (1.141.62) 1.18 (0.97–1.42) Some college/Associates degree 1.81 (1.532.14) 1.39 (1.151.69) Bachelor's degree or higher 1.52 (1.291.80) 1.22 (0.99–1.51) Don't know 0.77 (0.24–2.45) 0.84 (0.27–2.62) Marital status Married/Living with partner (ref.) — — Not currently married (divorced, separated, and widowed) 1.52 (1.331.73) 1.40 (1.211.61) Never married 1.31 (1.191.44) 1.20 (1.071.34) Income <$35,000 (ref.) — —$35,000–$74,999 0.88 (0.790.99) 0.82 (0.730.93)$75,000–$99,999 0.97 (0.83–1.13) 0.91 (0.77–1.09)$100,000+ 0.90 (0.78–1.02) 0.83 (0.700.99) Risk Factors Alcohol use Lifetime abstainer (ref.) — — Former infrequent/unknown 2.05 (1.702.49) 1.46 (1.191.78) Current infrequent/light/unknown frequency 1.67 (1.441.94) 1.20 (1.031.40) Current moderate/heavy 1.65 (1.411.94) 1.07 (0.90–1.26) Drinking status unknown 0.40 (0.190.84) 0.40 (0.190.88) Health care factors Saw/talked to HCP in last 12 months No (ref.) — — Yes 1.41 (1.281.55) Have health insurance coverage No (ref.) 
— — Yes 1.32 (1.171.49) 1.19 (1.031.37) Don't know/Refused/Not ascertained 0.93 (0.38–2.30) 0.70 (0.27–1.78) Other health screenings Ever been tested for HIV No (ref.) — — Yes 4.65 (4.145.22) 4.59 (4.065.18) Don't know/Refused/Not ascertained 1.24 (0.88–1.73) 1.45 (1.012.08) Blood pressure check, last 12 mo No (ref.) — — Yes 1.97 (1.682.31) 1.64 (1.381.94) Don't know/Refused/Not ascertained 1.43 (1.181.73) 1.27 (1.031.56) Univariate OR (95% CI) Multivariable aOR (95% CI) Born 1945–1965 Population characteristics Year 2013 (ref.) — — 2014 1.11 (0.97–1.27) 1.20 (1.051.38) 2015 1.22 (1.061.39) 1.31 (1.131.52) Demographic characteristics Region Northeast (ref.) — — Midwest 0.82 (0.670.99) 0.88 (0.72–1.07) South 1.22 (1.061.41) 1.22 (1.051.42) West 1.56 (1.301.86) 1.45 (1.201.76) Age (Continuous, 5-year increments) 0.88 (0.840.92) 0.90 (0.850.94) Race/Ethnicity Non-Hispanic white (ref.) — — Non-Hispanic black 1.10 (0.95–1.27) 0.81 (0.690.94) Non-Hispanic Asian 0.81 (0.57–1.14) 0.85 (0.60–1.22) Non-Hispanic Other 1.14 (0.86–1.51) 1.03 (0.76–1.40) Hispanic 0.85 (0.721.00) 0.79 (0.660.95) Gender Male (ref.) — — Female 0.68 (0.610.76) 0.71 (0.630.79) Education Less than high school graduate (ref.) — — High school graduate or GED 1.01 (0.83–1.23) 1.05 (0.85–1.29) Some college/Associates degree 1.76 (1.482.10) 1.60 (1.331.94) Bachelor's degree or higher 1.67 (1.402.00) 1.56 (1.281.91) Don't know 0.57 (0.19–1.72) 0.76 (0.24–2.43) Marital Status Married/Living with partner (ref.) — — Not currently married (divorced, separated, and widowed) 1.28 (1.141.43) 1.15 (1.011.31) Never married 1.09 (0.92–1.29) 0.97 (0.82–1.16) Income <$35,000 (ref.) — —$35,000–$74,999 0.85 (0.740.98) 0.84 (0.730.97)$75,000–$99,999 0.85 (0.71–1.02) 0.77 (0.630.94)$100,000+ 1.10 (0.96–1.26) 0.89 (0.75–1.06) Risk factors Alcohol use Lifetime abstainer (ref.) 
— — Former infrequent/unknown 2.11 (1.752.54) 1.62 (1.331.97) Current infrequent/light/unknown frequency 1.73 (1.482.02) 1.30 (1.101.54) Current moderate/heavy 1.80 (1.482.18) 1.29 (1.061.56) Drinking status unknown 0.52 (0.26–1.02) 0.89 (0.44–1.79) Health care factors Saw/talked to HCP in last 12 months No (ref.) — — Yes 1.46 (1.291.67) 1.27 (1.111.47) Have health insurance coverage No (ref.) — Yes 1.16 (0.99–1.37) Don't know/Refused/Not ascertained 0.85 (0.34–2.14) Other health screenings Ever been tested for HIV No (ref.) — — Yes 4.79 (4.275.37) 4.17 (3.704.70) Don't know/Refused/Not ascertained 1.04 (0.78–1.39) 1.80 (1.312.47) Blood pressure check, last 12 mo No (ref.) — Yes 1.72 (1.402.12) Don't know/Refused/Not ascertained 1.35 (1.071.71) Cholesterol checked last 12 mo No (ref.) — — Yes 1.61 (1.401.85) 1.34 (1.151.57) Don't know/Refused/Not ascertained 1.19 (1.001.42) 1.11 (0.92–1.33) Had colon cancer test in last 12 months No (ref.) — — Yes 1.75 (1.561.96) 1.43 (1.251.62) Don't know/Refused/Not ascertained 0.09 (0.040.20) 0.26 (0.110.61) Univariate OR (95% CI) Multivariable aOR (95% CI) Born pre-1945 Population characteristics Year 2013 (ref.) — 2014 1.03 (0.79–1.36) 2015 1.11 (0.84–1.46) Demographic characteristics Region Northeast (ref.) — — Midwest 1.01 (0.67–1.54) 1.03 (0.68–1.56) South 1.31 (0.90–1.89) 1.19 (0.82–1.73) West 2.28 (1.593.27) 2.00 (1.392.88) Age (Continuous, 5-year increments) 0.93 (0.910.95) 0.85 (0.760.95) Race/Ethnicity Non-Hispanic white (ref.) — Non-Hispanic black 1.44 (1.081.91) Non-Hispanic Asian 2.06 (1.223.49) Non-Hispanic Other 1.79 (0.94–3.42) Hispanic 1.03 (0.71–1.48) Gender Male (ref.) — — Female 0.58 (0.470.72) 0.71 (0.570.89) Education Less than high school graduate (ref.) — High school graduate or GED 0.93 (0.67–1.29) Some college/Associates degree 1.39 (1.041.86) Bachelor's degree or higher 1.59 (1.162.17) Don't know 0.53 (0.15–1.93) Marital status Married/Living with partner (ref.) 
— Not currently married (divorced, separated, and widowed) 0.76 (0.600.95) Never married 0.79 (0.46–1.37) Income <$35,000 (ref.) —$35,000–$74,999 1.28 (0.99–1.64)$75,000–$99,999 1.56 (1.042.33)$100,000+ 1.96 (1.402.75) Risk factors Alcohol use Lifetime abstainer (ref.) — Former infrequent/unknown 1.57 (1.142.15) Current infrequent/light/unknown frequency 1.41 (1.041.92) Current moderate/heavy 1.28 (0.87–1.87) Drinking status unknown 0.16 (0.02–1.12) Health care factors Saw/talked to HCP in last 12 months No (ref.) — Yes 0.89 (0.64–1.22) Have health insurance coverage No (ref.) — Yes 0.50 (0.18–1.37) Don't know/Refused/Not ascertained — Other health screenings Ever been tested for HIV No (ref.) — — Yes 6.53 (5.198.21) 5.37 (4.196.89) Don't know/Refused/Not ascertained 1.28 (0.81–2.03) 1.64 (1.042.58) Blood pressure check, last 12 mo No (ref.) — Yes 1.36 (0.69–2.69) Don't know/Refused/Not ascertained 1.06 (0.53–2.14) Cholesterol checked last 12 mo No (ref.) — Yes 1.42 (0.88–2.27) Don't know/Refused/Not ascertained 1.05 (0.64–1.74) Had colon cancer test in last 12 months No (ref.) — — Yes 1.91 (1.512.43) 1.56 (1.212.00) Don't know/Refused/Not ascertained 0.37 (0.10–1.31) 0.35 (0.09–1.26) aAll variables listed were initially included in the multivariable model, and a significance of 0.05 was required to remain in the model. †Designates a variable that was eliminated from the multivariable model. Youngest group (born after 1985). In the multivariable model, HCV screening did not significantly change over time in this birth cohort. Age was significantly associated with screening and increasing age was associated with an increase in the odds of screening (aOR = 1.04; 95% CI = 1.00–1.07). Likewise, whether or not the participant had ever been screened for HIV was associated with an increased odds of screening (aOR = 5.72; 95% CI = 4.75–6.90). Female gender was associated with a decreased odds of screening (aOR = 0.79; 95% CI = 0.66–0.95). 
Second youngest group (born 1966–1985). There was a significant difference in screening over time in the multivariable model for this birth cohort and screening increased significantly from 2013 to 2015 (aOR = 1.20; 95% CI = 1.06–1.35). People residing in the Midwest (aOR = 0.78; 95% CI = 0.66–0.94) and females (aOR = 0.66; 95% CI = 0.60–0.74) had lower odds of HCV screening. Non-Hispanic black participants (aOR = 0.70; 95% CI = 0.62–0.80), non-Hispanic Asian participants (aOR = 0.64; 95% CI = 0.50–0.83), and Hispanic participants (aOR = 0.68; 95% CI = 0.59–0.78) all had lower odds of screening than their non-Hispanic white counterparts. Several indicators of use of preventive health services were positively associated with the odds of HCV screening including having health insurance coverage (aOR = 1.19; 95% CI = 1.03–1.37), having been tested for HIV (aOR = 4.59; 95% CI = 4.06–5.18), and having their blood pressure checked in the last 12 months (aOR = 1.64; 95% CI = 1.38–1.94). Baby boomers. Among the baby boomer population, the only variables not significantly associated with HCV screening in the multivariable model were insurance status and having blood pressure checked in the last 12 months (P > 0.05). In the final model the odds of HCV screening increased significantly from 2013 to 2014 (OR = 1.20; 95% CI = 1.05–1.38) and from 2013 to 2015 (OR = 1.31; 95% CI = 1.13–1.52). Age was significant but in the opposite direction than was observed in the youngest group: increasing age was associated with decreasing odds of HCV screening (aOR = 0.90; 95% CI = 0.85–0.94). Non-Hispanic black participants (aOR = 0.81; 95% CI = 0.69–0.94) and Hispanic participants (aOR = 0.79; 95% CI = 0.66–0.95) were still less likely to have been screened as compared with their non-Hispanic white counterparts, but there was no longer a significant difference for non-Hispanic Asian participants in this birth cohort. 
As with the other birth cohorts, female gender was associated with decreased odds of screening (aOR = 0.71; 95% CI = 0.63–0.79), and several variables reflecting use of preventive health services were positively associated with screening including seeing a healthcare provider in the last 12 months (aOR = 1.27; 95% CI = 1.11–1.47), ever having been tested for HIV (aOR = 4.17; 95% CI = 3.70–4.70), having their cholesterol checked in the last 12 months (aOR = 1.34; 95% CI = 1.15–1.57), and having a colon cancer test in the last 12 months (aOR = 1.43; 95% CI = 1.25–1.62). Older group (born before 1945). Few variables were associated with HCV screening among the older birth cohort population in the multivariable model. Geographic location was significant for this birth cohort, with people living in the West having higher odds of HCV screening (aOR = 2.00; 95% CI = 1.39–2.88). Age was significant in the same direction as the baby boomers and increasing age was associated with decreasing odds of screening (aOR = 0.85; 95% CI = 0.76–0.95). As with all other birth cohorts, female gender was associated with lower odds of screening (aOR = 0.71; 95% CI = 0.57–0.89). Some indicators of use of preventive health services were positively associated with the odds of screening including ever having been tested for HIV (aOR = 5.37; 95% CI = 4.19–6.89) and having had a colon cancer test in the last 12 months (aOR = 1.56; 95% CI = 1.21–2.00). As of 2013, both the CDC and USPSTF recommend one-time HCV screening for all baby boomers (6, 7). This study provides population estimates of screening rates over three consecutive years for the baby boomer population since the implementation of the new recommendations. In addition, we examined the populations that are older and younger than the baby boomers to compare screening predictors and examine differences between populations for whom there are risk-based versus universal screening recommendations. 
Our large, nationally representative sample demonstrated screening has increased slightly in the baby boomer population (1.3% from 2014 to 2015), but the proportion screened for HCV (12.8%) falls well below the national recommendation for universal screening in this birth cohort. While the proportion of baby boomers screened appeared to decrease from 2013 to 2014 and then increase from 2014 to 2015, this apparent decrease was attenuated in the multivariable model controlling for other factors. The HCV screening pattern among baby boomers demonstrated by these data is consistent with recent research and indicates the need to dramatically improve rates of screening (28). The relatively low HCV screening prevalence suggests the existence of barriers to screening at multiple levels. For example, recent research demonstrates that barriers at the provider level include low communication skills (29), and low awareness of HCV prevalence and screening recommendations (30). Barriers at the patient level include lack of knowledge and awareness of HCV infection (31), confusion regarding transmission (30), and lack of insurance to pay for screening (32). Practice-level barriers include lack of routine and automated reminders for screening, inadequate funding for HCV prevention and control (31), and inadequate insurance reimbursement (33). HCV screening prevalence differed by race and ethnicity for the second youngest and baby boomer populations. Specifically, non-Hispanic blacks and Hispanics had lower odds of screening in both of these birth cohorts. It is important to note that this was not significant for baby boomers in the bivariate analysis and the trend was only apparent in the multivariable model, suggesting Hispanics and non-Hispanic blacks were screened more due to other factors that were controlled for in the multivariable model. 
This is particularly worrisome given findings from a recent study indicating non-Hispanic blacks are more likely to have a current infection than non-Hispanic whites (34) and HCV-infected Hispanics have higher rates of advanced fibrosis and cirrhosis than HCV-infected non-Hispanic whites (35). One of the variables with the strongest association with HCV screening was also HIV testing, which could be a marker of a physician's concern about a patient's intravenous drug use. This may indicate that either providers still screen based on risk factors as opposed to birth cohort (baby boomers), or patients engaging in high risk behaviors specifically sought out HCV screening. The new screening guidelines are meant to augment, not replace risk-based guidelines, but if they are not properly implemented, half of those chronically infected with HCV may fail to be identified (8). Interestingly, having been screened for colon cancer was positively associated with HCV screening for the two birth cohorts for which it was examined. While it is common for preventive care screening tests and pro-health behaviors to cluster (36), in this study, it was only colon cancer screening, not other tests such as blood pressure or cholesterol screening that was reliably associated with HCV screening regardless of age. It is possible this finding may be due to the fact that colon cancer screening (via colonoscopy) is typically performed by a gastroenterologist, the same provider who treats HCV infections, and these providers may be more aware of HCV screening recommendations. However, it is typically the primary care provider, not a specialist, who orders HCV screening. Therefore, the stronger association between colon cancer screening and HCV screening was an unexpected finding that requires replication in other studies. Other indicators of use of preventive health services (e.g., having seen a provider in the last 12 months) were also associated with HCV screening. 
Approximately 35% of baby boomers had seen a provider in the last 12 months but only 12% had ever been screened for HCV. This demonstrates that, much like other preventive health measures (37, 38), there are missed clinical opportunities both for routine preventive care visits and for HCV screening in this group. The odds of HCV screening did increase for the baby boomer population over the three survey years. These increases, although statistically significant, were relatively small and well below the goal of universal screening in this birth cohort. Interestingly, some variables had associations in the opposite direction for different birth cohorts. For example, not being currently married (i.e. widowed, divorced, or separated) was not retained in the multivariable model for the youngest and oldest birth cohorts but was associated with increased screening for the middle two birth cohorts. It is possible the different pattern between the birth cohorts is because people who are not currently married in the older group are more often widowed and the people not currently married in the younger birth cohort are more likely divorced, indicating a different risk profile. Likewise, age was negatively associated with the odds of being screened for the baby boomer and older group but was positively associated with screening in the youngest group. This study used data from a large, weighted, nationally representative sample to assess trends in HCV screening in the U.S. population over time. These unique data offer the opportunity to identify important factors associated with HCV screening. Although these are strengths, study findings should be considered in light of certain limitations. First, the survey did not include questions regarding all known HCV risk factors including HIV status, being born to an HCV-infected mother, receipt of a solid organ transplant or blood transfusion before 1992, or a history of injection drug use. 
Moreover, the NHIS sample excludes certain groups known to have high HCV infection rates including the homeless and incarcerated (39–41). Furthermore, we included people who refused to answer whether they were screened for HCV in the analyses. While it is possible these people were more likely to be positive and did not want to share that information, this group accounted for only 0.1% of the total sample population and is therefore unlikely to affect results. In addition, these data are cross-sectional, so longitudinal associations within a cohort cannot be assessed. In particular, we cannot determine whether the reported behaviors included in the “high risk” groups occurred before or after HCV screening. NHIS data are self-reported and are therefore subject to individual interpretation and recall bias. This is especially relevant given research on other cancer screenings demonstrating people can be unaware of what they are being screened for (42). However, the NHIS does ask participants about several preventive health screenings including HIV and hepatitis B testing, reducing the likelihood a participant may confuse them. In addition, the use of large, nationally representative datasets is a commonly used technique to assess trends across the United States including several preventive health behaviors such as mammography (43), colorectal cancer screening (44, 45), and genetic testing (46). In addition, studies examining the reliability and validity of responses of national surveys found responses were similar between surveys and registry-verified patient data (47, 48). Finally, it is possible some of the statistically significant associations in this population are due to the large sample size. Despite these limitations, this study provides the first analysis of HCV screening trends over these three years for the birth cohort targeted for one-time screening as well as other age and risk groups. 
While HCV screening is increasing over time, there is substantial room for improvement. Future research should focus on interventions to increase access to primary care, particularly among the baby boomer cohort and HCV recommendation awareness among both providers and patients with a special focus on groups demonstrating significantly lower screening rates, such as Hispanics, non-Hispanic blacks, and females. R.R. Reich is a biostatistician at Sarasota Memorial Hospital. D.R. Nelson reports receiving commercial research grants from Gilead, Merck, and AbbVie. No potential conflicts of interest were disclosed by the other authors. The content of this manuscript is solely the responsibility of the authors and does not necessarily represent the official views of the Patient Centered Outcomes Research Institute, the National Cancer Institute, or the National Institutes of Health. Conception and design: M.L. Kasting, A.R. Giuliano, R.R. Reich, R.G. Roetzheim, D.R. Nelson, E. Shenkman, S.T. Vadaparampil Development of methodology: M.L. Kasting, R.R. Reich, R.G. Roetzheim, D.R. Nelson, S.T. Vadaparampil Acquisition of data (provided animals, acquired and managed patients, provided facilities, etc.): M.L. Kasting, S.T. Vadaparampil Analysis and interpretation of data (e.g., statistical analysis, biostatistics, computational analysis): M.L. Kasting, A.R. Giuliano, R.R. Reich, D.R. Nelson, S.T. Vadaparampil Writing, review, and/or revision of the manuscript: M.L. Kasting, A.R. Giuliano, R.R. Reich, R.G. Roetzheim, D.R. Nelson, E. Shenkman, S.T. Vadaparampil Administrative, technical, or material support (i.e., reporting or organizing data, constructing databases): M.L. Kasting Study supervision: M.L. Kasting, A.R. Giuliano, S.T. Vadaparampil This work was supported, in part, by the Biostatistics Core at the H. Lee Moffitt Cancer Center & Research Institute, an NCI designated Comprehensive Cancer Center (P30-CA076292; PI: Sellers). M. Kasting and A. 
Giuliano are supported, in part, by the NIH/NCI-funded Center for Infection Research in Cancer (K05-CA181320; PI: A. Giuliano). M. Kasting is also supported by the National Cancer Institute of the NIH (R25-CA090314; PI: Brandon). Information reported in this publication was supported by the University of Florida Clinical and Translational Science Institute, which is supported in part by the NIH National Center for Advancing Translational Sciences under award number UL1TR001427. Information reported in this publication was supported in part by the OneFlorida Clinical Data Network, funded by the Patient-Centered Outcomes Research Institute #CDRN-1501-26692. The costs of publication of this article were defrayed in part by the payment of page charges. This article must therefore be hereby marked advertisement in accordance with 18 U.S.C. Section 1734 solely to indicate this fact. 1. Ryerson AB , Eheman CR , Altekruse SF , Ward JW , Jemal A , Sherman RL , et al Annual report to the nation on the status of cancer, 1975-2012, Featuring the increasing incidence of liver cancer . Cancer 2016 ; 122 : 1312 37 . 2. Centers for Disease Control and Prevention . CDC fact sheet: viral hepatitis and liver cancer 2016 . Available from : https://www.cdc.gov/nchhstp/newsroom/docs/factsheets/viral-hep-liver-cancer.pdf. 3. Centers for Disease Control and Prevention . Recommendations for prevention and control of hepatitis C virus (HCV) infection and HCV-related chronic disease . MMWR Morb Mortal Wkly Rep 1998 ; 47 : 1 39 . 4. Centers for Disease Control and Prevention . Hepatitis C FAQs for the public 2016 . Available from : https://www.cdc.gov/hepatitis/hcv/cfaq.htm. 5. Centers for Disease Control and Prevention . Hepatitis C: Why baby boomers should get tested 2016 . Available from : https://www.cdc.gov/knowmorehepatitis/media/pdfs/factsheet-boomers.pdf. 6. 
Smith BD , Morgan RL , Beckett GA , Falck-Ytter Y , Holtzman D , Teo CG , et al Recommendations for the identification of chronic hepatitis C virus infection among persons born during 1945-1965 . MMWR Morb Mortal Wkly Rep 2012 ; 61 : 1 32 . 7. U.S. Preventive Services Task Force . Final recommendation statement: Hepatitis C screening. 2016 . Available from : https://www.uspreventiveservicestaskforce.org/Page/Document/. 8. Ona M , Papafragkakis H , Pan C . Hepatitis C screening in the United States: current models and challenges . Am J Digest Dis 2015 ; 2 : 29 40 . 9. McGarry LJ , Pawar VS , Panchmatia HR , Rubin JL , Davis GL , Younossi ZM , et al Economic model of a birth cohort screening program for hepatitis C virus . Hepatology 2012 ; 55 : 1344 1355 . 10. Pataky R , Phillips N , Peacock S , Coldman AJ . Cost-effectiveness of population-based mammography screening strategies by age range and frequency . J Cancer Policy 2014 ; 2 : 97 102 . 11. Jemal A , Fedewa SA . Prevalence of hepatitis C virus testing in cohorts born between 1945 and 1965 in the U.S . Am J Prev Med 2015 ; 48 : e7 9 . 12. Poynard T , Marcellin P , Lee SS , Niederau C , Minuk GS , Ideo G , et al Randomised trial of interferon alpha2b plus ribavirin for 48 weeks or for 24 weeks versus interferon alpha2b plus placebo for 48 weeks for treatment of chronic infection with hepatitis C virus. International Hepatitis Interventional Therapy Group (IHIT) . Lancet 1998 ; 352 : 1426 1432 . 13. McHutchison JG , Gordon SC , Schiff ER , Shiffman ML , Lee WM , Rustgi VK , et al Interferon alfa-2b alone or in combination with ribavirin as initial treatment for chronic hepatitis C. Hepatitis Interventional Therapy Group . N Engl J Med 1998 ; 339 : 1485 1492 . 14. Raedler LA . Once-a-day Harvoni (Ledipasvir plus Sofosbuvir), a new oral combination for the treament of patients with genotype 1 chronic hepatitis C infection . Am Health Drug Benefits 2015 ; 8 : 54 58 . 15. U.S. Food and Drug Administration . 
FDA approves first combination pill to treat hepatitis C [press release] 2014 . Available from : http://www.fda.gov/NewsEvents/Newsroom/PressAnnouncements/ucm418365.htm. 16. U.S. Food and Drug Administration . FDA approves Epclusa for treatment of chronic Hepatitis C virus infection 2016 . Available from : http://www.fda.gov/NewsEvents/Newsroom/PressAnnouncements/ucm508915.htm. 17. Coppola N , Pisaturo M , Zampino R , Macera M , Sagnelli C , Sagnelli E . Hepatitis C virus markers in infection by hepatitis C virus: In the era of directly acting antivirals . World J Gastroenterol 2015 ; 21 : 10749 10759 . 18. Li DK , Chung RT . Impact of hepatitis C virus eradication on hepatocellular carcinogenesis . Cancer 2015 ; 121 : 2874 2882 . 19. U.S. Department of Health Human Services . Healthy People 2020 Topics and Objectives: Immunizations and Infectious Disease . n.d. Available from : https://www.healthypeople.gov/2020/topics-objectives/topic/immunization-and-infectious-diseases/objectives#4696. 20. Centers for Disease Control and Prevention . About the National Health Interview Survey. 2016 . Available from : http://www.cdc.gov/nchs/nhis/about_nhis.htm. 21. Henderson DK . Managing occupational risks for hepatitis C transmission in the health care setting . Clin Microbiol Rev 2003 ; 16 : 546 568 . 22. Befrits R , Hedman M , Blomquist L , Allander T , Grillner L , Kinnman N , et al Chronic hepatitis C in alcoholic patients: prevalence, genotypes, and correlation to liver disease . Scand J Gastroenterol 1995 ; 30 : 1113 1118 . 23. Yu SJ . A concise review of updated guidelines regarding the management of hepatocellular carcinoma around the world: 2010-2016 . Clin Mol Hepatol 2016 ; 22 : 7 17 . 24. Joshi SN . Hepatitis C Screening . Ochsner J 2014 ; 14 : 664 668 . 25. U.S. Preventive Services Task Force . Final Recommendation Statement: High Blood Pressure in Adults: screening; 2017 . 
Available from : https://www.uspreventiveservicestaskforce.org/Page/Document/RecommendationStatementFinal/high-blood-pressure-in-adults-screening. 26. U.S. Preventive Services Task Force . Final Update Summary: Lipid Disorders in Adults (Cholesterol, Dyslipidemia): screening; 2015 . Available from : https://www.uspreventiveservicestaskforce.org/Page/Document/UpdateSummaryFinal/lipid-disorders-in-adults-cholesterol-dyslipidemia-screening. 27. U.S. Preventive Services Task Force . Final Update Summary: Colorectal Cancer: screening; 2015 . Available from : https://www.uspreventiveservicestaskforce.org/Page/Document/UpdateSummaryFinal/colorectal-cancer-screening. 28. Jemal A , Fedewa SA . Recent hepatitis C virus testing patterns among baby boomers . Am J Prev Med 2017 ; 53 : e31 33 . 29. Zickmund S , Hillis SL , Barnett MJ , Ippolito L , LaBrecque DR . Hepatitis C virus-infected patients report communication problems with physicians . Hepatology 2004 ; 39 : 999 1007 . 30. Dhopesh VP , Taylor KR , Burke WM . Survey of hepatitis B and C in addiction treatment unit . Am J Drug Alcohol Abuse 2000 ; 26 : 703 707 . 31. Mitchell AE , Colvin HM , Palmer Beasley R . Institute of Medicine recommendations for the prevention and control of hepatitis B and C . Hepatology 2010 ; 51 : 729 733 . 32. Ong JP , Collantes R , Pitts A , Martin L , Sheridan M , Younossi ZM . High rates of uninsured among HCV-positive individuals . J Clin Gastroenterol 2005 ; 39 : 826 830 . 33. Reinhardt UE . The disruptive innovation of price transparency in health care . JAMA 2013 ; 310 : 1927 1928 . 34. Liu G , Holmberg SD , Kamili S , Xu F . Racial disparities in the proportion of current, unresolved hepatitis C virus infections in the United States, 2003-2010 . Dig Dis Sci 2014 ; 59 : 1950 1957 . 35. 
Turner BJ , Taylor BS , Hanson J , Liang Y , Veerapaneni P , Villarreal R , et al High priority for hepatitis C screening in safety net hospitals: Results from a prospective cohort of 4582 hospitalized baby boomers . Hepatology 2015 ; 62 : 1388 1395 . 36. Fortenberry JD , Costa FM , Jessor R , Donovan JE . Contraceptive behavior and adolescent lifestyles: a structural modeling approach . J Res Adolesc 1997 ; 7 : 307 329 . 37. Zapka JG , Lemon SC . Interventions for patients, providers, and health care organizations . Cancer 2004 ; 101 : 1165 1187 . 38. Coughlin SS , Thompson T . Physician recommendation for colorectal cancer screening by race, ethnicity, and health insurance status among men and women in the United States, 2000 . Health Promot Pract 2005 ; 6 : 369 378 . 39. Varan AK , Mercer DW , Stein MS , Spaulding AC . Hepatitis C seroprevalence among prison inmates since 2001: still high but declining . Public Health Rep 2014 ; 129 : 187 195 . 40. Nyamathi AM , Christiani A , Windokun F , Jones T , Strehlow A , Shoptaw S . Hepatitis C virus infection, substance use and mental illness among homeless youth: a review . AIDS 2005 ; 19 : S34 40 . 41. Chak E , Talal AH , Sherman KE , Schiff ER , Saab S . Hepatitis C virus infection in USA: an estimate of true prevalence . Liver Int 2011 ; 31 : 1090 1101 . 42. Kasting ML , Wilson S , Zollinger TW , Dixon BE , Stupiansky NW , Zimet GD . Differences in cervical cancer screening knowledge, practices, and beliefs: an examination of survey responses . Prev Med Rep 2017 ; 5 : 169 174 . 43. Breen N , Gentleman JF , Schiller JS . Update on mammography trends: comparisons of rates in 2000, 2005, and 2008 . Cancer 2011 ; 117 : 2209 2218 . 44. Breen N , Wagener DK , Brown ML , Davis WW , Ballard-Barbash R . Progress in cancer screening over a decade: results of cancer screening from the 1987, 1992, and 1998 National Health Interview Surveys . J Natl Cancer Inst 2001 ; 93 : 1704 1713 . 45. 
Klabunde CN , Cronin KA , Breen N , Waldron WR , Ambs AH , Nadel MR . Trends in colorectal cancer test use among vulnerable populations in the United States . Cancer Epidemiol Biomarkers Prev 2011 ; 20 : 1611 1621 . 46. Mai PL , Vadaparampil ST , Breen N , McNeel TS , Wideroff L , Graubard BI . Awareness of cancer susceptibility genetic testing: the 2000, 2005, and 2010 National Health Interview Surveys . Am J Prev Med 2014 ; 46 : 440 448 . 47. Pierannunzi C , Hu SS , Balluz L . A systematic review of publications assessing reliability and validity of the behavioral risk factor surveillance system (BRFSS), 2004–2011 . BMC Med Res Methodol 2013 ; 13 : 49 . 48. Nelson DE , Powell-Griner E , Town M , Kovar MG . A comparison of national estimates from the National Health Interview Survey and the behavioral risk factor surveillance system . Am J Public Health 2003 ; 93 : 1335 1341 .
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.18122513592243195, "perplexity": 14803.644673776073}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": false}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-21/segments/1652662580803.75/warc/CC-MAIN-20220525054507-20220525084507-00296.warc.gz"}
http://en.wikipedia.org/wiki/Talk:Kleene%e2%80%93Rosser_paradox
WikiProject Mathematics (Rated Start-class, Low-priority) This article is within the scope of WikiProject Mathematics, a collaborative effort to improve the coverage of Mathematics on Wikipedia. If you would like to participate, please visit the project page, where you can join the discussion and see a list of open tasks. Mathematics rating: Start Class Low Priority Field: Foundations, logic, and set theory ## Resolution by Curry? I don't know, but I presume that it was Haskell Curry, with his "don't run away from the paradox" attitude, that resolved the paradox by proposing its equivalence to recursion. linas 16:13, 15 November 2007 (UTC) To answer my own question, it seems to be implied in the Stanford encyclopedia of philosophy that Curry did this in 1941 or 1942 in one or more essays, but no reference is given. linas 16:39, 15 November 2007 (UTC) I had a very hard time following this; I think it's because of some notational and terminological issues. On one hand, the lambda calculus is not inconsistent; what Kleene and Rosser showed inconsistent was a formal logic the Church had proposed. This logic was not what is now called lambda calculus; $\lambda x (\lnot x x)$ is not an expression in pure lambda calculus. I looked up the original paper by Kleene and Rosser and redid the article to match what I could find there and in the Stanford entry linked in the article. This resulted in this version which, while more clear to me has none of the flavor of the orignal. This made me think that maybe there is some other source that is actually being used here, or maybe some other phenomenon than Kleene and Rosser's proof is being discussed. So I reverted my edits for the time being. But after reworking it, I still don't have a good sense of what the original version is trying to say. — Carl (CBM · talk) 19:16, 15 November 2007 (UTC) I think this should be deleted, I think it's started by someone who doesn't really understand λc. 
As far as I know '¬' is not even an object in Λc. ¬ applies to truth values, lambda expressions aren't truth values. Sure, it's a convention to interpret Λyx.x as 'false', but that's just a convention and it's simply 'the constant function which returns the identity function for any input.', if '¬' here is defined as the standard NOT in lambda calculus then it's just infinite regress. Which is mainly the fault of defining a ridiculous function. Also, even if we could define a function in Λ which would lead to strange behaviour in λc, that doesn't make λc inconsistent, it just means we can define a function that does strange things. Saying 'Let 3 be 4...' doesn't make arithmetics inconsistent. Unless of course an axiom of lambda calculus is that no lambda expression equals its own negation. But to think of it, that can't even be so, because lambda expressions have no truth values to begin with. That a certain expression IS a lambda expression has a truth value. I vote for removing this article and adjusting the other articles that claim that untyped lambda is inconsistent. Rajakhr (talk) 20:29, 24 December 2009 (UTC) ## It doesn't compute The article does no make any sense to me. What is " λ.xxx)" supposed to mean? Is the intention λ x. ¬ x x? But this is a denotation for a function; if f is a function, what is the meaning of f ⇔ ¬ f supposed to be? Also, I don't see how one is "deduced" from the other. If Y is the paradoxical combinator, which is expressible in the untyped lambda calculus, then Y ¬ is interconvertible with ¬(Y ¬) – which, if ¬ is interpreted semantically as negation and interconvertibility as material equivalence, shows an inconsistency. I don't know, though, if this is the paradox found by Kleene & Rosser.  --Lambiam 19:29, 15 November 2007 (UTC) I am also confused (see above). The original paper of Kleene and Rosser doesn't appear to explicitly use a fixed point, so this must be referring to something else. 
— Carl (CBM · talk) 19:55, 15 November 2007 (UTC) Sorry, writing articles while waking up, while staring at scribbled notes made late at night when one is tired, is perhaps a bad idea. Especially when one's grasp of the subject matter is shaky and general knowledge superficial. Yes, it's a combinator-ish thing. According to my notes, I should have written $K = (\lambda x . \neg x x)$ and so $KK = (\lambda x . \neg x x)K = \neg KK$. Since Church's attempts were to lay a foundation for mathematics and logic, the collection of valid symbols included negation, for-all, there-exists, etc in addition to just-plain lambda-binding of free variables. Rather than using equals signs, I should have written "implies", or "if and only if", thus the use of the double-sided arrow in the earlier draft. Yes, Y ¬ is essentially the same thing, where interconversion is interpreted to mean "if ... then ..." so that "if Y ¬ then ¬(Y ¬)" is the paradox. I presume, then, that this is Curry's true contribution to elucidating the connection to recursive functions. I did not get the impression that this was ever written up in a paper; rather Kleene and Rosser were both grad students of his, and that this simply came up during their studies. I don't know. There were apparently various repairs made to the original system to avoid this paradox, including the elimination of some of the axioms; supposedly this was disheartening enough that Church abandoned further attempts to formalize mathematics. I'm skimming the surface of a topic I'm mostly unfamiliar with. linas 05:43, 16 November 2007 (UTC) The "other sources" are multiple library books. I have access to a very good library. I have a goal of reading and understanding the first five pages of every book in there :-) Which explains my scattered contributions on WP.
linas 05:49, 16 November 2007 (UTC) Yes, and although Russell's paradox is strictly about set theory, I'd also been recently reading about topology and lattices and heyting algebras and order theory, where there are a lot of similar concepts and constructions, and so when I saw this, I had a big a-ha moment, vaguely naively along the lines of "for every non-halting turing machine there exists an equivalent paradox in naive set theory, and v.v.". I suppose that's just naive unprovable speculation/inspiration, but it still seems to somehow hook up order theory notions e.g. from ontology, to computable things. OK, that's poetry, I guess. Goodnight. linas 06:12, 16 November 2007 (UTC) I think my comments above got missed. Kleene and Rosser did write a paper, and their actual result is not similar to what is here. I wrote up a stub about their result here. I would appreciate it if you could add some sources to this stub, because I can't clean it up until I find some reference that says what this "paradox" is supposed to be. — Carl (CBM · talk) 12:55, 16 November 2007 (UTC) OK. I'll hit the library this weekend or early next week. What I saw was rather thin, a short discussion in an intro chapter. Googling reveals two different things called Kleene-Rosser paradox, one of the form kk=~kk and another being the 1935 paper which is summarized as "a demonstraction that Richard's paradox applies to Churches logic". Google is very thin on this. Some urls: In support of "KR paradox is kk=~kk": In support of "KR paradox is thier 1935 paper": Other: linas 16:01, 16 November 2007 (UTC) Thanks for finding some references; I would really like to clean this up, since if Lambiam and I are confused then probably the intended reader is even more confused. We also need to distinguish between lambda calculus, which is consistent, and Church's formal system, which is inconsistent. 
Church's system is a formal logic that includes some aspects of lambda calculus along with other aspects of logic. In 1935, Kleene and Rosser just showed that Church's particular formal system is inconsistent. Since there are lots of inconsistent formal systems it's not surprising to find some occasionally. So I don't see that there is any actual 'paradox'. I hope that the references will clarify exactly what is supposed to be paradoxical about their result. — Carl (CBM · talk) 16:08, 16 November 2007 (UTC) I guess I don't understand what you want. I've now seen Y called the paradoxical combinator in several texts. I got the impression that perhaps Curry himself called it that. I've seen several books and articles now that make it clear that Curry was drawn to the paradoxes. The result Y~=~Y~ seems paradoxical, don't you agree? The Bunder reference above explicitly calls the results of the 1935 KR paper "the KR paradox", and does so in the title. All I'd seen was a breif discussion about formal systems in the intro/preface, with a presentation no deeper than this article, (surely with a different and more elegant nuance), with the words "kleen-rosser", "paradox" and kk=~kk in proximity. Perhaps there was a deeper discussion further in the book, perhaps not. A few minutes in the library leafing through indexes will solve this. But given that kk=~kk is extremely similar to Russell's paradox, I can't much carry on if you don't think there's anything much "paradoxical" about it. linas (talk) 13:54, 17 November 2007 (UTC) I'm not familiar with the literature about the "Kleene-Church paradox" so I need to review it to learn about the issue. If the standard terminology is 'paradox', I'll go with it in the article, but I can explain what I meant. On its own, "kk = ~kk" is just a string of 8 symbols. Only when you try to apply some semantics can it appear paradoxical. The problem is that the semantics that Church developed are inconsistent. 
If I use an invalid algebraic method and derive "0 = 1", that doesn't make the equation a paradox, it just shows that I used an invalid method. So I prefer to limit the word 'paradox' to actual paradoxes in natural language, for example the question about the set of all sets that don't contain themselves, or Curry's paradox in natural language. When natural language paradoxes are formalized, invariably one of two things happens: (1) the formal system is inconsistent, in which case it's hard to claim the result is paradoxical or (2) the paradox no longer arises. Classic examples of this are the paradoxes of naive set theory and Lowenheim's 'paradox' about countable models. In short, I like to be more conservative with the word 'paradox' than other authors. — Carl (CBM · talk) 14:26, 17 November 2007 (UTC) The thing is not reviewed in Barendregt's book "Lambda calculus", or, at least, it's not in the index. The paradox, to me, is precisely that the thing is so minimal: "kk=~kk" requires two "axioms" (I'm not sure if the word "axiom" is appropriate here), and a linguistic gloss or two: the axiom of "apply" (lambda application), linguistically interpreted as "if the previous formula is true, then the result of apply is true". A second axiom is that formulas can be either true or false, and a third axiom is the admission of ~ as a valid symbol, denoting "not". So the ingredients to the paradox are quite minimal -- In particular, note that there is no appeal to "there exists", "for each", "is an element of", "and", "or", etc. The vocabulary is very small. The paradox lies in the linguistic gloss: The statement "kk" is neither true, nor is it false. It's indeterminate. Thus, since "kk" was neither true nor false, then "~kk" is neither false nor true, and so "kk=~kk" isn't really a contradiction of any sort at all. It just looks like one, if one makes the mental slip, the mental error of believing that "kk" evaluates to T or F. It does not.
This is why the rephrasing as a recursive function is the "solution" to the paradox: the recursion provides a semantics for the thing which is "consistent". (Similar remark apply to Y~=~Y~ : because Y does not (cannot) evaluate to T or F, there is nothing "wrong" with Y~=~Y~; it is only the erroneous use of natural language that makes it seem contradictory. The correct use is to say that "Y implies recursion".) The thing that has me stumped is this: is there any way of "axiomatizing" or "formalizing" the statement "Y implies recursion"? Perhaps Barendregt's book will inform ... linas (talk) 15:55, 19 November 2007 (UTC) It can only be a paradox if there is a notion of semantics and truth involved. If I say "@@", there is no paradox. If this is a theorem in some formal system, there is still no paradox. If you additionally know that the semantic interpretation of "@F", where the interpretation F of "F" is a predicate, is the same as of what is more conventionally written as ∀x:F(x), with α-conversion as needed, we get that the interpretation of "@@" is ∀P:@(P) = ∀P:∀x:P(x). This is an inconsistency, not a paradox. Only when it is agreed that the axioms and inference rules of the formal system appear quite sound when interpreted semantically do we get a paradox. Most of the ingredients needed to make the rewriting of "kk" to "~kk" a paradox are missing (in the sense of not having been supplied). In many programming languages you can give a definition like "define f(x) = not(f(x))". This is really similar to adding an axiom to a possibly consistent system for a new symbol "f", stating that ∀x:(f(x)⇔¬f(x)). This obviously makes the system inconsistent, but does not create a paradox. Under any reasonable semantics for such programming languages the "definition" does not actually define "f", except possibly as the everywhere undefined function. 
Our article on fixed point combinators assumes, unfortunately and without justification, immediately a computational interpretation of the lambda calculus. It is not a coincidence that I used the section title "It doesn't compute". If you use a computational interpretation, then guaranteed termination is related to the normalization property, which a lambda calculus may or may not have. If it has the property (like the simply-typed lambda calculus), you cannot express any of the fixed-point combinators, and also not your "k". In summary, without a logical system that appeared sound – but wasn't – there is no paradox. It is possible that K & R first gave a complicated construction exhibiting an inconsistency, while Church later found a much simpler construction using "k".  --Lambiam 20:11, 19 November 2007 (UTC) OK, point taken, I don't really have a semantic interpretation for what k is supposed to be. I'll try to dig up stronger material for all this, but I do not have high hopes; what I've seen was cursory, and I don't particularly want to engage in original research in the history of mathematics. Again, I have only a shallow acquaintance with the field, I presume that @@ is a standard textbook example, but its refreshingly new to me :-) Perhaps Prof. Skalka, author cited above, can be recruited to sort out the muddle. linas (talk) 05:27, 20 November 2007 (UTC) I just came up with @@ on the spot to illustrate the point.  --Lambiam 14:16, 20 November 2007 (UTC) Skalka is an associate professor of computer science, and has no specific background in logic that I can discern. You might consider asking assistance from Wikipedians who are professional mathematical logicians, such as User:Hans Adler.  --Lambiam 15:34, 20 November 2007 (UTC) There's also Trovatore, Arthur Rubin, me, and others. The problem is that this is a relatively obscure topic in contemporary mathematical logic. So it might take some luck to find a person with an immediate working knowledge of it. 
The original paper by Kleene and Rosser is easy enough to read but doesn't contain the info being presented here. — Carl (CBM · talk) 16:06, 20 November 2007 (UTC) Um... Since when are computer scientists specializing in theory of computation and programming languages not qualified to discuss the Lambda calculus? Ever heard of the Curry-Howard isomorphism (to quote Wikipedia's article: "the direct relationship between computer programs and mathematical proofs")?!! 65.183.135.231 (talk) 19:51, 4 February 2009 (UTC) My understanding of this is that k is to be interpreted as a propositional function, or equivalently as the set of terms that it is true for. With this interpretation, \x.-(x x) is essentially interpreted as "the set of all sets that don't contain themselves," which is Russell's Paradox in this system. k k is interpreted as "k contains itself" and -(k k) is interpreted as "k does not contain itself," which is the paradox. —Preceding unsigned comment added by 69.120.140.170 (talk) 20:56, 14 July 2008 (UTC) I don't understand the problem here. It is an infinite function. When you ask a question and get infinity, it's because you asked the wrong question.. You can't just evaluate kk because it is infinity recursive. —Preceding unsigned comment added by 207.207.126.249 (talk) 05:30, 11 February 2009 (UTC) ## Explanation If you read this, and know how to explain this paradox, please edit this page and help others understand it, because it is using symbols which go without a prepared explanation of use. Please avoid symbols altogether in an explanation, unless they are specifically explained themselves. —Preceding unsigned comment added by 70.171.37.195 (talk) 13:28, 20 March 2009 (UTC) ## ugly! I am not a dumb in mathematics, but I do not know nothing about \lambda-calculus. The written "kk+ etc..." sounds incomprehensible... why x and double x? why the point? I think the author of the page should reconsider rewritting it. 
—Preceding unsigned comment added by 160.103.2.224 (talk) 17:25, 25 November 2009 (UTC) ## Clarify Syntax The symbols in "k = (lambda x . not(xx))" need to be clarified. What does lambda mean? What does dot mean? What does xx mean? It's obviously not x squared in this context. —Preceding unsigned comment added by 129.174.190.101 (talk) 02:07, 2 March 2011 (UTC)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 3, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8344587683677673, "perplexity": 961.4217864905158}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-10/segments/1394010851505/warc/CC-MAIN-20140305091411-00011-ip-10-183-142-35.ec2.internal.warc.gz"}
https://sites.google.com/site/shakeshome/Welcome/how-to-s-1
### How To's #### How to Use Linux Commands in Windows with Cygwin posted Jan 25, 2011, 6:33 AM by Ahmed El-Sharkasy Windows command-line tools have advanced a lot with PowerShell, but Linux has had a much more usable terminal for many years. Here’s how you can get the best of both worlds with Cygwin, a ‘nix-style terminal for Windows PCs. In today’s lesson, we’ll explain how to get Cygwin running, but stay tuned for future articles where we’ll explain how to use it for all sorts of different things. ### Install Cygwin Cygwin comes with a normal setup.exe to install in Windows, but there are a couple steps you will need to pay attention to, so we will walk you through the installation. To keep the installation small while saving bandwidth for you and Cygwin, the default installer will download only the files you need from the internet. The default install path is C:\Cygwin but if you don’t like to have programs installed on the root of your C: drive you can change the path or make a symbolic link from C:\Cygwin to your program files. Click next until you come to a download mirror selection. Unfortunately, the installer does not say where the mirrors are located so in most cases you might as well just guess which mirror works best. After you have selected a mirror, the installer will download a list of available packages for you to install. Here is where things get a bit more intimidating. There will be hundreds of packages available separated by multiple different categories. If you don’t know what the package is you can leave the default selection and install additional packages later by running the installer again. If you know what package you need, you can search for it and the results will be automatically filtered. Once you click next, it will take a little while to download all the selected tools and then finish the installation. 
### Add Cygwin Path to Windows Environment Variable After the installation you will have a Cygwin icon on your desktop that you can launch to open the Cygwin terminal. This terminal starts in the C:\Cygwin\home\<user> folder but that isn’t particularly useful because you probably don’t have any files stored there. You can use all of the basic Linux commands but if you want to get back to your C: drive you have to change directory to /cygdrive/c. To make Cygwin work in your normal Windows command prompt you need to add Cygwin to your Windows Environment Variables. Start by opening your system properties with either Win+Pause/Break or right click on computer and select properties. In the left column click on advanced system settings to open the system properties window we are looking for. From the advanced tab, click on environment variables at the bottom. Then in the system variables, locate the path variable and click edit. At the end of the variable value option, add the Cygwin bin location like so. ;C:\Cygwin\bin Note: Make sure you add a semicolon to separate it from the other values. Click OK to close out of the window and then open a command prompt to test out a few Linux commands. As you can see from the below picture both pwd and ls work in the normal Windows command prompt. You can also see that /cygdrive/c is automatically added to the location. There is a lot more you can do with Cygwin installed and we will show you some more of the useful tools in future articles. Cygwin homepage #### Security Tips - Let's Make a Computer Virus ... But Only For Testing AntiVirus posted Jan 22, 2011, 8:00 PM by Ahmed El-Sharkasy www.shoutwhisper.com This is an interesting tips. We will test antivirus program running on computer whether it works well or not, but using quite a unique way, namely we will create a file containing the virus codes, so that when it runs, the antivirus program should be able to detect and remove it. 
But there is no need to fear, because we will create a virus that will not disrupt or damage the computer system even if your antivirus program cannot detect it. So this is just a kind of AV tester. These virus codes were developed by those who joined in the EICAR, which stands for European Institute for Computer Anti-Virus Research, whose website is at http://www.eicar.org/. Here are the steps to make this tester virus: 1. First step, run Notepad. This virus can be made just using Notepad. 2. Copy the codes below, and then paste them into Notepad. X5O!P%@AP[4\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H* 3. Save the file with filename: EICAR.COM. How: on Notepad, click File > Save As. On the Save As dialog box, choose a folder to save this file in, then choose the option Save As Type, next select All Files, and then type eicar.com for its filename. Finish it by clicking the Save button. It's done. 4. Now open Windows Explorer (press Windows + E), go to the folder containing the virus file you've created, and then run the virus file by double-clicking it. If the installed antivirus has a real-time guard, it should have detected the virus, usually by displaying a dialog box containing information about the detected threat. If your antivirus doesn't have a real-time guard (maybe you're using a light weight free AV :-)  => not recommended  ), run it and do a scan of the folder where the virus is located. I myself tried this on a computer using AVG Antivirus, and it detected the virus and displayed this message: Notes: Once again, note that the virus we created is not a real virus, but just a tester virus. Even if your antivirus program cannot detect and remove it, it will NOT harm your computer. For more information about this virus, please go to http://www.eicar.org/. 
#### How to Reset/Restore MySQL's root password posted Jan 21, 2011, 4:02 AM by Ahmed El-Sharkasy http://blog.ezzarghili.info/ If you previously set a root password for your MySQL server instance but have lost/forgotten it, don't worry — that happens to everyone. In this post I will explain the procedure to reset the password, as there is no way to restore it; the following procedure applies to any platform (Unix, Windows, Mac OSX, Linux). First let's look at the steps: 1. Stop mysqld 2. Restart mysqld 3. Connect to MySQL server 4. Restore / reset the root password 5. Stop and restart mysqld normally Now let's look at each step in details #### 1- Stop mysqld ##### a) On windows platform If the mysqld is running as a service you need to stop the mysql service in #Start menu > Control Panel > Administrative Tools > Services. or use the command: cmd> net stop MYSQL where MYSQL is the name of the service If that is not the case you may use the script provided with your install or just force stop it using the Task Manager ##### b) On *nix platforms you may stop mysql daemon using shell> kill 'pid' where 'pid' corresponds to processes of mysqld which you can get with the command shell> ps -ef | grep mysql or you may also use something like: shell> /etc/init.d/mysqld stop in some distros you may use the command shell> service mysql stop #### 2- Restart mysqld ##### a) On windows you need to start mysqld with --skip-grant-tables and optionally the --skip-networking option from the bin directory of your mysql installation folder or by typing mysqld directly if you have added the mysql bin directory to the %PATH% environment variable. cmd> mysqld --skip-grant-tables --skip-networking or something like cmd> c:\path\to\mysql\bin\mysqld --skip-grant-tables --skip-networking ##### b) On *nix use the command: shell> mysqld --skip-grant-tables --skip-networking & Note: as noted above --skip-networking is optional but strongly recommended if your MySQL instance is accessible through an insecure network (Internet, wi-fi...) 
as omitting this option is like allowing anyone in the network to mess with your server. #### 3- connect to mysql Connect to mysql on Windows using the command: cmd> c:\path\to\mysql\bin\mysql or if the bin directory of your mysql install is included in the %PATH% environment variable just type cmd> mysql and the same goes for *nix: issue the command shell> mysql this step will get you to the mysql command prompt #### 4- Restore / reset root password You now need to update the old password by issuing two sql statements: mysql> UPDATE mysql.user    -> SET Password=PASSWORD('newPasswordHere')    -> WHERE User='root';mysql> FLUSH PRIVILEGES; The FLUSH PRIVILEGES statement allows mysql to notice the password change. #### 5- Restart your MySQL server normally We're done — all you need to do now is restart the server without the --skip-grant-tables and --skip-networking options. With these steps you're now able to reset/restore the mysql root password. If you still can't connect to your server, or just have a suggestion or addition, your feedback is most welcome. #### What’s the Difference Between JPG, PNG, and GIF? posted Dec 27, 2010, 6:12 AM by Ahmed El-Sharkasy As we keep building on old image technology, types of file formats keep piling up, each with their own nuances and uses. JPG, PNG, and GIF have become the most common, but what sets them apart from each other? These formats have become the most popular because of their compatibility with modern browsers, broadband speeds, and the needs of average users. Join us as we take a detailed look at each format, and cover the strengths and weaknesses of each. ### JPG (Joint Photographic Experts Group) JPG was a filetype developed by the Joint Photographic Experts Group (JPEG) to be a standard for professional photographers. 
Like the method ZIP files use to find redundancies in files to compress data, JPGs compress image data by reducing sections of images to blocks of pixels or “tiles.” JPG compression has the unfortunate side effect of being permanent, however, as the technology for the file was created for storing large photographic image files in surprisingly small spaces, and not for photo editing. JPGs have become the de facto standard image of the internet because they can be compressed so much. A typical JPG can be compressed at a ratio of anywhere from 2:1 to as high as 100:1, depending on your settings. Particularly back in the days of dial-up internet, JPGs were the only viable way to send image information. However, because of the lossy nature of JPG, it is not an ideal way to store art files. Even the highest quality setting for JPG is compressed, and will change the look of your image, if only slightly. JPG is also not an ideal medium for typography, crisp lines, or even photographs with sharp edges, as they are often blurred or smeared out by anti-aliasing. What is potentially worse, is that this loss can accumulate—saving multiple versions of artwork can cause degradation with every save. Even so, it is common to see these things saved as JPG, simply because the filetype is so ubiquitous. Close up of a high quality JPG. Close up of a very lossy JPG. The Joint Photographic Experts Group developed lossless JPG technology to combat this serious problem of quality degradation. However, because of dial-up speeds and general lack of interest in high quality non-degrading files, the JPG-LS standard never caught on. It is possible to download plugins that allow users to open and save the lossless JPG2000, and some programs, like Apple’s Preview application, can read and save JPG2000 directly out of the box. JPGs support 24-bit RGB and CMYK, as well as 8-bit Grayscale. I personally do not recommend using CMYK color spaces in JPGs. 
It’s also important to note that Grayscale JPGs do not compress nearly as much as color ones do. ### GIF (Graphics Interchange Format) GIF, like JPG, is an older filetype, and one generally associated with the internet as opposed to photography. GIF stands for “Graphics Interchange Format” and employs the same lossless LZW compression that TIFF images use. This technology was once controversial (for patent enforcement issues) but has become an accepted format since all patents have expired. Close up of an 8-bit color GIF. GIF is by nature an 8-bit color file, meaning they are limited to a palette of 256 colors, which can be picked from the RGB color model and saved to a Color Look Up Table (CLUT), or simply “Color Table.” There are, however, standard color palettes, like the “Web Safe” palette. An important note is that Grayscale images are by nature an 8-bit palette, so saving them as GIF is fairly ideal. Apart from support for transparency, GIF also supports animations, limiting every frame to 256 preselected colors. While GIF is not lossy like JPG, conversion to 8-bit color distorts many images, using dither filters to optically blend, or “diffuse,” colors, similar to halftone dots or pointillism. This can radically alter an image for the worse, or, in some cases, be used to create an interesting effect. Because of this non-lossy format, GIF can be used to keep tight lines on typography and geometric shapes, although these things are better suited to vector graphic files like SVG or the Adobe Illustrator native format, AI. GIF is not ideal for modern photography, nor image storage. At small sizes with very limited color tables, GIF images can be smaller than JPG files. But at most ordinary sizes, JPG compression will create a smaller image. They are largely out of date, useful only to create dancing babies or to sometimes create rough transparencies. 
### PNG (Portable Network Graphics) PNG stands for Portable Network Graphics (or, depending on whom you ask, the recursive “PNG-Not-GIF”). It was developed as an open alternative to GIF, which used the proprietary LZW compression algorithm discussed earlier. PNG is an excellent filetype for internet graphics, as it supports transparency in browsers with an elegance that GIF does not possess. Notice how the transparent color changes and blends with the background. Right-click the image to see. This is actually one image that is on four different background colors. PNG supports 8-bit color like GIF, but also supports 24-bit color RGB, like JPG does. They are also non-lossy files, compressing photographic images without degrading image quality. PNG tends to be the biggest of the three filetypes and isn’t supported by some (usually older) browsers. In addition to being an excellent format for transparency, the non-lossy nature of 24-bit PNG is ideal for screenshot software, allowing pixel for pixel reproduction of your desktop environment. ### Which to use? From left to right, these files are: 24-bit JPG Compressed, 8-bit GIF, 8-bit PNG, Full Quality 24-bit JPG, and 24-bit PNG. Note that the file sizes increase in this same direction. PNG is the largest image type for bigger images, often containing information you may or may not find useful, depending on your needs. 8-bit PNG is an option, but GIF is smaller. Neither are optimal options for photography, as JPG is much smaller than lossless PNG with only minimal loss of quality. And for storage of high resolution files, JPG compresses to tiny proportions, with quality loss only visible on close inspection. In short: • PNG is good option for transparency and non-lossy, smaller files. Larger files, not so much, unless you demand non-lossy images. • GIF is largely a novelty and only useful for animation, but can produce small 8-bit images. 
• JPG is still the king for photographs and photo-like images on the internet, but be careful, as your file can degrade with every save. #### imgscalr – Java Image Scaling Library posted Dec 27, 2010, 3:45 AM by Ahmed El-Sharkasy ### Description imgscalr is a very simple and efficient “best-practices” image-scaling library implemented in pure Java. This library makes use of efficient Java2D scaling techniques advocated by the Java2D team which provide hardware accelerated operations on most platforms. This library also implements the optimized incremental scaling algorithm proposed by Chris Campbell with some minor enhancements for good-looking (and quick) thumbnail generation (previously only possible with the discouraged Image.getScaledInstance method using the much slower SCALE_AREA_AVERAGE algorithm). ### What Problem Does this Solve? If you have ever wanted to quickly rescale an image in Java you have probably noticed the following confusing things: ### Usage The simplest use-case of the library is a simple, single 2-argument method call: BufferedImage srcImage = ... // Scale the image using the imgscalr library BufferedImage scaledImage = Scalr.resize(srcImage, 150); In this use-case we pass the library our image and ask it to fit the image (while maintaining its original proportions) within a width and height of 150 pixels. Alternatively, if we wanted to use the full-argument method call and print out some debugging and performance information from the library, we could do this: BufferedImage srcImage = ... // Scale the image using the imgscalr library BufferedImage scaledImage = Scalr.resize(srcImage, Scalr.Method.AUTOMATIC, 150, 100, true, true); Here we are explicitly telling the library to use the AUTOMATIC method when deciding between QUALITY or SPEED for the scaling operation. 
We are also telling the library to fit our image (while maintaining its proportions) within a max width of 150 pixels and max height of 100 pixels while printing out debugging and performance metrics. NOTE: If a width and height are provided that violate the image’s proportions (e.g. attempt to resize an 800×600 image to a 150×150 square) the library will select the smallest (most constrained) dimension and then re-calculate the correct size for the other dimension based on that. So the given targetWidth and targetHeight arguments are used as upper bounds when scaling an image, not absolutes. This was done intentionally as a correctly proportional image was determined to be a better default behavior (more expected by users) than cropping the image or forcibly rescaling it to the given dimensions, possibly skewing it. ### Intended Audience This library is intended for developers needing to quickly scale images (using the correct or most optimized methods available in native Java) and move on with their lives. imgscalr is general purpose and will work on any platform providing the base Java2D classes it uses. imgscalr was also written with web applications in mind, possibly needing to generate thousands of thumbnails or previews from larger uploaded images. This library is not a comprehensive image processing (e.g. blur, sharpen, saturate, etc.) library for Java. #### The Beginner’s Guide to Managing Users and Groups in Linux posted Dec 10, 2010, 2:30 PM by Ahmed El-Sharkasy Ubuntu Linux uses groups to help you manage users, set permissions on those users, and even monitor how much time they are spending in front of the PC. Here’s a beginner’s guide to how it all works. ### Users and Groups Ubuntu is set up for a single person to use when you install it on your system, but if more than one person will use the computer, it is best for each person to have their own user account. 
This way each person can have separate settings and documents, and files can be protected from being viewed by the other users on the same PC. Normally Linux computers have two user accounts—your own user account, and the root account, which is the super user that can access everything on the PC, make system changes, and administer other users. Ubuntu works a little differently, though—you can’t login directly as root by default, and you use the sudo command to switch to root-level access when you need to make a change. Linux stores a list of all groups in the ‘/etc/group’ file. You can run this command in the Terminal to view and edit the groups in your system: sudo vigr ### Creating User Accounts To create a new user, you can head to System –> Administration -> User and Groups, and click the “Add” button to add a new user. Give the appropriate name that identifies the other user and tick the “encrypt” checkbox to secure their home folder. Click the “Advanced Settings” button to configure the user’s privileges. The user management module lists Anna’s privileges under the “User Privileges” tab. We recommend that you remove the “Administer System” privilege from other user accounts. This is to make sure that other users cannot easily change critical system settings that may jeopardize your Linux box. ### Linux File and Folder Permissions Each file in Linux has a set of user and group permissions, and you can use the ls -l command to show the full set of permissions and attributes from the terminal. Reading from left to right, each item in the list means: <permissions> 1 <file owner> <file group> <file size> <file date> <file name> For instance, in the example showing a file named anki, the permissions are rwxr-xr-x, the file is owned by the root user and belongs to the root group, and it’s 159 bytes. 
The permission flag has four components, the first character being the flag, usually used to indicate whether it’s a directory or a file—a directory would show a “d” and a regular file will show a “-“. The next 9 characters are broken up into sets of 3 characters, which indicate user, group, and everyone permissions. <flag><user permissions><group permissions><everyone permissions> In this particular example, we’ve got rwxr-xr-x, which can be broken up like this: <flag><user permissions = rwx><group permissions = r-x><everyone permissions = r-x> The permissions correspond to the following values: • r = read permission • w = write permission • x = execute permission This means that for the file in question, everybody has read and execute permissions, but only root has access to write to the file. ### Changing Group Ownership of Files and Directories Anna is a 7th grader and her brother Peter just enrolled in a programming course in a university. Anna will be more interested in using the educational software for her mathematics or geography homework, compared to Peter who is more interested in using software development tools. We can configure Anna’s and Peter’s access to these applications by assigning them to the appropriate groups from the “Manage Groups” module. Let’s create two user groups—a K-12 student group and a University student group—and assign the appropriate user accounts to each group. We should give the K-12 students the privileges to run the educational software. Linux stores most of the executables under /usr/bin, for example, Linux stores Anki under /usr/bin/anki. If you’re not sure where a file is located, the which command is a convenient way to find out the location from the terminal: which anki Let’s assign Anki and Kig to the k12 group using the chown command, which uses the following format: sudo chown :[group name] [files list] You can also revoke the read and execute access from other user groups using the chmod command. 
sudo chown :[group name] [files list] This command gives the members of the K12 group access to Anki and Kig. We should restrict the access rights of the university group from Anki and Kig by removing the read and execute permission from the “Other” groups. The format of the command is: chmod [ugoa][+-=][rwxXst] fileORdirectoryName The first command that we executed in the command line removes the read (r) and execute (x) privilege from the “Other” group. The “o” option indicates that we are modifying the access right of the Other group. The ‘-’ option means that we want to remove certain file permissions specified in the parameters that follow the ‘-’ option. The man page of chmod gives a detailed explanation of these options. man chmod ### Monitoring Computer Usage Timekpr allows us to give each user a limited amount of computing time, and you’ll need to add the following PPA to your software sources so that you can install Timekpr from the Ubuntu Software Center. deb http://ppa.launchpad.net/timekpr-maintainers/ppa/ubuntu lucid main deb-src http://ppa.launchpad.net/timekpr-maintainers/ppa/ubuntu lucid main Ubuntu Software Center is the easiest way to install Timekpr—just use the search box and it should come right up. Timekpr allows us to limit the computer usage time by a certain time frame on each day of the month. For example, we can specify the computer time usage for 300 minutes on Sunday and 60 minutes on Monday. Timekpr will appear on the user’s task bar and lock the desktop when the computing time of the user is up. #### The 50 Best Registry Hacks that Make Windows Better posted Dec 10, 2010, 2:22 PM by Ahmed El-Sharkasy   [ updated Dec 11, 2010, 10:48 AM ] We’re big fans of hacking the Windows Registry around here, and we’ve got one of the biggest collections of registry hacks you’ll find. Don’t believe us? Here’s a list of the top 50 registry hacks that we’ve covered. 
It’s important to note that you should never hack the registry if you don’t know what you’re doing, because your computer will light on fire and some squirrels may be injured. Also, you should create a System Restore point before doing so. Otherwise, keep reading. ### Prevent Windows Update from Forcibly Rebooting Your Computer We’ve all been at our computer when the Windows Update dialog pops up and tells us to reboot our computer. I’ve become convinced that this dialog has been designed to detect when we are most busy and only prompt us at that moment. There’s a couple of ways that we can disable this behavior, however. You’ll still get the prompt, but it won’t force you to shut down. Here’s how to do it. Prevent Windows Update from Forcibly Rebooting Your Computer One of the most irritating things about Windows is the context menu clutter that you have to deal with once you install a bunch of applications. It seems like every application is fighting for a piece of your context menu, and it’s not like you even use half of them. Today we’ll explain where these menu items are hiding in your registry, how to disable them the geeky way, and an easier cleanup method for non-geeks as well. ### Stop Windows Update from Hijacking the Sleep/Shutdown Button As an avid user of the Sleep function on my laptop, I’ve been more than irritated with Windows 7 or Vista’s habit of changing the Sleep/Shutdown button into an “Install Updates and Shut Down” button whenever there are updates from Windows Update. After the last time I accidentally clicked this stupid button when I just wanted to enter sleep mode, I decided to look for a solution. Stop Windows Update from Hijacking the Sleep/Shutdown Button ### Add “Take Ownership” to Explorer Right-Click Menu in Win 7 or Vista Taking ownership of system files or folders in Windows 7 or Vista is not a simple task. Whether you use the GUI or the command line, it takes far too many steps. 
Here’s a registry hack that adds an item to the menu that will let you take ownership of the files in a single step, so you can delete, move, or otherwise modify the file. Add “Take Ownership” to Explorer Right-Click Menu in Win 7 or Vista ### Disable Aero Shake in Windows 7 One of the interesting new features in Windows 7 is the way you can grab a window by the title bar and “shake” it back and forth to minimize everything else. It’s a fun feature, but just in case you want to disable it we’ve got the solution for you. All you’ll have to do is apply a simple registry hack, and that Disable Aero Shake in Windows 7 The default method of opening unknown files forces you to go through a list of known applications and is generally a pain to deal with. That’s why I like to have a context menu option for “Open with Notepad” so that I can quickly open up files without having to go through a lot of trouble. ### Disable All Notification Balloons in Windows 7 or Vista If you find the popup notification balloons in the Windows system tray to be too annoying, you might be interested to know that you can completely disable them. This would be an extreme option, of course… typically you can just turn them off in any offending applications, but if you want to disable them across the board, this is the solution. Disable All Notification Balloons in Windows 7 or Vista ### Change the Registered Owner in Windows If you’ve ever wondered how to change the name of the person that Windows is registered to, this is the quick tip for you. It’s not all that useful for most people, but it might come in handy if you got a computer from somebody else. To show off the new changes, just type winver.exe into the start menu search box to see the About Windows box. Quick Tip: Change the Registered Owner in Windows ### Kill Windows with the Blue Screen of Death in 3 Keystrokes Have you ever wanted to show off your keyboard ninja skills by taking down Windows with just a couple of keystrokes? 
All you have to do is add one registry key, and then you can impress your friends… or use it to convince people to switch to Linux. This isn’t a bug, it’s a “feature” in Windows that is designed to let users trigger a crash dump for testing purposes. Note: this one doesn’t work in Windows 7 anymore. Also, it clearly doesn’t make Windows better, but we included it because it’s lots of fun. Keyboard Ninja: Kill Windows with the Blue Screen of Death in 3 Keystrokes ### How to Add Any Application to the Windows Desktop Right-Click Menu If you want really quick access to launch a frequently used application without putting extra icons on your desktop, you can add that application to the context menu for the desktop with a simple registry hack. Here’s how to do it. We’ve already shown you how to create shortcuts to create new Google Docs easily, but what if you want total Windows integration? Here’s how to add them to the Windows Explorer “New” menu for easier access. This should work for all versions of Windows, and you can modify it to work with Google Apps for your Domain as well. Keep reading for the full instructions. ### How to Add Registry Editor to Control Panel It’s always struck me as odd that system tweakers use the registry editor all the time to fix annoyances in Windows, but nobody has created a tweak to add the registry editor to the control panel… until now. I’ve created a registry hack to add the registry editor as another option in the Control Panel in any version of Windows. ### Remove “Shortcut” Text From New Shortcuts in Windows 7 or Vista A source of annoyance for many Windows users is the ” – Shortcut” text that is added to the name of newly created shortcuts, and every time you have to manually edit the shortcut and remove that text. Wouldn’t it be great if there was a registry hack for this? Most long-time geeks will remember that there was a hack for Windows XP, and probably already know that the same hack works in Windows 7 or Vista as well. 
### Disable Win+X Shortcut Keys on Windows 7 or Vista Windows 7 and Vista have all the same Windows+X shortcut keys as other versions of Windows, such as Win+E for explorer and Win+D for the desktop, but adds in all of the Win+<num> keys to launch the shortcuts in the Vista Quick Launch menu (or switch to apps in Windows 7), as well as Win+X for mobility center, etc. But what if you want to disable all these extra keys? ### Stupid Geek Tricks: Enable the Secret “How-To Geek” Mode in Windows 7 We haven’t told anybody before, but Windows has a hidden “How-To Geek Mode” that you can enable which gives you access to every Control Panel tool on a single page—and we’ve documented the secret method for you here. NOTE: Do not use this on Vista or XP: things will break. ### Disable Windows Mobility Center in Windows 7 or Vista Windows Mobility Center is a fairly useful tool for those of us using Windows 7 or Vista on a laptop computer, but might not be for everybody, especially since it takes over the Win+X keyboard shortcut. If you would rather disable Windows Mobility Center, all it takes is a single registry tweak. ### Hide Drives from Your Computer in Windows 7 or Vista If you’ve got drives in My Computer that you never access, such as a USB Flash drive that you are using solely for ReadyBoost, a floppy drive, or a network drive only used for a particular piece of software, then you might want to simply hide the drive from your computer. This tip will only hide the drive from being displayed, applications and the command prompt will still have access to it, and you can still manually browse to the folder if you type in the path. ### How to Put a Real Libraries Icon On Your Windows 7 Desktop We’re big fans of hidden registry hacks around here, so when our friend Justin showed how to put a real, working Libraries icon on the desktop, we figured it would make a perfect article for for a few extra geek points. 
Yeah, you can always create a shortcut icon for anything on the desktop, but this one is the actual icon without the shortcut overlay. Plus it’s a geeky hidden trick—what’s not to like? ### How to Restore Previous Versions of the Registry in Windows 7 If you want to manually restore a specific section of the registry from a previous System Restore snapshot, or access some specific keys from an older version of the registry, you can do so by getting access to those files and then exporting sections from them. Here’s how to do it in Windows 7 or Vista. ### Remove or Hide Unwanted Items From the Control Panel in Windows 7 Have you ever opened the Control Panel in Windows 7 and thought there is no reason to have some of the icons listed? Today we take a look at how to remove unwanted or unneeded items from Control Panel in Windows 7. For some items you won’t be able to easily delete the CPL file if it’s a Windows system file or in use. You could use a program like Unlocker, but we can tweak the Registry to hide items in Control Panel. ### Make the Taskbar Buttons Switch to the Last Active Window in Windows 7 The new Windows 7 taskbar’s Aero Peek feature, with the live thumbnails of every window, is awesome… but sometimes you just want to be able to click the taskbar button and have the last open window show up instead. Here’s a quick hack to make it work better. ### Make Aero Peek Display Instantly (or Disable it) in Windows 7 Aero Peek is one of the more useful new features in Windows 7… just move your mouse to the taskbar for half a second, and everything else hides so you can see the desktop or application window. But why does it take half a second? There’s a simple little registry hack that will change the amount of time between hovering your mouse over the show desktop button in the lower right-hand corner, and the Aero Peek display showing up. The same thing should work for when you hover over an application window on the taskbar. 
### Why Doesn’t Disk Cleanup Delete Everything from the Temp Folder? After you’ve used Disk Cleanup, you probably expect every temporary file to be completely deleted, but that’s not actually the case. Files are only deleted if they are older than 7 days old, but you can tweak that number to something else. Why Doesn’t Disk Cleanup Delete Everything from the Temp Folder? ### Remove “Troubleshoot Compatibility” from the Windows Context Menu Reader Jeevus wrote in asking how to remove the “Troubleshoot Compatibility” item from the Windows context menu whenever you right-click on an application—naturally, we were happy to explain how to do it, and share with the rest of you. You’ll want to note that we’re not necessarily recommending that you remove this item, since it could be useful if you’re having compatibility issues with an application, but we’re fans of showing how to do something—also, we just like tinkering in the registry. ### Show the Classic “All Programs” Menu in the Start Menu in Windows 7 There are a lot of new users to Windows 7 who are not happy with the new Start Menu and wish they could revert to the Classic menu. Here is how to at least get back the Classic “All Programs” Menu. While it’s not quite the same as the good old XP days, if you’re looking to get the All Programs Menu back, this Registry hack will do the trick without installing 3rd party software. ### How To Add Recycle Bin to “My Computer” in Windows 7 or Vista Have you ever wanted to add the Recycle Bin to your My Computer screen? Yeah, it’s probably not the most common request, but here’s how to do it anyway with a simple registry hack. To make this tweak, we’ll be doing a quick registry hack, but there’s a downloadable version provided as well. ### Remove Gadgets and Screen Resolution on Windows 7 Desktop Context Menu One of the first things you might notice in Windows 7 is the addition of the new Gadgets and Screen Resolution items to the context menu. 
The only problem is that you might not really want them there—so we’ll explain how to get rid of them. No clue what we’re talking about? If you are using Windows 7 and you right-click on the desktop, you’ll see a bunch of new items at the bottom: ### Stupid Geek Tricks: Enable More Fonts for the Windows Command Prompt Have you ever noticed that there are only two fonts to choose from in the Command prompt properties window? What you might not know is that you can use a simple registry hack to enable alternate fonts, including a very readable font that comes with Windows 7, Vista, or Office 2007. But that’s not all… you can enable a number of fixed width fonts if you really want to. We’ll cover how this works, as well as one of my favorite “interesting” fonts for the command prompt. ### Remove ATI Catalyst Control Center from the Desktop Right-Click Menu Have you ever wondered how to remove the “Catalyst(TM) Control Center” item from the desktop context menu? Here’s the simple registry hack to remove it. ### Remove NVIDIA Control Panel from Desktop Right-Click Menu Have you ever wondered how to remove the “NVIDIA Control Panel” item from the desktop context menu? If so, you probably didn’t realize that it’s trivially easy to remove. ### Make “Command Prompt Here” Always Display for Folders in Windows We’ve previously explained how you can open a command prompt by holding down the Shift key and right-clicking on a folder or the desktop… but how do you make that item show up without having to hold down the shift key? There’s a simple registry hack you can do that will enable “Open Command Window Here” item without holding down the shift key: ### Add Encrypt / Decrypt Options to Windows 7 / Vista Right-Click Menu If you use the built-in file encryption in Windows 7 or Vista, you might be interested in adding an option to the right-click menu to more easily encrypt and decrypt your files, rather than having to use the file properties dialog. 
Adding this to the menu couldn’t be simpler – there’s only a single registry key to add. ### Customize the Default Screensavers in Windows 7 and Vista Windows 7 and Vista include a nice set of backgrounds, but unfortunately most of them aren’t configurable by default.  Thanks to a free app and some registry changes, however, you can make the default screensavers uniquely yours! You can customize the Bubbles, Ribbons, and Mystify screensaver to enable hidden options with this registry hack. ### Skip the Annoying “Use the Web service to find the correct program” Dialog If you’ve used Windows for any length of time, you’ve likely tried to open a file with an unknown extension. Instead of getting a list of programs to open the file with, you get an annoying dialog asking you to use a web service to find a program. So how do we change this? You can use a registry hack to force Windows to skip this dialog altogether, and give you a list of applications to use to open the file instead, just as if you had selected the second option. ### Disable Caps Lock Key in Windows 7 or Vista The caps lock key is one of those remnants of another age of computers, back when people used to shout at each other more often. Since it’s not entirely useful anymore we’ll learn how to disable it. If you aren’t interested in the explanation you can skip to the bottom for the registry files. Windows doesn’t have a default setting to allow for disabling the key, so what we have to do is re-map the key to something non-existent so as to completely disable it. We’ve received lots of requests to add Defrag to the right-click menu for a drive, so we created a simple registry hack that can be easily added that does just that. A couple of days ago I noticed a thread on our forum asking how to add Control Panel to the desktop context menu, so I decided to write up the solution for everybody, since it seems like a really useful hack. 
There’s a manual registry hack that you can apply if you’d like, or you can download the reghack version and apply it easily. ### Use Verbose Boot Messages to Troubleshoot Windows Startup Problems If you’ve ever had problems with your PC starting up or shutting down slowly, there’s lots of different troubleshooting techniques that you can use—today we’ll talk about how to enable verbose messages. Enabling these verbose messages is not going to magically solve your problems, of course—the point is to use this to identify a problem, which you can then solve through other means, generally by uninstalling a problem application or upgrading a faulty driver. ### How to Enable or Disable TortoiseSVN Menus Easily If you’re a programmer that uses TortoiseSVN to manage your Subversion source control project, you might wonder how to easily disable the menu items without completely uninstalling. Here’s a quick way to do it. The general idea is that we’ll remove the Windows Explorer context menu items from the registry with one script, and then add the registry entries back with another script. ### How to Add Control Panel to “My Computer” in Windows 7 or Vista Back in the Windows XP days, you could easily add Control Panel to My Computer with a simple checkbox in the folder view settings. Windows 7 and Vista don’t make this quite as easy, but there’s still a way to get it back. To make this tweak, we’ll be doing a quick registry hack, but there’s a downloadable version provided as well. ### Increase the Speed of the Aero Taskbar Thumbnails in Windows 7 By default you may notice that there is a slight delay when hovering your mouse over a Taskbar Thumbnail. Here is a neat registry hack that will allow you to speed it up. Once you apply the hack, you’ll notice when you hover your mouse over a thumbnail of an open app on the Taskbar the preview pops up instantly with no delay. 
### Remove Programs from the Open With Menu in Explorer Would you like to clean up the Open with menu in Windows Explorer?  Here’s how you can remove program entries you don’t want in this menu on any version of Windows. ### Add “Run as Administrator” to Any File Type in Windows 7 or Vista Have you ever tried to unzip a file to the Program Files directory in Windows 7 or Vista? You’ll get all sorts of permission denied errors, and generally be unsuccessful. So how do we open up the zipfile as an administrator? For that matter, how do you open any file as administrator? There’s a simple registry tweak that will let us specify the action to run as administrator for a file type. Unfortunately you’ll have to make the tweak manually, but we’ll walk you through it. ### Create a Context Menu Item to Copy a Text File To the Clipboard in Windows 7 / Vista / XP If you are the type of person that likes to keep a lot of information stored in text-format files on your drive, you’ve probably encountered a scenario where you want to copy that information to the clipboard… so you open the file in notepad, select all, then copy to the clipboard. What if you could do it with a simple context menu item instead? Using a little registry hacking and the clip.exe utility built into Windows 7 and Vista, we can do just that, and we can even hide it behind the Shift + Right-Click menu so that it won’t waste space on the menu unless you hold down the shift key. ### Disable the “Send To” Folder on the Windows Explorer Context Menu After writing the article about adding Notepad to the context menu I noticed all the comments from users that prefer to use a shortcut in the Send To menu, which got me thinking… I wonder if you can disable the Send To folder? Of course you can easily disable it… naturally it’s a registry hack, so standard disclaimers apply. 
Disable the “Send To” Folder on the Windows Explorer Context Menu ### Remove “Map Network Drive” Menu Item from Windows Vista or XP If you have never used the “Map Network Drive” dialog box, do you ever wonder how to get rid of it? Personally I only map drives from the command line so I never use it either… so I’m thankful there’s a registry hack that can remove the menu items. If you are unfamiliar with what I’m talking about, just right-click on the Computer icon and you’ll see it. Remove “Map Network Drive” Menu Item from Windows Vista or XP ### Enable or Disable Task Manager Some time ago I received an email from a reader curious why their Task Manager option was grayed out on the taskbar right-click menu. After a bit of research his problem was solved, and now I’m sharing the solution with everybody. There is a registry key that will disable Task Manager, although it’s not always clear how or why it was set to disable. In many cases the problem is related to spyware, so you should also scan your computer. Here’s how to enable or disable it. Is your Internet Explorer context menu completely out of control? Is it so long that it actually runs off the screen? Here’s how to quickly take a few steps to get rid of all that ridiculous clutter without installing Google Chrome instead. Sometimes you can remove the items just by using the Manage Add-ons screen, but other add-ons embed themselves a lot deeper, and you’ll need to use a registry hack to get rid of them. Here’s how to do that. ### How to Restore the Real Internet Explorer Desktop Icon in Windows 7 Remember how previous versions of Windows had an Internet Explorer icon on the desktop, and you could right-click it to quickly access the Internet Options screen? It’s completely gone in Windows 7, but a geeky hack can bring it back. 
Microsoft removed this feature to comply with all those murky legal battles they’ve had, and their alternate suggestion is to create a standard shortcut to iexplore.exe on the Desktop, but it’s not the same thing. We’ve got a registry hack to bring it back. ### Enable or Disable Displaying a Message During the Boot Process If you’ve ever had a corporate laptop, there’s a good chance that you’ve seen a message before you login that talks about the corporate policies and all of that stuff. Here’s how to enable or disable that login message using a registry hack. #### How long does it take to make a context switch? posted Nov 28, 2010, 11:54 PM by Ahmed El-Sharkasy Blog That's an interesting question I'm willing to waste some of my time on. Someone at StumbleUpon emitted the hypothesis that with all the improvements in the Nehalem architecture (marketed as Intel i7), context switching would be much faster. How would you devise a test to empirically find an answer to this question? How expensive are context switches anyway? (tl;dr answer: very expensive) ## The lineup I've put 3 different generations of CPUs to the test: • A dual Intel 5150 (Woodcrest, based on the old "Core" architecture, 2.67 GHz). The 5150 is a dual-core, and so in total the machine has 4 cores available. • A dual Intel E5440 (Harpertown, based on the Penrynn architecture, 2.83 GHz). The E5440 is a quad-core so the machine has a total of 8 cores. • A dual Intel E5520 (Gainestown, based on the Nehalem architecture, aka i7, 2.27 GHz). The E5520 is a quad-core, and has HyperThreading enabled, so the machine has a total of 8 cores or 16 "hardware threads". As far as I can tell, all CPUs are set to a constant clock rate (no Turbo Boost or anything fancy). ## First idea: with syscalls (fail) My first idea was to make a cheap system call many times in a row, time how long it took, and compute the average time spent per syscall. The cheapest system call on Linux these days seems to be gettid. 
Turns out, this was a naive approach since system calls don't actually cause a full context switch anymore nowadays, the kernel can get away with a "mode switch" (go from user mode to kernel mode, then back to user mode). That's why when I ran my first test program, vmstat wouldn't show a noticeable increase in number of context switches. But this test is interesting too, although it's not what I wanted originally. Source code: timesyscall.c Results: • Intel 5150: 105ns/syscall • Intel E5440: 87ns/syscall • Intel E5520: 58ns/syscall Now that's nice, more expensive CPUs perform noticeably better. But that's not really what we wanted to know. So to test the cost of a context switch, we need to force the kernel to de-schedule the current process and schedule another one instead. And to benchmark the CPU, we need to get the kernel to do nothing but this in a tight loop. How would you do this? ## Second idea: with futex The way I did it was to abuse futex (RTFM). futex is the low level Linux-specific primitive used by most threading libraries to implement blocking operations such as waiting on a contended mutexes, semaphores that run out of permits, condition variables and friends. If you would like to know more, go read Futexes Are Tricky by Ulrich Drepper. Anyways, with a futex, it's easy to suspend and resume processes. What my test does is that it forks off a child process, and the parent and the child take turn waiting on the futex. When the parent waits, the child wakes him up and goes on to wait on the futex, until the parent wakes him and goes on to wait again. Some kind of a ping-pong "I wake you up, you wake me up...". Source code: timectxsw.c Results: • Intel 5150: ~4300ns/context switch • Intel E5440: ~3600ns/context switch • Intel E5520: ~4500ns/context switch Note: those results include the overhead of the futex system calls. Now you must take those results with a grain of salt. The micro-benchmark does nothing but context switching. 
In practice context switching is expensive because it screws up the CPU caches (L1, L2, L3 if you have one, and the TLB – don't forget the TLB!). ## CPU affinity Things are harder to predict in an SMP environment, because the performance can vary wildly depending on whether a task is migrated from one core to another (especially if the migration is across physical CPUs). I ran the benchmarks again but this time I pinned the processes/threads on a single core (or "hardware thread"). The performance speedup is dramatic. Source code: cpubench.sh Results: • Intel 5150: ~1900ns/process context switch, ~1700ns/thread context switch • Intel E5440: ~1300ns/process context switch, ~1100ns/thread context switch • Intel E5520: ~1400ns/process context switch, ~1300ns/thread context switch Performance boost: 5150: 66%, E5440: 65-70%, E5520: 50-54%. The performance gap between thread switches and process switches seems to increase with newer CPU generations (5150: 7-8%, E5440: 5-15%, E5520: 11-20%). Overall the penalty of switching from one task to another remains very high. Bear in mind that those artificial tests do absolutely zero computation, so they probably have 100% cache hit in L1d and L1i. In the real world, switching between two tasks (threads or processes) typically incurs significantly higher penalties due to cache pollution. But we'll get back to this later. After producing the numbers above, I quickly criticized Java applications, because it's fairly common to create shitloads of threads in Java, and the cost of context switching becomes high in such applications. Someone retorted that, yes, Java uses lots of threads but threads have become significantly faster and cheaper with the NPTL in Linux 2.6. They said that normally there's no need to do a TLB flush when switching between two threads of the same process. 
That's true, you can go check the source code of the Linux kernel (switch_mm in mmu_context.h): static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk){ unsigned cpu = smp_processor_id(); if (likely(prev != next)) { [...] load_cr3(next->pgd); } else { [don't typically reload cr3] }} In this code, the kernel expects to be switching between tasks that have different memory structures, in which case it updates CR3, the register that holds a pointer to the page table. Writing to CR3 automatically causes a TLB flush on x86. In practice though, with the default kernel scheduler and busy server-type workload, it's fairly infrequent to go through the code path that skips the call to load_cr3. Plus, different threads tend to have different working sets, so even if you skip this step, you still end up polluting the L1/L2/L3/TLB caches. I re-ran the benchmark above with 2 threads instead of 2 processes (source: timetctxsw.c) but the results aren't significantly different (this varies a lot depending on scheduling and luck, but on average on many runs it's typically only 100ns faster to switch between threads if you don't set a custom CPU affinity). ## Indirect costs in context switches: cache pollution The results above are in line with a paper published by a bunch of guys from the University of Rochester: Quantifying The Cost of Context Switch. On an unspecified Intel Xeon (the paper was written in 2007, so the CPU was probably not too old), they end up with an average time of 3800ns. They use another method I thought of, which involves writing / reading 1 byte to / from a pipe to block / unblock a couple of processes. I thought that (ab)using futex would be better since futex is essentially exposing some scheduling interface to userland. The paper goes on to explain the indirect costs involved in context switching, which are due to cache interference. 
Beyond a certain working set size (about half the size of the L2 cache in their benchmarks), the cost of context switching increases dramatically (by 2 orders of magnitude). I think this is a more realistic expectation. Not sharing data between threads leads to optimal performance, but it also means that every thread has its own working set and that when a thread is migrated from one core to another (or worse, across physical CPUs), the cache pollution is going to be costly. Unfortunately, when an application has many more active threads than hardware threads, this is happening all the time. That's why not creating more active threads than there are hardware threads available is so important, because in this case it's easier for the Linux scheduler to keep re-scheduling the same threads on the core they last used ("weak affinity"). Having said that, these days, our CPUs have much larger caches, and can even have an L3 cache. • 5150: L1i & L1d = 32K each, L2 = 4M • E5440: L1i & L1d = 32K each, L2 = 6M • E5520: L1i & L1d = 32K each, L2 = 256K, L3 = 8M Note that in the case of the E5520 (the one marketed as "i7"), the L2 cache is tiny but there's one L2 cache per core (with HT enabled, this gives us 128K per hardware thread). The L3 cache is shared by the 4 cores that are on each physical CPU. Having more cores is great, but it also increases the chance that your task will be rescheduled onto a different core. The cores have to "migrate" cache lines around, which is expensive. I recommend reading What Every Programmer Should Know About Memory by Ulrich Drepper (yes, him again!) to understand more about how this works and the performance penalties involved. So how does the cost of context switching increase with the size of the working set? This time we'll use another micro-benchmark, timectxswws.c that takes as an argument the number of pages to use as a working set. 
This benchmark is exactly the same as the one used earlier to test the cost of context switching between two processes except that now each process does a memset on the working set, which is shared across both processes. Before starting, the benchmark times how long it takes to write over all the pages in the working set size requested. This time is then discounted from the total time taken by the test. This attempts to estimate the overhead of overwriting pages across context switches. Here are the results for the 5150: As we can see, the time needed to write a 4K page more than doubles once our working set is bigger than what we can fit in the L1d (32K). The time per context switch keeps going up and up as the working set size increases, but beyond a certain point the benchmark becomes dominated by memory accesses and is no longer actually testing the overhead of a context switch, it's simply testing the performance of the memory subsystem. Same test, but this time with CPU affinity (both processes pinned on the same core): Oh wow, watch this! It's an order of magnitude faster when pinning both processes on the same core! Because the working set is shared, the working set fits entirely in the 4M L2 cache and cache lines simply need to be transferred from L2 to L1d, instead of being transferred from core to core (potentially across 2 physical CPUs, which is far more expensive than within the same CPU). Now the results for the i7 processor: Note that this time I covered larger working set sizes, hence the log scale on the X axis. So yes, context switching on i7 is faster, but only for so long. Real applications (especially Java applications) tend to have large working sets so typically pay the highest price when undergoing a context switch. Other observations about the Nehalem architecture used in the i7: • Going from L1 to L2 is almost unnoticeable. 
It takes about 130ns to write a page with a working set that fits in L1d (32K) and only 180ns when it fits in L2 (256K). In this respect, the L2 on Nehalem is more of an "L1.5", since its latency is simply not comparable to that of the L2 of previous CPU generations. • As soon as the working set increases beyond 1024K, the time needed to write a page jumps to 750ns. My theory here is that 1024K = 256 pages = half of the TLB of the core, which is shared by the two HyperThreads. Because now both HyperThreads are fighting for TLB entries, the CPU core is constantly doing page table lookups. Speaking of TLB, the Nehalem has an interesting architecture. Each core has a 64 entry "L1d TLB" (there's no "L1i TLB") and a unified 512 entry "L2TLB". Both are dynamically allocated between both HyperThreads. ## Virtualization I was wondering how much overhead there is when using virtualization. I repeated the benchmarks for the dual E5440, once a normal Linux install, once while running the same install inside VMware ESX Server. The result is that, on average, it's 2.5x to 3x more expensive to do a context switch when using virtualization. My guess is that this is due to the fact that the guest OS can't update the page table itself, so when it attempts to change it, the hypervisor intervenes, which causes an extra 2 context switches (one to get inside the hypervisor, one to get out, back to the guest OS). This probably explains why Intel added the EPT (Extended Page Table) on the Nehalem, since it enables the guest OS to modify its own page table without the help of the hypervisor, and the CPU is able to do the end-to-end memory address translation on its own, entirely in hardware (virtual address to "guest-physical" address to physical address). 
Applications that create too many threads that are constantly fighting for CPU time (such as Apache's HTTPd or many Java applications) can waste considerable amounts of CPU cycles just to switch back and forth between different threads. I think the sweet spot for optimal CPU use is to have the same number of worker threads as there are hardware threads, and write code in an asynchronous / non-blocking fashion. Asynchronous code tends to be CPU bound, because anything that would block is simply deferred to later, until the blocking operation completes. This means that threads in asynchronous / non-blocking applications are much more likely to use their full time quantum before the kernel scheduler preempts them. And if there's the same number of runnable threads as there are hardware threads, the kernel is very likely to reschedule threads on the same core, which significantly helps performance. Another hidden cost that severely impacts server-type workloads is that after being switched out, even if your process becomes runnable, it'll have to wait in the kernel's run queue until a CPU core is available for it. Linux kernels are often compiled with HZ=100, which entails that processes are given time slices of 10ms. If your thread has been switched out but becomes runnable almost immediately, and there are 2 other threads before it in the run queue waiting for CPU time, your thread may have to wait up to 20ms in the worst scenario to get CPU time. So depending on the average length of the run queue (which is reflected in load average), and how long your threads typically run before getting switched out again, this can considerably impact performance. It is illusory to imagine that NPTL or the Nehalem architecture made context switching cheaper in real-world server-type workloads. Default Linux kernels don't do a good job at keeping CPU affinity, even on idle machines. You must explore alternative schedulers or use taskset or cpuset to control affinity yourself. 
If you're running multiple different CPU-intensive applications on the same server, manually partitioning cores across applications can help you achieve very significant performance gains. #### How To Harmonize Your Dual-Boot Setup for Windows and Ubuntu posted Nov 23, 2010, 3:47 PM by Ahmed El-Sharkasy Looking for some harmony between Windows 7 and Ubuntu in your dual-boot setup?  Here are a few ways you can make the tense OS situation a little more unified and copacetic. ### Background When we covered How to Choose a Partition Scheme for Your Linux PC, we noticed that some people were wondering how to use a third partition between Linux and Windows to act as a storage partition. Why It’s Difficult As a few commenters pointed out, you can’t use an NTFS-formatted partition for /home in Linux.  That’s because NTFS doesn’t preserve all of the properties and permissions used by Linux, and Windows doesn’t even read Linux file systems.  You can readily see this if you view a folder that’s hidden in Windows from within Linux, or a file that Linux sees as hidden in Windows.  What works for one doesn’t work for the other.  Furthermore, there isn’t an incredibly clean way to move the Users folder in Windows without messing with things.  This is why many people with nicer machines end up using virtualization software; it’s easier than forcing the two to co-operate side-by-side. Image from cellguru.co.cc, assumed fair use A Work-Around There isn’t a way to run your /home directory from a FAT32 or NTFS partition, so your configuration files and scripts will have to remain there.  What you can do is redirect the other commonly used folders like Documents, Downloads, Music, etc. to another partition, one that can be read by Windows.  Then, you can add these folders to your Windows 7 Libraries and mark them as the default save location. This isn’t a proper workaround.  
Your program-associated configuration files and other user-related settings will not be in the same place for this setup.  If you have to reinstall either OS, you will have to perform a separate backup of your user settings.  That being said, however, most people are really just concerned about their documents, music, videos, and so forth.  This solves that issue by pointing both OSs to look in the same place for them. Linux has come a long way with regards to reading and writing NTFS, and since it’s much better than FAT32 and tougher to configure this setup with, that’s what we’ll be covering in this guide. Partition Scheme For this to work, you’ll want your hard drive set up in a way similar to this: • A large partition (or second hard drive!) to store your files • A small swap partition For later convenience, when you format your storage partition to NTFS, add an easily recognizable label to it.  It’ll be easier to find a drive called “storage” or “media” than by counting partition numbers. Notice that we don’t have a separate /home partition this time around.  Since the vast majority of your important/large files will be on a separate partition, this negates the need for that.  You’re welcome to use a separate /home partition to make backing up the Linux-side of things easier, just remember that you can’t exceed four primary partitions per disk. ### Auto-Mount Your Storage Partition (Linux) Since we’re using NTFS, it’s a good idea to specifically tell your system to mount your storage partition or disk in the same place every time you boot.  To do this, we’ll be editing the /etc/fstab system file, which is the file system table used by Linux, but first, we have some preparations to make.  Open up terminal, and if this makes you nervous, just take a deep breath and relax.  It’ll be okay. Prep Work We need to install ntfs-3g, the driver Linux will use to read and write to NTFS.  If you already have it installed, it’ll tell you, so don’t worry. 
sudo apt-get install ntfs-3g If you see “ntfs-3g is already the newest version” then you already have it installed, otherwise you’ll see it work, so wait for it to finish its thing.  Next, let’s create the directory where your partition will mount.  If you want the drive to appear in the “Places” menu by default, you’ll use: sudo mkdir /media/storage If you don’t want it to appear in “Places” and you want to manually browse to it for whatever reason, you can use this instead: sudo mkdir /mnt/storage This will create a “storage” directory in /media.  You can change this to something else if you like, but be sure it does not have any spaces.  Spaces will create a problem when we configure it to automatically mount in the next few steps. fstab Now, it’s time to edit the fstab file.  First, we’ll create a backup, just in case anything happens. sudo cp /etc/fstab /etc/fstab.backup It’ll prompt you for your password, so go ahead and enter it.  If, for whatever reason, you need to restore the backup in the future, you would do this: sudo cp /etc/fstab.backup /etc/fstab Next, you need to find what the UUID of your storage partition is.  The UUID stands for “universally unique identifier” and acts as a proper serial number that will not change until the partition is reformatted.  Run the following command: sudo blkid /dev/sda1: UUID=”23A87DBF64597DF1″ TYPE=”ntfs” /dev/sda2: UUID=”2479675e-2898-48c7-849f-132bb6d8f150″ TYPE=”ext4″ /dev/sda5: UUID=”66E53AEC54455DB2″ LABEL=”storage” TYPE=”ntfs” /dev/sda6: UUID=”05bbf608-87fa-4473-9774-cf4b2602d8d6″ TYPE=”swap” Find the line that has the correct label to your storage partition (makes things easy, doesn’t it?) and copy the UUID. gksudo gedit /etc/fstab You’ll see gedit open, like so: You may see an uglier theme on gedit than usual, but don’t worry it.  
Add the following lines to the bottom of fstab, substituting your own UUID instead of mine: # storage mount UUID=66E53AEC54455DB2 /media/storage/    ntfs-3g        auto,user,rw 0 0 The first line is a comment, indicated by the leading hash tag.  The next line tells fstab to look for the partition with the specified UUID, mount it to /media/storage/, and to use the ntfs-3g driver.  Furthermore, it makes sure that it automatically mounts at boot, makes it accessible by users (not just root), gives both read and write privileges, and skip file-system checks (you’ll probably want to use Windows to do that).  Lastly, double-check, and triple-check to make sure you didn’t touch anything else, and that the UUID is correct. When you’re ready, click save and then reboot.  Don’t skip the reboot, as it’s necessary for the next step as well as to make sure things work. You should be able to boot into Ubuntu as if nothing happened, but you’ll notice that you’ve got “storage” (or whatever you named it) under the Places menu now!  If not, check to make sure you got fstab correct.  See above to restore fstab from your backup, if you need to. Open up terminal and enter the following command: gedit .config/user-dirs.dirs This is the file where your “special” folders in your home directory are defined. You can edit this to your liking.  In place of where you see “\$HOME/Downloads” you would put in an absolute folder location, like “/media/storage/Downloads”.  Go ahead and create those folders, or whatever folders you’d like to call them, and put the path down for each of these.  Here’s what the finished edit should look like: Click save, and we’re done the crux of the configuration.  You may need to reboot for these changes to take effect, but you can just boot into Windows to finish out the process in the next section. Basically, now when you browse and put files in your “Downloads” folder, they’ll actually go to your storage drive’s “Downloads” folder.  
Anything in your home folder itself will stay in /home/yourusername/, not on your storage drive.  A few of the folders, like “Desktop” and “Templates,” probably won’t benefit from this treatment, either.  Templates are rarely used, the desktop usually gets cluttered with shortcuts and the like, and the Windows desktop isn’t elegantly redirected, unfortunately. Boot into Windows, and you’ll see that there’s another partition called “storage” under “My Computer.”  Windows 7 has the beautiful Libraries feature built-in, so take a look at our article “Understanding the Libraries Feature in Windows 7,” and you’ll see step-by-step directions on how to add your new storage folders to your libraries. As you can see, my storage drive folders are a part of my libraries.  My storage drive letter is E: because my network share is at D:.  Also, take a look at our “Change the Default Save Folder for Windows 7 Libraries…” article so that when you stick things in your libraries, they automatically get saved to your new storage folders as well. It’s also worth mentioning that if you have some know-how, you could even do this with a remotely shared drive on your network, though it may prove to be too slow for actual use.  A better idea is to turn your storage partition into a shared drive that can be accessed by other computers in your network. #### How to Create an Index Table Like a Pro with Microsoft Word posted Nov 22, 2010, 4:41 AM by Ahmed El-Sharkasy An index gives readers a way to find important words easily in our document, but creating an index by hand is very tedious and time consuming. Thankfully you can automatically create an index table in Word. Image by Ifijay The common approach to create an index table in Word is to manually mark each word that we wish to index, but the other alternative is to use a concordance document to automatically index our master document, which is what we will cover in today’s article. 
### Generating the Index Let’s start by creating a two column table in our concordance file. Write the words that you would like to be marked for indexing in the left column. Write the text that you would like to use in the master document’s index table in the right column. Close the concordance file and open your master document’s reference tab to index our master document. Click on the “AutoMark” button and choose the concordance document when Word prompts you to specify the AutoMark file. Right after you click the OK button you will see that Word creates some index entry fields in our document. We can hide these fields by clicking the “Show/Hide Paragraph” button in the home tab. Go to the end of the master document and click the “Insert Index” button one more time and click the OK button this time to create the index. That’s all we have on how to create an index table with a concordance file. Let’s take a closer look on how we can customize the index style. ### Creating Cross References in the Index Word allows us to create different type of index. Here we have an index with sub-entry that is particularly useful when we want to group a set of closely related concepts in our document. The trick of creating a sub-entry is by separating the text in the right column of our concordance document table with a colon (:). Word will treat any words that come after the colon as a sub-entry in the index. Another useful type of index is a cross reference index that usually comes in the form of “See also …” Unfortunately we can’t create cross references in our index table using a concordance file, so we have to manually edit the index field by adding “\t” after the indexed word followed by the cross referenced word. ### Maintaining the Index One of the challenges with maintaining an index in Word is that Word does not give us a button or menu that we can simply click to clear the index fields if we decide to redo our concordance file. 
We have to use a Visual Basic script to clear the index fields in our document. Most of you must be thinking “Visual Basic Script, I am not a programmer! What is that?”. Don’t worry, it’s not as bad as it sounds. Just copy and paste this simple script written by the awesome guys at TechRepublic into Word’s Visual Basic editor and run it to clean your master document’s index fields. Sub DeleteIndexEntries() Dim doc As Document Dim fld As Field Set doc = ActiveDocument For Each fld In doc.Fields fld.Select If fld.Type = wdFieldIndexEntry Then fld.Delete End If Next Set fld = Nothing Set doc = Nothing End Sub Open the Visual Basic editor by pressing Alt+F11 and place this script into the editor. Execute the script by clicking the “run button” to clean the master document’s index fields. Your master document should no longer have any index fields. We can now re-index the master document and recreate the index table using the “Insert Index” menu. ### Applying Different Index Table Formats Yes, an index is definitely useful for your reader, but most probably some of you are thinking, “Why does the index table look so boring? Can I change the way it looks to make it more appealing?”. The answer is yes; the index table does not have to look plain. We can adjust the index table’s style by selecting one of the available formats to adjust the look and feel of the index table. Here is an example of how the Classic index format looks. We can even apply our own style to the index table by choosing “From Template” and clicking the “Modify” button. Choose one of the available index styles and click the “Modify” button to make our own style. We can adjust the index table’s font style, give the index table some borders or numbering, and so on, to give the index table a specific look and feel. 1-10 of 26
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.2882985770702362, "perplexity": 2653.9267944491353}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-18/segments/1429246658904.34/warc/CC-MAIN-20150417045738-00246-ip-10-235-10-82.ec2.internal.warc.gz"}
https://cstheory.stackexchange.com/questions/17819/a-natural-problem-in-textrms-2-textrmp
# A natural problem in $\textrm{S}_2^\textrm{P}$? The complexity class $\textrm{S}_2^\textrm{P}$ is defined as follows (from Wikipedia): A language $L$ is in $S_2^P$ if there exists a polynomial-time predicate $P$ such that • If $x \in L$, then there exists a $y$ such that for all $z$, $P(x,y,z)=1$ • If $x \notin L$, then there exists a $z$ such that for all $y$, $P(x,y,z)=0$ where the size of both $y$ and $z$ must be polynomial in the size of $x$. Also see Fortnow's post and the complexity zoo for more informal explanations and discussions. While this class seems reasonably natural, I can't find an example of a problem that's in $\textrm{S}_2^\textrm{P}$ for a non-trivial reason (i.e., not just because it is in NP or MA or some class contained in $\textrm{S}_2^\textrm{P}$). Does anyone know a problem that fits this description? If no one can think of a problem like that, I wouldn't mind a problem that's in a sub-class of $\textrm{S}_2^\textrm{P}$, but it's non-trivial to show this, whereas the problem is obviously in $\textrm{S}_2^\textrm{P}$. • How about "an odd number of these circuits are satisfiable"? $\;$ – user6973 May 29 '13 at 5:35 • This is a nice example, however, it is also in the smaller class $\Delta_2 = \mathsf{P}^{\mathsf{NP}}$. – sdcvvc May 29 '13 at 9:28 • Not quite what you asked for, but how about a problem complete for promise-$\mathsf{S_2^p}$? Fortnow--Impagliazzo--Kabanets--Umans, On the complexity of succinct zero-sum games, Computational Complexity 17:353-376, 2008, see cs.sfu.ca/~kabanets/Research/games.html – Joshua Grochow May 29 '13 at 16:43 • @RickyDemer: Thanks, that's a nice example. (If I understand correctly, it's equally easy to show that the problem is in $\Delta_2$ too.) – Robin Kothari May 29 '13 at 21:16 • @JoshuaGrochow: Thanks, that works for me. Feel free to post that as an answer. It seems like the best answer so far, but I'll wait to see if I get a better one. 
– Robin Kothari May 29 '13 at 21:18 How about a problem complete for promise-$\mathsf{S_2^p}$? We prove that approximating the value of a succinct zero-sum game to within an additive factor is complete for the class promise-$\mathsf{S_2^p}$, the "promise" version of $\mathsf{S_2^p}$. To the best of our knowledge, it is the first natural problem shown complete for this class. (Historical note: it is not too surprising that not many natural problems are known to be in $\mathsf{S_2^p}$ but not known to be in its subclasses $\mathsf{MA}$ or $\mathsf{P^{NP}}$. If you check the original papers of Russell--Sundaram and Canetti (independently), it seems as though the definition of $\mathsf{S_2^p}$ was made more or less specifically to capture their improved arguments placing $\mathsf{BPP}$ in $\mathsf{PH}$, rather than to capture some set of natural problems.)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8554043769836426, "perplexity": 175.2526557126911}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-35/segments/1566027320156.86/warc/CC-MAIN-20190824084149-20190824110149-00514.warc.gz"}
https://wiki.contextgarden.net/Table_of_Contents
## Placing the ToC \completecontent % with title \placecontent % without title One important quirk. If you do not start the document with \completecontent, but use it later –for example after the introduction–, you should use: \completecontent[criterium=all] When \completecontent is at the end of the document you can use: \completecontent[criterium=previous] ## Modifying the default ToC You can customize the appearance, number of shown levels and other attributes of predefined ToC. Please note that the level parameter has no effect in MkIV, however you can set the levels used in the table of contents with the list option. \setuppapersize[A5] \setupcolors[state=start] % A new ToC title % turn off numbering of some levels % ToC % alternative=c, space to the page number is filled with dots \setupcombinedlist[content][list={chapter,section,subsection,subsubsection}, alternative=c,] % in MkII replace list with level=4: \subsubsubsections are not listed in ToC %\setuplist[chapter][width=5mm, style=bold] \setuplist[section][width=10mm, style=bold] \setuplist[subsection][width=20mm] % pagestyle=normal for changing the appearance of pagenumber \setuplist[subsubsection][width=20mm, style=slanted, pagestyle=normal] \starttext \startcolor[darkgreen] \completecontent \stopcolor \page[yes] \section{First section} \subsection{First subsection} \subsubsection{First subsubsection} \subsubsubsection{First subsubsubsection} \section{Second section} \subsection{Second subsection} \subsubsection{Second subsubsection} \subsubsubsection{Second subsubsubsection} \stoptext ## Setting the number of sections in the ToC To set the number of sections included in the table of contents you can explicitly set the list of items included in the 'contents' list. 
To include chapter, section, and subsection, in the table of contents use: \starttext \setupcombinedlist[content][list={chapter,section,subsection}] \startcolor[darkgreen] \completecontent \stopcolor \page[yes] \section{First section} \subsection{First subsection} \subsubsection{First subsubsection} \subsubsubsection{First subsubsubsection} \section{Second section} \subsection{Second subsection} \subsubsection{Second subsubsection} \subsubsubsection{Second subsubsubsection} \stoptext \starttext \setupcombinedlist[content][list={chapter,section,subsection,subsubsection}] \startcolor[darkgreen] \completecontent \stopcolor \page[yes] \section{First section} \subsection{First subsection} \subsubsection{First subsubsection} \subsubsubsection{First subsubsubsection} \section{Second section} \subsection{Second subsection} \subsubsection{Second subsubsection} \subsubsubsection{Second subsubsubsection} \stoptext In MkII this was achieved with the level parameter. ## Including unnumbered heads in the ToC ConTeXts head mechanism is designed in such a way that heads are stored in a list when they have an incrementing counter, so you must activate this for the 'unnumbered' heads (title, subject, subsubject, etc.) In addition, the \placecontent command is an aggregate of only the numbered heads; to place a list that also includes unnumbered heads, you must manually specify all the heads you want. % mode=mkii [incrementnumber=yes, % keep track of the number number=no] % but don't show it % Set area in which number is placed: % distance from margin to left edge of title % NB: MkIV may ignore width % for unnumbered entries, and may require % you to set margin, instead. 
\setuplist[section][width=2cm] \setuplist[subject][width=2cm] \setupcombinedlist[content][list={chapter,subject,subsubject,section,subsection}] \setuppapersize[A7][A7] \starttext \placecontent \page \chapter{One} \section{Sec A} \subject{Sub B} \section{Sec C} \stoptext Quite probably, the un-numbered sections to be included are at the beginning or end of the document (e.g. a Preface and a Bibliography) and should *not* modify the enumeration of the actual numbered sections. In this case you have to use "incrementnumber=list" instead. A slightly modified example would be: % mode=mkiv \setuppapersize[A7][A7] %list the "subject" sections in the ToC but don't modify the "section" counter: %"width": distance between number and text in the ToC entry, "margin" : indent of ToC entry relative to left page margin \setuplist[section][width=1.5em] \setuplist[subsection][width=2.5em, margin=1.5em] \setuplist[subject][margin=1.5em] \setupcombinedlist[content][list={subject,section,subsection}] \starttext \completecontent \subject{Preface} \section{One} \subsection{Sec A} \section{Two} \subject{Bibliography} \stoptext ## Explicitly excluding headings from the ToC \starttext % The first parameter is the new heading name, and the second is the % name of the heading that is copied. \startcolor[darkgreen] \completecontent \stopcolor \page[yes] \section{First section} \subsection{First subsection} \mysubsection{Second subsection} \subsection{Third subsection} \stoptext ## Page numbering in ToC \starttext \startfrontmatter \placecombinedlist[MyContentsList] \stopfrontmatter \startbodymatter \setuppagenumbering[way=bychapter, left=A, chapternumber=yes, numberseparator=/] ... \stopbodymatter will give you pagenumbering such as A1/1, A1/2 etc. on the pages in the bodymatter. In the table of contents, however, these will show up as 1-1, 1-2 etc. The ToC needs to be formatted separately. 
To get a prefix to the page numbering (like "A"), use \def\ChapterPrefix#1{A#1} \setuplist[chapter][pagecommand=\ChapterPrefix] To get the numberseparator working, you need to know that the ToC will use the separator that is active at the time the ToC is output. So you need to set it immediately before you call the ToC command, e.g. % MkII \setuppagenumbering[numberseparator=/] \placecombinedlist[MyContentsList] The above method does not work in MkIV. Hence the code below illustrates how to get it working under MkIV. (Seems the example got lost?) It should be noted that MkIV and MkII are incompatible with respect to this feature. As an example, consider that the frontmatter uses roman numerals and bodymatter remains unchanged. To reflect the romannumerals of the front matter in the ToC, \definestructureconversionset[frontpart:pagenumber][][romannumerals] \definestructureconversionset[bodypart:pagenumber] [][numbers] \setuplist[chapter][pageconversionset=pagenumber] \startsectionblockenvironment[frontpart] \setupuserpagenumber[numberconversion=romannumerals] \setuppagenumber[number=1] \stopsectionblockenvironment \startsectionblockenvironment[bodypart] \setuppagenumber[number=1] \stopsectionblockenvironment ## Adjusting spacing between lines in ToC Sometimes you want your ToC to be denser than the interlinespace used in the document in general. You can achieve this by wrapping the ToC. \start \setupinterlinespace[small] \placecontent \stop See \setupinterlinespace for more options. 
## Swap Page Number and Title To move the page number to the left of the section title, consider using a command: \define[3]\SectionListEntry {\par \leftaligned\bgroup \hbox to 2em{\color[red]{#3}}% \hskip 1em \vtop{\hsize\dimexpr\textwidth-3em\relax#2} \egroup \par} \setuplist[section][alternative=command,command=\SectionListEntry] ## Forced Page Break \definelistextra[page][before=\page] Then, when you place your ToC, you have to call for the extras and define the spot for the break: \placecontent[extras={6=page}] %puts break before Chapter 6 list item Solution given by Hans on the mailing list on September 29, 2010, and applies to MKIV (at least). ## Dots in section numbers If you want to avoid a dot if there's no number, try this solution (by Wolfgang Schuster as of 2008-04-29): \setuplist [chapter,section,...] [numbercommand=\DotAfterNumber] \def\DotAfterNumber#1{\doiftext{#1}{#1.}} ## Author in ToC see setup for author/title/subtitle titling and author in ToC in Proceedings_style. You can write desired items to the ToC or an other list (see below) using \writetolist. ## Several ToC's in different languages If you want to have two tables of contents in your document, one in a native language and the other, for example, in english. This is how to do it: \mainlanguage[de] \starttext \completecontent %\start\language[en] \start\mainlanguage[en] \completecontent \stop \chapter{\translate[en=Chapter One,de=Kapitel Eins]} \chapter{\translate[en=Chapter Two,de=Kapitel Zwei]} \stoptext ## Creating other "Table of ..." Generally all "Table (List) of ..." are defined with \definelist and \setuplist. You can "collect" several section levels in one list using \definecombinedlist, that's even explained in "ConTeXt, an excursion" (see Manuals). To have some parts of your title texts not appear in the table of contents, use \nolist and have a look at mag-0001.pdf. ## How ConTeXt MkII generates the ToC (from Tobias Burnus on the mailinglist) The mechanism is a follows: 1. 
TeX is run and chapter names with pagenumbers etc. are saved into <jobname>.tui. 2. TeXutil is run and sorts some entries in *.tui and writes *.tuo 3. TeX is run again, reads .tuo and uses that data to produce the table of contents etc. (and produces a new .tui) As this changes the page numbers (for longer table of contents), TeXutil and TeX are re-run again. In principle TeXExec should do this automatically. MkIV uses *.tuc files and uses less runs. ## ToC (and other lists) via project structures ConTeXt Mark IV supports the generation of lists via Project structure. Thus, given a product (possibly within some project), and given two or more components in that product with its own sections (chapters, sections, etc) or floats (figures, tables, etc); then each component can have its own list of contents independent of the other. This is done by setting criterium=component. For example, create three files, one product and two components (slightly modified version of a sample posted by Hans in 2011): \startproduct example-1-product \definecombinedlist[summary][chapter] \setupcombinedlist[summary][criterium=component] \component example-1-component-a \component example-1-component-b \stopproduct \startcomponent example-1-component-a \starttitle[title=Contents] \placesummary \stoptitle \startchapter[title=Ward] \input ward \stopchapter \stopcomponent \startcomponent example-1-component-b \starttitle[title=Contents] \placesummary \stoptitle \startchapter[title=Zapf] \input zapf \stopchapter \stopcomponent ## Alternating filler dots The following code was provided by Wolfgang on the mailing list (http://www.ntg.nl/pipermail/ntg-context/2013/071144.html). It produces alternating dots like in Knuths' TeXbook and MetaFont book. The \definefiller command requires ConTeXt version 2013.01.13 or newer. 
\definefiller [pavel-0] [alternative=symbol, method=global, width=1em, leftmargin=.5em, rightmargin=.5em, symbol=\clap{.}] \definefiller [pavel-1] [pavel-0] [align=left] \definefiller [pavel-2] [pavel-0] [align=middle] \newconditional\PavelState \define\PavelFiller {\ifconditional\PavelState \global\setfalse\PavelState \filler[pavel-1]%% \else \global\settrue\PavelState \filler[pavel-2]%% \fi} \setuplistalternative [c] [filler=\PavelFiller] \starttext \completecontent [alternative=c] \dorecurse{10}{\expanded{\chapter{Chapter \recurselevel}}} \stoptext ## Suppressing page numbering for the ToC pages Sometimes the document needs to have pagenumbering according to realpages, but numbering shouldn't be visible until the first text page. This is Wolfgang's trick (January 2013) for suppressing the pagenumbers so that they'll still be counted: \startsectionblockenvironment[frontpart] \setuppagenumbering[location=] \stopsectionblockenvironment %\startsectionblockenvironment[bodypart] %\setcounter[userpage][1] %\stopsectionblockenvironment \starttext \startfrontmatter \completecontent \stopfrontmatter \startbodymatter \dorecurse{10}{\expanded{\chapter{Chapter \recurselevel}}} \stopbodymatter \stoptext
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9133094549179077, "perplexity": 5721.830490101121}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-51/segments/1544376826354.54/warc/CC-MAIN-20181214210553-20181214232553-00103.warc.gz"}
https://or.stackexchange.com/questions/6835/ilog-cpoptimizer-soft-constraints-multiple-solves-reusing-searched-space
# ILOG CPOptimizer soft constraints (multiple solves reusing searched space) I'm trying to feed information retrieved from my neural network to my CP model to help narrow down the search on big instances of my problem. However, I also want to remove the additionally imposed constraints from the model and solve it to the optimum, once the best possible solution given the additional constraints from the NN is found. This can be done with the model.remove_expressions() function, but I've noticed that the search starts from the beginning without using any knowledge acquired in the previous search. Is there a way to transfer the already searched space from the more constrained model to the less constrained one (i.e. remove constraints and search only the remaining space, not all of it)? I've considered warm starting (using the best solution from the more constrained problem) and also, instead of adding additional conditions, using the data from the neural network to guide the search phases. However, from my experience both of these are not so efficient, so they are only my backup options.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7239923477172852, "perplexity": 674.2793032711062}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-21/segments/1652662580803.75/warc/CC-MAIN-20220525054507-20220525084507-00696.warc.gz"}
https://www.datasciencecentral.com/when-bayes-ockham-and-shannon-come-together-to-define-machine/
# When Bayes, Ockham, and Shannon come together to define machine learning Acknowledgements Thanks to my CS7641 class at Georgia Tech in my MS Analytics program, where I discovered this concept and was inspired to write about it. Thanks to Matthew Mayo for editing and re-publishing this in KDnuggets. Introduction It is somewhat surprising that among all the high-flying buzzwords of machine learning, we don’t hear much about the one phrase which fuses some of the core concepts of statistical learning, information theory, and natural philosophy into a single three-word-combo. Moreover, it is not just an obscure and pedantic phrase meant for machine learning (ML) Ph.Ds and theoreticians. It has a precise and easily accessible meaning for anyone interested to explore, and a practical pay-off for the practitioners of ML and data science. I am talking about Minimum Description Length. And you may be thinking what the heck that is… Let’s peel the layers off and see how useful it is… Bayes and his Theorem We start with (not chronologically) with Reverend Thomas Bayes, who by the way, never published his idea about how to do statistical inference, but was later immortalized by the eponymous theorem. It was the second half of the 18th century, and there was no branch of mathematical sciences called “Probability Theory”. It was known simply by the rather odd-sounding “Doctrine of Chances” — named after a book by Abraham de Moievre. An article called, “An Essay towards solving a Problem in the Doctrine of Chances”, first formulated by Bayes, but edited and amended by his friend Richard Price, was read to Royal Society and published in the Philosophical Transactions of the Royal Society of London, in 1763. In this essay, Bayes described — in a rather frequentist manner — the simple theorem concerning joint probability which gives rise to the calculation of inverse probability i.e. Bayes Theorem. 
Many a battle has been fought since then between the two warring factions of statistical science — Bayesians and Frequentists. But for the purpose of the present article, let us ignore the history for a moment and focus on the simple explanation of the mechanics of the Bayesian inference. In the world of statistical inference, a hypothesis is a belief. It is a belief about the true nature of the process (which we can never observe), that is behind the generation of a random variable (which we can observe or measure, albeit not without noise). In statistics, it is generally defined as a probability distribution. But in the context of machine learning, it can be thought of as any set of rules (or logic or process), which we believe, can give rise to the examples or training data, we are given to learn the hidden nature of this mysterious process.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8247502446174622, "perplexity": 1021.3485834304843}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-40/segments/1664030336880.89/warc/CC-MAIN-20221001163826-20221001193826-00252.warc.gz"}
https://www.jbc.org/article/S0021-9258(20)67369-0/fulltext
Reflections| Volume 288, ISSUE 14, P10084-10091, April 05, 2013 The Tryptophan Synthase α2β2 Complex: A Model for Substrate Channeling, Allosteric Communication, and Pyridoxal Phosphate Catalysis Open AccessPublished:February 20, 2013 I reflect on my research on pyridoxal phosphate (PLP) enzymes over fifty-five years and on how I combined research with marriage and family. My Ph.D. research with Esmond E. Snell established one aspect of PLP enzyme mechanism. My postdoctoral work first with Hans L. Kornberg and then with Alton Meister characterized the structure and function of another PLP enzyme, l-aspartate β-decarboxylase. My independent research at the National Institutes of Health (NIH) since 1966 has focused on the bacterial tryptophan synthase α2β2 complex. The β subunit catalyzes a number of PLP-dependent reactions. We have characterized these reactions and the allosteric effects of the α subunit. We also used chemical modification to probe enzyme structure and function. Our crystallization of the tryptophan synthase α2β2 complex from Salmonella typhimurium led to the determination of the three-dimensional structure with Craig Hyde and David Davies at NIH in 1988. This landmark structure was the first structure of a multienzyme complex and the first structure revealing an intramolecular tunnel. The structure has provided a basis for exploring mechanisms of catalysis, channeling, and allosteric communication in the tryptophan synthase α2β2 complex. The structure serves as a model for many other multiprotein complexes that are important for biological processes in prokaryotes and eukaryotes. B.A. at the University of Texas at Austin (1954–1957) I attended the University of Texas in Austin (UT-Austin), where my father was an associate professor of English. My mother was a librarian. My parents encouraged me to study hard and to pursue any career that interested me. 
Although I started at UT-Austin as a premed major, I changed to a chemistry major after working as a technician in a biochemistry laboratory. I enjoyed experimental work and contacts with graduate students. I heard good things about Esmond Snell, who was a professor of chemistry at UT and was to go to University of California, Berkeley (UC-Berkeley), in 1956 to be chairman of the Department of Biochemistry. Snell's reputation as an outstanding biochemist was one factor in my decision to start graduate work at UC-Berkeley in 1957. Ph.D. at UC-Berkeley (1957–1961) The staff and students in the Department of Biochemistry at UC-Berkeley welcomed me warmly. I learned basic biochemistry in courses taught by Esmond Snell and Frederick Carpenter. The second year, I studied basic enzymology and took an enzyme lab course taught by Jesse Rabinowitz. I decided to do my doctoral research under Esmond Snell with Jesse Rabinowitz and Edward Adelberg as thesis advisors. Snell's outstanding work included the discovery of two new forms of vitamin B6, pyridoxal and pyridoxamine, and the elucidation of the general basis for catalysis by vitamin B6-dependent enzymes. My Ph.D. thesis work was designed to test one aspect of Snell's proposed general mechanism that differed from a similar mechanism proposed independently by A. E. Braunstein (Fig. 1). I isolated bacteria from Strawberry Creek on the UC campus that used α-methylserine as a sole source of carbon and nitrogen. From these bacteria, I partially purified and characterized a pyridoxal phosphate (PLP)- and tetrahydrofolate-dependent α-methylserine hydroxymethyltransferase. The enzyme catalyzed the reversible cleavage of α-methylserine to d-alanine and formaldehyde. My results showed that this reaction does not require labilization of an α-hydrogen ( • Wilson E.M. • Snell E.E. Metabolism of α-methylserine. I. α-Methylserine hydroxymethyltransferase. , • Wilson E.M. • Snell E.E. Metabolism of α-methylserine. II. 
Stereospecificity of α-methylserine hydroxymethyltransferase. ). Braunstein's mechanism did include labilization of an α-hydrogen. Drs. Beverly Guirard and Hiroshi Wada were important influences on me in the Snell laboratory (Fig. 2). Hiroshi Wada was the first of many Japanese scientists to study PLP enzymes with Snell. Jesse Rabinowitz helped me with the tetrahydrofolate coenzyme in my enzyme. I also learned about PLP enzymes from a new young faculty member, Terry Jenkins, who had recently characterized highly purified glutamic aspartatic aminotransferase for his Ph.D. research with Irwin Sizer at the Massachusetts Institute of Technology. The Department of Biochemistry had an excellent series of guest speakers on Friday afternoons. I was especially impressed by the lecture by Hans L. Kornberg, who had recently discovered the glyoxylate cycle, and I applied to do postdoctoral work in his laboratory. Postdoctoral Work (1962–1966) My postdoctoral work with Hans Kornberg at the University of Leicester in England from 1962 through 1963 was funded by an American Cancer Society grant (Fig. 3). My problem was to understand the control of the glyoxylate cycle in Achromobacter d-15 and how the control differed from that of the glyoxylate cycle in Escherichia coli. I used isotopes in short-term growth experiments and measured enzymatic activities in cell-free extracts. Many of the assays and oxidation experiments utilized Warburg manometers. I joked that I was at the bottom of a line of succession: I learned to use the Warburg manometer from Hans Kornberg, who had learned from Hans Krebs, who had learned from Otto Warburg. I found an important difference between Achromobacter d-15 and E. coli. Achromobacter d-15 lacked oxaloacetate decarboxylase but had high levels of l-aspartate 4-carboxy-lyase, a PLP-dependent enzyme that converts l-aspartate to α-alanine and carbon dioxide. I purified this PLP enzyme and characterized a number of its properties ( • Wilson E.M. 
• Kornberg H.L. Properties of crystalline l-aspartate 4-carboxy-lyase from Achromobacter sp. ). I obtained beautiful hexagonal crystals of the purified enzyme that were too thin for x-ray crystallography at that time. The most important thing I learned was that my main love was the study of pure PLP enzymes, not the study of metabolic control. I enjoyed a second postdoctoral position with Alton Meister in the Department of Biochemistry at Tufts University School of Medicine in Boston (Fig. 4). His group was studying l-aspartate 4-carboxy-lyase (also known as l-aspartate β-decarboxylase) from a different source, Alcaligenes faecalis. To learn more about the binding of PLP to this enzyme, I determined the optical rotatory dispersion of various forms of the enzyme and compared the Cotton effects obtained with those that had been reported for glutamate-aspartate transaminase ( • Wilson E.M. • Meister A. Optical rotatory dispersion of l-aspartate β-decarboxylase and its derivatives. ). I next studied the reaction of β-hydroxyaspartate with l-aspartate 4-carboxy-lyase and postulated a new mechanism of inhibition ( • Miles E.W. • Meister A. The mechanism of the reaction of β-hydroxyaspartate with l-aspartate β-decarboxylase. A new type of pyridoxal 5′-phosphate-enzyme inhibition. ). I made many friends and important professional contacts at Tufts, including Kenji Soda, James Manning, Jonathan Nishimura, Paul Anderson, Elizabeth Mooz, Mahtab Bamji, and Vaira and Daniel Wellner. Independent Research at the National Institutes of Health (1966–Present) I was fortunate to be offered an independent position in the Laboratory of Biochemical Pharmacology at the National Institute of Arthritis and Metabolic Diseases (NIAMD) of the National Institutes of Health (NIH) in 1966. I have spent the rest of my career at NIH in the same laboratory, which has been renamed the Laboratory of Biochemistry and Genetics. I have been a Scientist Emeritus since 2000. 
My studies of PLP enzymes during my doctoral and postdoctoral work convinced me that PLP enzymes are ideal targets for studies of enzyme structure and function. The PLP coenzyme provides a spectrophotometric probe that is useful for following catalytic intermediates, reaction kinetics, and conformational changes. My search for an especially interesting PLP enzyme for my independent work led me to tryptophan synthase. Irving Crawford and Charles Yanofsky had reported that E. coli tryptophan synthase is an enzyme complex consisting of two separable polypeptides, now designated the α subunit and the β subunit ( • Crawford I.P • Yanofsky C. On the separation of the tryptophan synthetase of Escherichia coli into two protein components. ). The separate α subunit catalyzes the α reaction (Equation 1). $Indole-3-glycerol phosphate←→indole +D-glyceraldehyde 3-phosphate$ (Eq. 1) The separate β subunit catalyzes the PLP-dependent β reaction (Equation 2). $Indole+L-serine→L-tryptophan+H2O$ (Eq. 2) The activity of each separate enzyme is increased by 30–100-fold when one subunit is mixed with the other subunit. The physiologically important reaction, the αβ reaction (Equation 3), is catalyzed only by the α2β2 complex. $Indole-3-glycerol phosphate+L-serine→ L-tryptophan+D-glyceraldehyde 3-phosphate+H2O$ (Eq. 3) The αβ reaction is essentially the sum of the α reaction and the β reaction. Indole is an intermediate in the αβ reaction. The finding that the reaction rates of the β subunit in the β reaction were increased by 30-fold by interaction with the α subunit suggested that this would be a good system for investigating the specificity and control of the catalytic site of a PLP enzyme. I started my work at NIH with generous help from Irving Crawford at the Scripps Clinic and Research Foundation in La Jolla, California. Irving sent me purified enzymes and E. coli strains for preparing the separate α and β subunits. 
Absorption spectra of the β subunit in the presence of l-serine showed that the β subunit converted l-serine to pyruvate (Equation 4). $L-Serine→pyruvae+ammonia$ (Eq. 4) The absorption spectra of the β subunit in the presence of l-serine and mercaptoethanol suggested that PLP was converted to pyridoxamine phosphate (PMP) by a transamination reaction. I learned from Irving Crawford that he and M. Hatanaka had earlier found evidence for a transamination reaction. However, they had been unable to publish their results because they had not identified the keto acid reaction product. Irving and I decided to collaborate on this problem. Fortunately, I was able to work with him in his laboratory at the Scripps Clinic and Research Foundation for three weeks in the summer of 1967 on my way to the International Congress of Biochemistry in Tokyo. My husband, Harry Todd Miles, worked with Leslie Orgel during the same time. This was a very productive three weeks for all of us. Irving and I carried out many essential experiments for a joint paper ( • Miles E.W. • Hatanaka M. • Crawford I.P. A new thiol-dependent transamination reaction catalyzed by the B protein of Escherichia coli tryptophan synthetase. ). We demonstrated that the β subunit converted l-serine and mercaptoethanol to S-hydroxyethyl-l-cysteine (Equation 5), as demonstrated previously for the α2β2 complex. $Mercaptoethanol+L-serine→ S-hydroxyethyl-L-cysteine+H2O$ (Eq. 5) We identified S-pyruvylmercaptoethanol and PMP as the products of the new transamination reaction (Equation 6). $PLP+L-serine+mercaptoethanol←→ S-pyruvylmercaptoethanol+PMP+H2O$ (Eq. 6) This transamination reaction is useful for removing PLP from the β subunit to give the apo-β subunit. The results in the article showed that the α subunit completely inhibits the serine deaminase reaction (Equation 4) and the transaminase reaction (Equation 6) but stimulates the β replacement reactions (Equations 2 and 5). 
Thus, tryptophan synthase is a promising system for studying how subunit interaction controls reaction rates and reaction specificity in a multienzyme complex. Irving Crawford was an important mentor to me and helped me to get my research at NIH off to a good start. Although we never published another paper together, we had extensive correspondence and enjoyed meeting each other at tryptophan conferences. Unfortunately, he died prematurely in 1989. Chemical Modification In some of my early work at NIH, I used chemical modification to identify key residues in the β subunit and to determine the effects of modification on enzymatic activities and subunit interaction. I found that 5,5′-dithiobis(2-nitrobenzoic acid) and N-ethylmaleimide modify two SH residues in the apo-β subunit and one SH residue in the holo-β subunit. Interestingly, modification of the holo-β subunit by N-ethylmaleimide stimulates activity in Equation 4 but inhibits activity in Equation 2 ( • Miles E.W. The B protein of Escherichia coli tryptophan synthetase. I. Effects of sulfhydryl modification on enzymatic activities and subunit interaction. ). We later modified an essential histidyl residue in the β subunit by photo-oxidation in the presence of PLP and l-serine and by reaction with diethylpyrocarbonate ( • Miles E.W. • Kumagai H. Modification of essential histidyl residues of the β2 subunit of tryptophan synthetase by photo-oxidation in the presence of pyridoxal 5′-phosphate and l-serine and by diethylpyrocarbonate. ). After this paper was published, I was invited to write a review for Methods in Enzymology on the modification of histidyl residues by diethylpyrocarbonate ( • Miles E.W. Modification of histidyl residues in proteins by diethylpyrocarbonate. ). This review has been highly cited. We later modified arginine and amino groups. 
Proteolysis and Protein Folding We discovered that limited proteolysis of the α2β2 complex by trypsin produced a single site of cleavage in the α subunit at Arg-188 and produced an active “nicked” α2β2 complex ( • Higgins W. • Fairwell T. • Miles E.W. An active proteolytic derivative of the α subunit of tryptophan synthase. Identification of the site of cleavage and characterization of the fragments. ). This report led to a productive collaboration with Katsuhide Yutani at the Institute for Protein Research of Osaka University in Japan. We found that guanidine hydrochloride induced stepwise unfolding of the α subunit. Parallel unfolding experiments with the two α subunit fragments provided evidence for the stepwise unfolding of the two α subunit domains ( • Miles E.W. • Yutani K. • Ogasahara K. Guanidine hydrochloride induced unfolding of the α subunit of tryptophan synthase and of the two α proteolytic fragments: evidence for stepwise unfolding of the two α domains. ). New Reactions of Tryptophan Synthase and Reaction Mechanism Hidehiko Kumagai determined the activities of the tryptophan synthase β subunit and the tryptophan synthase α2β2 complex in β elimination reactions and β replacement reactions with different amino acid substrates. The results provide evidence that the reactions proceed through a key common enzyme-bound aminoacrylic intermediate, as reported previously for the reactions of tryptophanase with the same substrates. The main difference between the reactions of tryptophan synthase and tryptophanase is the failure of tryptophan synthase to catalyze the formation of pyruvate and indole from l-tryptophan ( • Kumagai H. • Miles E.W. The B protein of Escherichia coli tryptophan synthetase. II. New β-elimination and β-replacement reactions. ). 
Later, Robert Phillips and I compared the inhibition of the tryptophan synthase α2β2 complex and tryptophanase by the tryptophan analogs oxindolyl-l-alanine and 2,3-dihydro-l-tryptophan, which have structures similar to the indolenine tautomer of l-tryptophan. Both compounds are very good competitive inhibitors of both enzymes. This result provides evidence that the indolenine tautomer of l-tryptophan is an intermediate in reactions catalyzed by both tryptophanase and tryptophan synthase ( • Phillips R.S. • Miles E.W. • Cohen L.A. Interactions of tryptophan synthase, tryptophanase, and pyridoxal phosphate with oxindolyl-l-alanine and 2,3-dihydro-l-tryptophan: support for an indolenine intermediate in tryptophan metabolism. ). Using fluorine NMR, we discovered two new, very slow reactions catalyzed by the tryptophan synthase α2β2 complex: the isomerization of 5-fluoro-l-tryptophan and (3S)-2,3-dihydro-5-fluoro-l-tryptophan ( • Miles E.W. • Phillips R.S. • Yeh H.J. • Cohen L.A. Isomerization of (3S)-2,3-dihydro-5-fluoro-l-tryptophan and of 5-fluoro-l-tryptophan catalyzed by tryptophan synthase: studies using fluorine-19 nuclear magnetic resonance and difference spectroscopy. ). Enzyme Purification and Crystallization Early studies of E. coli tryptophan synthase showed that it was made up of two proteins, which were partially separated by chromatography on DEAE-cellulose ( • Crawford I.P • Yanofsky C. On the separation of the tryptophan synthetase of Escherichia coli into two protein components. ). This was one of the first published uses of DEAE-cellulose. Subsequent studies of tryptophan synthase focused on the purification and characterization of the separate subunits from strains that contained only one of the two subunits. The α2β2 complex was then reconstituted from the isolated subunits. 
When Osao Adachi came to my laboratory, he wanted to purify the “native α2β2 complex” and to compare its properties with those of the α2β2 complex that was reconstituted from the isolated subunits. We were fortunate to receive from Charles Yanofsky a strain of E. coli that overproduced large amounts of both the α and β subunits. Adachi developed a five-step purification procedure that involved two chromatography steps on DEAE-Sephadex. Crystallization by ammonium sulfate in the final step yielded beautiful crystals with a uniform rod shape. The native and reconstituted α2β2 complexes exhibited identical absorption spectra, enzymatic activities, sedimentation velocity patterns, and bands on SDS gel electrophoresis ( • Kohn L.D. • Miles E.W. Crystalline α2β2 complexes of tryptophan synthetase of Escherichia coli. A comparison between the native complex and the reconstituted complex. ). Adachi also developed an improved procedure for preparing and crystallizing the β subunit ( • Miles E.W. A rapid method for preparing crystalline β2 subunit of tryptophan synthetase of Escherichia coli in high yield. ). Although we tried very hard to obtain crystals of the β subunit and α2β2 complex from E. coli that were large enough for x-ray crystallography, we did not succeed. Our later discovery that the Salmonella typhimurium α2β2 complex gives much better crystals than the E. coli enzyme is an example of serendipity. I was purifying the S. typhimurium enzyme for protein folding studies when I noticed that the enzyme crystallized in the fraction collection tubes. I suggested to a new postdoctoral fellow, Ashraf Ahmed, that he might try to grow crystals of the Salmonella enzyme for x-ray crystallography. After brief instructions on crystal growth from David Davies' laboratory, Ashraf grew excellent large crystals that diffracted well ( • Ahmed S.A. • Miles E.W. • Davies D.R. 
Crystallization and preliminary x-ray crystallographic data of the tryptophan synthase α2β2 complex from Salmonella typhimurium. ). We demonstrated that the enzyme was fully functional in the crystalline state by studies with microcrystals ( • Ahmed S.A. • Hyde C.C. • Thomas G. • Miles E.W. Microcrystals of tryptophan synthase α2β2 complex from Salmonella typhimurium are catalytically active. ). Microspectrophotometric studies on single crystals of the tryptophan synthase α2β2 complex demonstrated that substrates, substrate analogs, and reaction intermediate analogs form chromophoric intermediates with PLP at the active site of the β subunit ( • Mozzarelli A. • Peracchi A. • Rossi G.L. • Ahmed S.A. • Miles E.W. Microspectrophotometric studies on single crystals of the tryptophan synthase α2β2 complex demonstrate formation of enzyme-substrate intermediates. ). Ligands that bind to the α subunit alter the distribution of intermediates at the β site in both the soluble and crystalline states. X-ray Crystallography The three-dimensional structure of the tryptophan synthase α2β2 complex from S. typhimurium revealed for the first time the architecture of a multienzyme complex and the presence of an intramolecular tunnel (Fig. 5) ( • Hyde C.C. • Ahmed S.A. • Miles E.W. • Davies D.R. Three-dimensional structure of the tryptophan synthase α2β2 multienzyme complex from Salmonella typhimurium. ). The four polypeptide chains are arranged in a nearly linear αββα order and form a complex 150 Å long. The active site of the α subunit was located by the presence of a substrate analog, indolepropanol phosphate. The active site of the β subunit was located by the presence of the PLP coenzyme. The active sites of the α and β subunits are 25 Å apart and are connected by a remarkable hydrophobic tunnel. 
The tunnel is believed to provide a passageway for the diffusion of indole from the site of its production from indole-3-glycerol phosphate at the active site of the α subunit to the site of tryptophan synthesis at the active site of the β subunit (see (Eq. 1), (Eq. 2), (Eq. 3)). Intramolecular tunneling could prevent the escape of indole to the solvent during catalysis. The structure showed that the α subunit has an 8-fold αβ barrel fold. The PLP coenzyme is sandwiched between the two domains of the β subunit. The β subunit structure was the first structure reported for an enzyme in a class of PLP enzymes termed group II. Other enzymes in this class include O-acetylserine sulfhydrylase, cystathionine β-synthase, and threonine dehydratase. Additional crystal structures have been determined on wild-type and mutant forms of the S. typhimurium tryptophan synthase α2β2 complex and in the presence of substrates and analogs by Sangkee Rhee in David Davies' group and by Ilme Schlichting's group. These structures demonstrate the presence of a series of PLP intermediates at the active site of the β subunit and in the presence and absence of ligands at the active site of the α subunit. The structural results, combined with kinetic and spectroscopic studies, provide evidence for allosteric communication and open and closed sites in the α and β subunits (see a recent review by Dunn ( • Dunn M.F. Allosteric regulation of substrate channeling and catalysis in the tryptophan synthase bienzyme complex. )). Protein Engineering Identifies Key Residues While the initial crystal structure determination was under way, we developed methods of site-directed mutagenesis of tryptophan synthase from S. typhimurium. Our ability to make mutations in the S. typhimurium α2β2 complex would allow us to grow crystals of mutant forms of the S. typhimurium α2β2 complex as well as to characterize the mutant forms by spectroscopic and kinetic studies. 
After the structure was available, we mutated potentially important residues in the active sites of the α and β subunits and in the interaction site between the two subunits. Solution studies with nineteen mutants at α subunit Glu-49 showed that Glu-49 is essential for activity ( • Miles E.W. • McPhie P. • Yutani K. Evidence that glutamic acid 49 of tryptophan synthase α subunit is a catalytic residue. Inactive mutant proteins substituted at position 49 bind ligands and transmit ligand-dependent effects to the β subunit. ). Asp-60 is another catalytic residue in the α subunit ( • Nagata S. • Hyde C.C. • Miles E.W. The α subunit of tryptophan synthase. Evidence that aspartic acid 60 is a catalytic residue and that the double alteration of residues 175 and 211 in a second-site revertant restores the proper geometry of the substrate binding site. ). Cryocrystallography of the α2β2 complex with an α subunit D60N mutation with the true substrate revealed the correct orientation of α subunit active site Glu-49 ( • Rhee S. • Miles E.W. • Mozzarelli A. • Davies D.R. Cryocrystallography and microspectrophotometry of a mutant (αD60N) tryptophan synthase α2β2 complex reveals allosteric roles of αAsp60. ). Mutations of the β subunit clarified the roles of β subunit His-86, Lys-87, Arg-148, Cys-170, and Cys-230 ( • Miles E.W. • Kawasaki H. • Ahmed S.A. • Morita H. • Morita H. • Nagata S. The β subunit of tryptophan synthase. Clarification of the roles of histidine 86, lysine 87, arginine 148, cysteine 170, and cysteine 230. ). Crystal structures of the β subunit K87T mutant with ligands bound to the active sites of the α and β subunits revealed ligand-induced conformational changes ( • Rhee S. • Parris K.D. • Hyde C.C. • Ahmed S.A. • Miles E.W. • Davies D.R. Crystal structures of a mutant (βK87T) tryptophan synthase α2β2 complex with ligands bound to the active sites of the α- and β-subunits reveal ligand-induced conformational changes. ). 
We characterized several mutants in collaboration with Michael Dunn, including the β subunit E109D mutant ( • Brzović P.S. • Kayastha A.M. • Miles E.W. • Dunn M.F. Substitution of glutamic acid 109 by aspartic acid alters the substrate specificity and catalytic activity of the β-subunit in the tryptophan synthase bienzyme complex from Salmonella typhimurium. ). This mutant also has been used in studies of channeling (see below). We are especially interested in the functional role of a flexible loop (loop 6) in the α subunit because loop 6 contains the site of limited tryptic cleavage at Arg-188 that we discovered earlier ( • Higgins W. • Fairwell T. • Miles E.W. An active proteolytic derivative of the α subunit of tryptophan synthase. Identification of the site of cleavage and characterization of the fragments. ). Studies with the α subunit R179L mutant showed that loop 6 is important both for ligand binding to the α site and for the ligand-induced conformational change from an “open” to a “closed” structure ( • Brzović P.S. • Sawa Y. • Hyde C.C. • Miles E.W. • Dunn M.F. Evidence that mutations in a loop region of the α-subunit inhibit the transition from an open to a closed conformation in the tryptophan synthase bienzyme complex. ). Our results showed that Thr-183 in α subunit loop 6 also plays a role in allosteric regulation ( • Yang X.J. • Miles E.W. Threonine 183 and adjacent flexible loop residues in the tryptophan synthase α subunit have critical roles in modulating the enzymatic activities of the β subunit in the α2β2 complex. ). Channeling and Intersubunit Communication The tryptophan synthase α2β2 complex is thought to channel indole, which is an intermediate in the αβ reaction (Equation 3), from the active site of the α subunit to the active site of the β subunit. The three-dimensional structure of the tryptophan synthase α2β2 complex ( • Hyde C.C. • Ahmed S.A. • Miles E.W. • Davies D.R. 
Three-dimensional structure of the tryptophan synthase α2β2 multienzyme complex from Salmonella typhimurium. ) provides physical evidence for a 25-Å hydrophobic tunnel that connects the α and β subunits (Fig. 5). We probed the role of the tunnel in indole channeling and in intersubunit communication by kinetic characterization of wild-type and mutant forms of the tryptophan synthase α2β2 complex ( • Anderson K.S. • Miles E.W. • Johnson K.A. Serine modulates substrate channeling in tryptophan synthase. A novel intersubunit triggering mechanism. ). Rapid chemical quench flow experiments showed that indole is channeled rapidly and that l-serine at the β site increases the rate of indole-3-glycerol phosphate cleavage at the α site. A mutation at the β site (E109D) slows the reaction rate enough to permit detection of the indole intermediate in a single turnover in the αβ reaction. Rapid kinetic analyses of the α2β2 complex with mutations in the β site (C170F and C170W) designed to restrict the tunnel showed that the mutations interfere with efficient indole channeling such that indole can be detected in a single turnover ( • Anderson K.S. • Kim A.Y. • Quillen J.M. • Sayers E. • Yang X.J. • Miles E.W. Kinetic characterization of channel impaired mutants of tryptophan synthase. ). We also probed the indole tunnel by Nile Red fluorescence with wild-type, mutant, and chemically modified enzymes (Fig. 6) ( • Ruvinov S.B. • Yang X.J. • Parris K.D. • Banik U. • Ahmed S.A. • Miles E.W. • Sackett D.L. Ligand-mediated changes in the tryptophan synthase indole tunnel probed by Nile Red fluorescence with wild type, mutant, and chemically modified enzymes. ). The interaction of Nile Red in the nonpolar tunnel near Cys-170 and Phe-280 in the β site is supported by experiments with residues altered at these positions by mutation or chemical modification. 
The results of our experiments with Nile Red as a probe of conformational changes in the tunnel suggest that allosteric ligands and active site ligands induce a tunnel restriction near Phe-280 that controls the passage of indole. Thus, the rapid kinetic experiments and the experiments with Nile Red provide evidence for ligand-dependent intersubunit communication. Allosteric Regulation We and others have found that pH, temperature, cations, and α subunit ligands regulate the tryptophan synthase α2β2 complex by altering the equilibrium distribution of PLP intermediates with serine ( • Peracchi A. • Mozzarelli A. • Rossi G.L. Monovalent citations affect dynamic and functional properties of the tryptophan synthase α2β2 complex. , • Peracchi A. • Bettati S. • Mozzarelli A. • Rossi G.L. • Miles E.W. • Dunn M.F. Allosteric regulation of tryptophan synthase: effects of pH, temperature, and α-subunit ligands on the equilibrium distribution of pyridoxal 5′-phosphate-l-serine intermediates. , • Fan Y.X. • McPhie P. • Miles E.W. Regulation of tryptophan synthase by temperature, monovalent cations, and an allosteric ligand. Evidence from Arrhenius plots, absorption spectra, and primary kinetic isotope effects. ). The results support a model in which reaction conditions alter the equilibrium distribution between a low-activity open conformation and a high-activity closed conformation. The aminoacrylate PLP Schiff base is associated with the closed form and is stabilized by temperature, protons, and α subunit ligands. Status of Tryptophan Synthase Research Today I am gratified that investigations of tryptophan synthase continue to bring rich rewards. A recent review ( • Dunn M.F. Allosteric regulation of substrate channeling and catalysis in the tryptophan synthase bienzyme complex. ) summarizes the current status of research and presents evidence for the allosteric regulation of substrate channeling and catalysis in the tryptophan synthase α2β2 complex. 
The catalytic pathway is composed of a series of at least nine reactions involving PLP and l-serine at the β site and reactions with the indole ring. Advances in understanding the relationship between chemical events at the α and β sites and allosteric regulation have been greatly aided by the synthesis of substrate analogs and characterization of the interactions of these analogs with the α and β sites by x-ray crystallography and kinetic analyses ( • Dunn M.F. Allosteric regulation of substrate channeling and catalysis in the tryptophan synthase bienzyme complex. ). The most interesting of these structures involves the reaction of indoline at the α site with bound glyceraldehyde 3-phosphate. The combination of indole-3-glycerol phosphate binding to the α site and the reaction of l-serine to form aminoacrylate at the β site gives a crystal structure with both α and β sites closed. Dunn ( • Dunn M.F. Allosteric regulation of substrate channeling and catalysis in the tryptophan synthase bienzyme complex. ) concluded that ligand binding at the α and β sites and allosteric interactions acting over 25 Å choreograph the switching of the α and β subunits between low-activity open conformations and high-activity closed conformations. The conformational switches synchronize the activities of the α and β subunits and lead to efficient channeling of indole and overall catalysis. Mentors, Models, and Important Professional Contacts Herbert Tabor has been my most important mentor at NIH (Fig. 7). He was chief of the Laboratory of Biochemical Pharmacology when I joined the laboratory in 1966. He encouraged me to set my own course in independent research. Herb has always treated men and women equally and fairly. Several women in his laboratory have inspired me: Celia Tabor, Loretta Leive, Nancy Nossal, and Claude Klee. 
Other distinguished women scientists at NIH also have served as role models: Thressa Stadtman, Maxine Singer, Elizabeth Neufeld, Ruth Kirschstein, and Ann Ginsburg. I have enjoyed fruitful collaborations with several NIH scientists, including Robert Phillips, Peter McPhie, Allen Minton, David Davies, Craig Hyde, Ashraf Ahmed, Sangkee Rhee, and Boon Chock. Mentors and co-workers outside of NIH include Charles Yanofsky, Irving Crawford, Ronald Bauerle, Kasper Kirschner, Michael Goldberg, Michael Dunn, Karen Anderson, Kenneth Johnson, Katsuhide Yutani, Andrea Mozzarelli, and Ilme Schlichting. I developed many professional contacts at meetings, including Charles Yanofsky's tryptophan meetings at Stanford University and Asilomar and B6 meetings in Moscow, Leningrad, Finland, Japan, and Capri. I have visited Japan several times in connection with a research grant with Katsuhide Yutani and several meetings. I have had contacts with many Japanese scientists, including Kenji Soda, Hideaki Yamada, Hidehiko Kumagai, Hiroshi Wada, Osao Adachi, and Katsuyuki Tanizawa. Family and Activities I met my husband, Harry Todd Miles, at a Federation of American Societies for Experimental Biology (FASEB) meeting in Atlantic City in 1964. Todd was at the meeting with his research associate Frank Howard, whom I had known when we were both graduate students at UC-Berkeley. Todd was chief of the Organic Chemistry Section in the Laboratory of Molecular Biology at NIDDK. We married in 1966 and had two sons in 1969 and 1971. I thank Todd for being an excellent co-parent and for encouraging me to continue in research. We were fortunate to be able to enter our sons into the NIH preschool in 1974 a few months after it opened. Todd and I were both involved in the Parents of Preschoolers, Inc. (POPI), when it established parental governance of the preschool in 1975. Our sons attended Ayrlawn Elementary School and continued in the POPI afterschool program at Ayrlawn. 
All four of us drove together to our work and school in the morning and home together in the evening for seven years. Our sons are now well educated and have rewarding careers and families. Todd and I both retired on September 30, 2000, and are Scientists Emeritus. We enjoy classes and travel together and visits with our children and grandchildren. I am doing hand-building pottery at Glen Echo. I am active with the Bethesda Chapter of the Association for Women in Science as a mentor and program organizer. I work with my neighborhood association to help seniors continue to live in their homes (Aging in Place). Acknowledgments I thank my postdoctoral fellows, collaborators, friends, mentors, and family for inspiration and encouragement. I especially thank Herbert Tabor and NIH for providing research facilities and a good working environment. The noontime seminars and journal clubs in our laboratory had a strong influence on all of us. REFERENCES • Wilson E.M. • Snell E.E. Metabolism of α-methylserine. I. α-Methylserine hydroxymethyltransferase. J. Biol. Chem. 1962; 237: 3171-3179 • Wilson E.M. • Snell E.E. Metabolism of α-methylserine. II. Stereospecificity of α-methylserine hydroxymethyltransferase. J. Biol. Chem. 1962; 237: 3180-3184 • Wilson E.M. • Kornberg H.L. Properties of crystalline l-aspartate 4-carboxy-lyase from Achromobacter sp. Biochem. J. 1963; 88: 578-587 • Wilson E.M. • Meister A. Optical rotatory dispersion of l-aspartate β-decarboxylase and its derivatives. Biochemistry. 1966; 5: 1166-1174 • Miles E.W. • Meister A. The mechanism of the reaction of β-hydroxyaspartate with l-aspartate β-decarboxylase. A new type of pyridoxal 5′-phosphate-enzyme inhibition. Biochemistry. 1967; 6: 1734-1743 • Crawford I.P • Yanofsky C. On the separation of the tryptophan synthetase of Escherichia coli into two protein components. Proc. Natl. Acad. Sci. U.S.A. 1958; 44: 1161-1170 • Miles E.W. • Hatanaka M. • Crawford I.P. 
A new thiol-dependent transamination reaction catalyzed by the B protein of Escherichia coli tryptophan synthetase. Biochemistry. 1968; 7: 2742-2753 • Miles E.W. The B protein of Escherichia coli tryptophan synthetase. I. Effects of sulfhydryl modification on enzymatic activities and subunit interaction. J. Biol. Chem. 1970; 245: 6016-6025 • Miles E.W. • Kumagai H. Modification of essential histidyl residues of the β2 subunit of tryptophan synthetase by photo-oxidation in the presence of pyridoxal 5′-phosphate and l-serine and by diethylpyrocarbonate. J. Biol. Chem. 1974; 249: 2843-2851 • Miles E.W. Modification of histidyl residues in proteins by diethylpyrocarbonate. Methods Enzymol. 1977; 47: 431-442 • Higgins W. • Fairwell T. • Miles E.W. An active proteolytic derivative of the α subunit of tryptophan synthase. Identification of the site of cleavage and characterization of the fragments. Biochemistry. 1979; 18: 4827-4835 • Miles E.W. • Yutani K. • Ogasahara K. Guanidine hydrochloride induced unfolding of the α subunit of tryptophan synthase and of the two α proteolytic fragments: evidence for stepwise unfolding of the two α domains. Biochemistry. 1982; 21: 2586-2592 • Kumagai H. • Miles E.W. The B protein of Escherichia coli tryptophan synthetase. II. New β-elimination and β-replacement reactions. Biochem. Biophys. Res. Commun. 1971; 44: 1271-1278 • Phillips R.S. • Miles E.W. • Cohen L.A. Interactions of tryptophan synthase, tryptophanase, and pyridoxal phosphate with oxindolyl-l-alanine and 2,3-dihydro-l-tryptophan: support for an indolenine intermediate in tryptophan metabolism. Biochemistry. 1984; 23: 6228-6234 • Miles E.W. • Phillips R.S. • Yeh H.J. • Cohen L.A. Isomerization of (3S)-2,3-dihydro-5-fluoro-l-tryptophan and of 5-fluoro-l-tryptophan catalyzed by tryptophan synthase: studies using fluorine-19 nuclear magnetic resonance and difference spectroscopy. Biochemistry. 1986; 25: 4240-4249 • Kohn L.D. • Miles E.W. 
Crystalline α2β2 complexes of tryptophan synthetase of Escherichia coli. A comparison between the native complex and the reconstituted complex. J. Biol. Chem. 1974; 249: 7756-7763 • Miles E.W. A rapid method for preparing crystalline β2 subunit of tryptophan synthetase of Escherichia coli in high yield. J. Biol. Chem. 1974; 249: 5430-5434 • Ahmed S.A. • Miles E.W. • Davies D.R. Crystallization and preliminary x-ray crystallographic data of the tryptophan synthase α2β2 complex from Salmonella typhimurium. J. Biol. Chem. 1985; 260: 3716-3718 • Ahmed S.A. • Hyde C.C. • Thomas G. • Miles E.W. Microcrystals of tryptophan synthase α2β2 complex from Salmonella typhimurium are catalytically active. Biochemistry. 1987; 26: 5492-5498 • Mozzarelli A. • Peracchi A. • Rossi G.L. • Ahmed S.A. • Miles E.W. Microspectrophotometric studies on single crystals of the tryptophan synthase α2β2 complex demonstrate formation of enzyme-substrate intermediates. J. Biol. Chem. 1989; 264: 15774-15780 • Hyde C.C. • Ahmed S.A. • Miles E.W. • Davies D.R. Three-dimensional structure of the tryptophan synthase α2β2 multienzyme complex from Salmonella typhimurium. J. Biol. Chem. 1988; 263: 17857-17871 • Dunn M.F. Allosteric regulation of substrate channeling and catalysis in the tryptophan synthase bienzyme complex. Arch. Biochem. Biophys. 2012; 519: 154-166 • Miles E.W. • McPhie P. • Yutani K. Evidence that glutamic acid 49 of tryptophan synthase α subunit is a catalytic residue. Inactive mutant proteins substituted at position 49 bind ligands and transmit ligand-dependent effects to the β subunit. J. Biol. Chem. 1988; 263: 8611-8614 • Nagata S. • Hyde C.C. • Miles E.W. The α subunit of tryptophan synthase. Evidence that aspartic acid 60 is a catalytic residue and that the double alteration of residues 175 and 211 in a second-site revertant restores the proper geometry of the substrate binding site. J. Biol. Chem. 1989; 264: 6288-6296 • Rhee S. • Miles E.W. • Mozzarelli A. • Davies D.R. 
Cryocrystallography and microspectrophotometry of a mutant (αD60N) tryptophan synthase α2β2 complex reveals allosteric roles of αAsp60. Biochemistry. 1998; 37: 10653-10659 • Miles E.W. • Kawasaki H. • Ahmed S.A. • Morita H. • Morita H. • Nagata S. The β subunit of tryptophan synthase. Clarification of the roles of histidine 86, lysine 87, arginine 148, cysteine 170, and cysteine 230. J. Biol. Chem. 1989; 264: 6280-6287 • Rhee S. • Parris K.D. • Hyde C.C. • Ahmed S.A. • Miles E.W. • Davies D.R. Crystal structures of a mutant (βK87T) tryptophan synthase α2β2 complex with ligands bound to the active sites of the α- and β-subunits reveal ligand-induced conformational changes. Biochemistry. 1997; 36: 7664-7680 • Brzović P.S. • Kayastha A.M. • Miles E.W. • Dunn M.F. Substitution of glutamic acid 109 by aspartic acid alters the substrate specificity and catalytic activity of the β-subunit in the tryptophan synthase bienzyme complex from Salmonella typhimurium. Biochemistry. 1992; 31: 1180-1190 • Brzović P.S. • Sawa Y. • Hyde C.C. • Miles E.W. • Dunn M.F. Evidence that mutations in a loop region of the α-subunit inhibit the transition from an open to a closed conformation in the tryptophan synthase bienzyme complex. J. Biol. Chem. 1992; 267: 13028-13038 • Yang X.J. • Miles E.W. Threonine 183 and adjacent flexible loop residues in the tryptophan synthase α subunit have critical roles in modulating the enzymatic activities of the β subunit in the α2β2 complex. J. Biol. Chem. 1992; 267: 7520-7528 • Anderson K.S. • Miles E.W. • Johnson K.A. Serine modulates substrate channeling in tryptophan synthase. A novel intersubunit triggering mechanism. J. Biol. Chem. 1991; 266: 8020-8033 • Anderson K.S. • Kim A.Y. • Quillen J.M. • Sayers E. • Yang X.J. • Miles E.W. Kinetic characterization of channel impaired mutants of tryptophan synthase. J. Biol. Chem. 1995; 270: 29936-29944 • Ruvinov S.B. • Yang X.J. • Parris K.D. • Banik U. • Ahmed S.A. • Miles E.W. • Sackett D.L. 
Ligand-mediated changes in the tryptophan synthase indole tunnel probed by Nile Red fluorescence with wild type, mutant, and chemically modified enzymes. J. Biol. Chem. 1995; 270: 6357-6369 • Peracchi A. • Mozzarelli A. • Rossi G.L. Monovalent cations affect dynamic and functional properties of the tryptophan synthase α2β2 complex. Biochemistry. 1995; 34: 9459-9465 • Peracchi A. • Bettati S. • Mozzarelli A. • Rossi G.L. • Miles E.W. • Dunn M.F. Allosteric regulation of tryptophan synthase: effects of pH, temperature, and α-subunit ligands on the equilibrium distribution of pyridoxal 5′-phosphate-l-serine intermediates. Biochemistry. 1996; 35: 1872-1880 • Fan Y.X. • McPhie P. • Miles E.W. Regulation of tryptophan synthase by temperature, monovalent cations, and an allosteric ligand. Evidence from Arrhenius plots, absorption spectra, and primary kinetic isotope effects. Biochemistry. 2000; 39: 4692-4703
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 6, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7241365909576416, "perplexity": 21441.257561208786}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-06/segments/1674764494974.98/warc/CC-MAIN-20230127065356-20230127095356-00155.warc.gz"}
http://docs.php.net/manual/it/book.com.php
# COM and .Net (Windows) ### User Contributed Notes 7 notes robert dot johnson at icap dot com 2 years ago (From?) PHP 5.4.5 for Windows:If you are seeing this in your error log:Fatal error:  Class 'COM' not found You require this in php.ini:[PHP_COM_DOTNET]extension=php_com_dotnet.dllPreviously it was compiled as built-in on the Windows build.  I assume this has happened because the com extension can now be built as a shared component. acsandeep at gmail dot com 5 years ago If you are trying to get the properties of a Word document opened via COM object, you may need to define some constants in your script like so. <?php define('wdPropertyTitle', 1); define('wdPropertySubject', 2); define('wdPropertyAuthor', 3); define('wdPropertyKeywords', 4); define('wdPropertyComments', 5); define('wdPropertyTemplate', 6); define('wdPropertyLastAuthor', 7); $word = new COM("word.application") or die ("Could not initialise MS Word object.");$word->Documents->Open(realpath("Sample.doc")); $Author =$word->ActiveDocument->BuiltInDocumentProperties(wdPropertyAuthor); echo $Author; ?> Anonymous 5 years ago Add hyperlink at a Word Document's bookmark <?php // Create COM instance to word function clsMSWord($Visible = false)        {            $this->handle = new COM("word.application") or die("Unable to instanciate Word");$this->handle->Visible = $Visible; } function WriteHyperlink($Bookmark,$Path,$Text)        {                $objBookmark =$this->handle->ActiveDocument->Bookmarks($Bookmark);$range = $objBookmark->Range;$objHyperlink = $this->handle->ActiveDocument->Hyperlinks;$objHyperlink->add($range,$Path,"","",$Text); } ?> Dave Bachtel 5 years ago Hello everybody!Here is some helpful advice for people attempting to use COM with Microsoft MapPoint 2006 or 2009 with PHP.If you are using apache, it MUST be running under the same credentials as a desktop user that has already installed/run mappoint or modifiy the service and select the "Allow Service to Interact with Desktop" option. 
Otherwise, it won't work due to the EULA popup having to be accepted. Mappoint 2004 works just fine, this only applies to 2006 and 2009.For troubleshooting, the error that appears in the System event viewer is:The server {31851F82-AFE6-11D2-A3C9-00C04F72F340} did not register with DCOM within the required timeout.The error generated by PHP, if you happen to get lucky and let the COM() function timeout by cranking up set_time_limit() timeout is:<?phpset_time_limit(1000) ;$mapoint =  new COM("MapPoint.Application") or die("Unable to instantiate Mappoint COM object");?>Generates:Fatal error: Uncaught exception 'com_exception' with message 'Failed to create COM object MapPoint.Application': Server execution failed ' in [somefile.php]:9 Stack trace: #0 [somefile.php](9): com->com('MapPoint.Applic...') #1 {main} thrown in [somefile.php] on line 9Also, if you have multiple versions of MapPoint installed, you will need to run:c:\path\to\mappoint\MapPoint.exe /registerserver (using the correct folder path) to select the right version of the com object to use.  Gooood luck! Hello people! I'm trying to use COM class for LDAP, something like : $ADSI = new COM("LDAP:");$user = $ADSI->OpenDSObject("LDAP://".$server."/".$newuser_dn,$adminuser, $adminpassword, 1);$user->SetPassword($newuser_password);$user->SetInfo();But  it not conect.....don't change the password and I'm need that this works....Help me!!! When you work with MS Excel, Word, ... and other applications, never forget that COM doesn’t know their predefined constants, thus if you want to do sth. 
that would look like this in VB:With myRange.Borders(xlEdgeBottom)    .LineStyle = xlContinuous    .Weight = xlThin    .ColorIndex = xlAutomaticEnd Withyou should either use numbers or define constants above, like this:<?phpdefine('xlEdgeBottom', 9);define('xlContinuous', 1);define('xlThin', 2);define('xlAutomatic', -4105);$ex = new COM("Excel.Application", NULL, CP_UTF8) or Die ("Did not instantiate Excel");$wb = $ex->Application->Workbooks->Add();$ws = $wb->Worksheets(1);$xra = $ws->Range("A1:A6");$bs = $xra->Borders(xlEdgeBottom);$bs->LineStyle = xlContinuous;$bs->Weight = xlThin;$bs->ColorIndex = xlAutomatic;?>It is pointless to try to use text strings, i.e.<?php$bs =$xra->Borders('xlEdgeBottom');$bs->Weight = 'xlThin';?>this will cause an error: Unable to set property Weight of class Border ... -2 ilayansmano at gmail dot com 6 years ago Extracting text from Word Documents via PHP and COM <?php$word = new COM("word.application") or die ("Could not initialise MS Word object."); $word->Documents->Open(realpath("Sample.doc")); // Extract content.$content = (string) $word->ActiveDocument->Content; echo$content; $word->ActiveDocument->Close(false);$word->Quit(); $word = null; unset($word); ?> `
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5591112971305847, "perplexity": 28849.870016371504}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-52/segments/1418802769990.68/warc/CC-MAIN-20141217075249-00027-ip-10-231-17-201.ec2.internal.warc.gz"}
http://www.ck12.org/physics/Transfer-of-Electric-Charge/enrichment/Separation-and-Transfer-of-Charge-Example-3/
<img src="https://d5nxst8fruw4z.cloudfront.net/atrk.gif?account=iA1Pi1a8Dy00ym" style="display:none" height="1" width="1" alt="" /> # Transfer of Electric Charge ## Introduction to conduction, friction, and polarization. Estimated6 minsto complete % Progress Practice Transfer of Electric Charge MEMORY METER This indicates how strong in your memory this concept is Progress Estimated6 minsto complete % Separation and Transfer of Charge - Example 3 ### Explore More Sign in to explore more, including practice questions and solutions for Transfer of Electric Charge.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8387398719787598, "perplexity": 28252.244865629604}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-04/segments/1484560280835.60/warc/CC-MAIN-20170116095120-00248-ip-10-171-10-70.ec2.internal.warc.gz"}
http://mathoverflow.net/questions/129257/a-question-about-the-proof-of-beilinson-bernstein-localisation
A question about the proof of Beilinson-Bernstein localisation I'm trying to understand the proof of the Beilinson-Bernstein localisation theorem at the moment, but there's just one point where I'm having a mental block, and was wondering if anybody could clarify things for me. Specifically, it's this (not quite general) theorem which I'm trying to prove: Let $G$ be a semisimple algebraic group over $\mathbb{C}$, $B$ a Borel subgroup and $X=G/B$ the flag variety. Let $\mathfrak{g}$ be the Lie algebra of $G$, $\mathfrak{b}$ the Lie algebra of $B$, $\mathfrak{h}$ the Cartan subalgebra contained in $\mathfrak{b}$, $\mathfrak{b}=\mathfrak{h}\oplus\mathfrak{n}$ ($\mathfrak{n}$ nilpotent), $\mathfrak{g}=\mathfrak{n}^-\oplus\mathfrak{h}\oplus\mathfrak{n}$ (you get the picture). Writing $U(\mathfrak{g})$ for the universal enveloping algebra of $\mathfrak{g}$ we have $U(\mathfrak{g})=U(\mathfrak{h})\oplus(\mathfrak{n}^-U(\mathfrak{g})+U(\mathfrak{g})\mathfrak{n})$, by PBW. Let $\lambda:B\to \mathbb{C}^\times$ be a character, and let $\mathcal{L}^\lambda$ denote the $G$-equivariant invertible sheaf on $X$ with fiber $\mathbb{C}^{-\lambda}$ at $eG$. That is, $B$ acts on the trivial $\mathbb{C}^{-\lambda}$-bundle $G\times\mathbb{C}^{-\lambda}$ by $b(g,m)=(gb^{-1},(-\lambda)(b)m)=(gb^{-1},\lambda(b^{-1})m)$, and $B\backslash G\times(\mathbb{C}^{-\lambda})$ is a $G$-equivariant $\mathbb{C}^{-\lambda}$-bundle on $X$; $\mathcal{L}^\lambda$ is then its sheaf of sections. Since $\mathcal{L}^\lambda$ is $G$-equivariant we obtain a homomorphism $\alpha^\lambda:U(\mathfrak{g})\to\Gamma(X,\mathcal{D}_X^\lambda)$ where $\mathcal{D}_X^\lambda=\mathcal{L}^\lambda\otimes\mathcal{D}_X\otimes\mathcal{L}^{-\lambda}$ is the sheaf of differential operators on $\mathcal{L}^\lambda$. (tensor products taken over ${\mathcal{O}_X}$). 
Then, what I would like to prove is that the restriction of $\alpha_\lambda$ to the centre $Z(\mathfrak{g})$ of $U(\mathfrak{g})$ factors through the character $\chi_\lambda$ (i.e. the map $Z(\mathfrak{g})\to U(\mathfrak{h})$ coming from the direct sum decomposition above, composed with the map $\lambda:U(\mathfrak{h})=\operatorname{Sym}(\mathfrak{h})\to\mathbb{C}$). Of course, this isn't the whole theorem, but it's the only part I'm having trouble with. I believe I am correct in thinking that $\mathcal{L}^\lambda$ is nothing more than the pushforward from $G$ of a certain subsheaf of $\mathcal{O}_G$, namely the one whose sections $f$ are those satisfying $f(gb)=\lambda(b)f(g)$ for $g\in G,b\in B$. So it should be enough to show that $Z(\mathfrak{g})$ acts on that in the right way. (For some reason, I find the action of $\mathfrak{g}$ on $\mathcal{O}_G$ much easier to think about than its action on $\mathcal{L}^\lambda$.) But I am really stuck. I've looked in the book "D-modules, Perverse Sheaves and Representation Theory" by Hotta et al., where they seem to prove this on pages 278-279, but only found it confusing (and they gave the wrong definition of the Harish-Chandra homomorphism, which I found off-putting). I've also looked in Gaitsgory's notes (http://www.math.harvard.edu/~gaitsgde/267y/catO.pdf) where he seems to prove this (at least, the case $\lambda=0$) on pages 42-43, but that's also confusing. What's worse, is that apparently Gaitsgory's proof makes no use of the algebraic geometry of $\mathfrak{g}$, whereas Hotta et al. appear to use the Springer resolution of the nilpotent cone in an important way. - One suggestion is to be more careful about the formulation of the Harish-Chandra homomorphism, due to the varying notational conventions of your sources. The shift by $-\rho$ is sometimes hidden, but is essential. –  Jim Humphreys May 1 '13 at 14:11 Good point - I am keeping track of my rhos though, I believe! I was mistaken when I said the Hotta et al. 
book had an error, I just misread is all. –  user30576 May 2 '13 at 5:21 The universal enveloping algebra can be identified with right $G$-invariant differential operators on the group $G$, via the map sending an element of the Lie algebra to the corresponding left translation vector field (and vice versa with left and right switched). In particular, the center of $U(\mathfrak{g})$ is given by bi-invariant differential operators on the group, in two different ways. If I want to understand how elements of the center act on functions that satisfy $f(gb)=\lambda(b)f(g)$, then I should write them as $z=h(z) + n_1m_1+ \cdots+n_km_k$ where $m_i\in U(\mathfrak{n})\mathfrak{n}$ which is possible by the PBW theorem and the fact that central elements have weight 0 (here $h\colon Z(\mathfrak{g})\to U(\mathfrak{h})$ is the Harish-Chandra homomorphism). Thus, $z\cdot f= d\lambda (h(z)) f$; here I'm using $d\lambda$ to distinguish between characters of the group and Lie algebra. Ok, I'm basically done, but I cheated a little here, since here I was using the map of the center to bi-invariant operators for the right action, and you really want the one that comes from the left action, which a priori might be different. Let me be lazy, and note that we've now shown that the map of the center factors through some character, and that this is induced by some ring map $Z(\mathfrak{g}) \to U(\mathfrak{h})$ (maybe not the HC homomorphism). Thus, it suffices to check it at a Zariski dense set of points; this follows from Borel-Weil, since we know how the center acts on the sections of $\mathcal{L}^\lambda$.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9684513807296753, "perplexity": 108.93286304150391}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-23/segments/1405997888972.38/warc/CC-MAIN-20140722025808-00227-ip-10-33-131-23.ec2.internal.warc.gz"}
https://cnrs.hal.science/LAMA_UMR8050/search/index/?q=producedDateY_i%3A2009&submitType_s=file
Search - Archive ouverte HAL Access content directly 51 Results Deposit type : Fulltext ### On mean discounted numbers of passage times in small balls of Ito processes observed at discrete times [Research Report] RR-6813, 2009, pp.19 Reports ### On mean discounted numbers of passage times in small balls of Ito processes observed at discrete times Electronic Communications in Probability, 2009, 14, pp.19 Journal articles ### Penalized nonparametric drift estimation in a continuous time one-dimensional diffusion process 2009 Preprints, Working Papers, ... ### Cyclic and ruled Lagrangian surfaces in complex Euclidean space Bulletin Brazilian Mathematical Society, 2009, 40 (3), pp.341-369. ⟨10.1007/s00574-009-0015-y⟩ Journal articles ### Extremal domains for the first eigenvalue of the Laplace-Beltrami operator Annales de l'Institut Fourier, 2009, ⟨10.5802/aif.2438⟩ Journal articles ### A Bernstein type inequality and moderate deviations for weakly dependent sequences 2009 Preprints, Working Papers, ... ### Optimal length estimates for stable CMC surfaces in $3$-space forms Proceedings of the American Mathematical Society, 2009, 137, pp.2761--2765. ⟨10.1090/S0002-9939-09-09885-2⟩ Journal articles ### Weak order for the discretization of the stochastic heat equation. Mathematics of Computation, 2009, 78 (266), pp.845-863 Journal articles ### Well-posedness of the spatially homogeneous Landau equation for soft potentials Journal of Functional Analysis, 2009, 256 (8), pp.2542-2560. ⟨10.1016/j.jfa.2008.11.008⟩ Journal articles ### Optimal double stopping time 2009 Preprints, Working Papers, ... ### State estimation in quantum homodyne tomography with noisy data Inverse Problems, 2009, 25 (1), pp.22 Journal articles ### Phase-space analysis and pseudodifferential calculus on the Heisenberg group 2009 Preprints, Working Papers, ... 
### On the well-posedness of the spatially homogeneous Boltzmann equation with a moderate angular singularity Communications in Mathematical Physics, 2009, 289 (3), pp.803-824. ⟨10.1007/s00220-009-0807-3⟩ Journal articles ### On multifractality and time subordination for continuous functions Advances in Mathematics, 2009, 220 (3), pp.936-963 Journal articles ### Random walk in quasi-periodic random environment Stochastics and Dynamics, 2009, 9 (1), pp.47-70 Journal articles ### One-dimensional finite range random walk in random medium and invariant measure equation Annales de l'Institut Henri Poincaré (B) Probabilités et Statistiques, 2009, 45 (1), pp.70-103 Journal articles ### Large Deviations for Statistics of the Jacobi Process Stochastic Processes and their Applications, 2009, 119 (2), pp.518--533 Journal articles ### Une nouvelle caractérisation des sphères géodésiques dans les espaces modèles Comptes Rendus. Mathématique, 2009, 347 (19-20), pp.1197-1200 Journal articles ### Optimal multiple stopping time problem 2009 Preprints, Working Papers, ... ### Multiple recurrence for two commuting transformations 2009 Preprints, Working Papers, ... ### Singular limit of a two-phase flow problem in porous medium as the air viscosity tends to zero 2009 Preprints, Working Papers, ... ### A direct proof of the functional Santalo inequality Comptes Rendus. Mathématique, 2009, 347, pp.55 - 58. ⟨10.1016/j.crma.2008.11.015⟩ Journal articles ### Pointwise smoothness of space-filling functions Applied and Computational Harmonic Analysis, 2009, 26 (2), pp.181-199 Journal articles ### Penalized nonparametric drift estimation in a continuous time one-dimensional diffusion process 2009 Preprints, Working Papers, ... ### Estimation of second order parameters using probability-weighted moments 2009 Preprints, Working Papers, ... 
### A combined finite volume-finite element scheme for the discretization of strongly nonlinear convection-diffusion-reaction problems on nonmatching grids Numerical Methods for Partial Differential Equations, 2009, 26 (3), pp.612-646. ⟨10.1002/num.20449⟩ Journal articles istex ### Modelling track geometry by a bivariate Gamma wear process, with application to maintenance xx. Risk and Decision Analysis in Maintenance Optimization and Flood Management, IOS Press, Delft, pp.123--136, 2009 Book sections ### Schémas volumes finis multipoints pour grilles non orthogonales Mathématiques générales [math.GM]. Université Paris-Est, 2009. Français. ⟨NNT : 2009PEST1048⟩ Theses ### A CLASS OF COLLOCATED FINITE VOLUME SCHEMES FOR INCOMPRESSIBLE FLOW PROBLEMS 18th Conference on Scientific Computing, Mar 2009, Podbanské, Slovakia Conference papers ### Minimization of a Quasi-linear Ginzburg-Landau type energy Nonlinear Analysis: Theory, Methods and Applications, 2009, 71 (no 3-4), p. 860-875 Journal articles
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.762234091758728, "perplexity": 11016.972706160435}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-14/segments/1679296949573.84/warc/CC-MAIN-20230331051439-20230331081439-00509.warc.gz"}
https://zbmath.org/?q=an:1063.34052
zbMATH — the first resource for mathematics Examples Geometry Search for the term Geometry in any field. Queries are case-independent. Funct* Wildcard queries are specified by * (e.g. functions, functorial, etc.). Otherwise the search is exact. "Topological group" Phrases (multi-words) should be set in "straight quotation marks". au: Bourbaki & ti: Algebra Search for author and title. The and-operator & is default and can be omitted. Chebyshev | Tschebyscheff The or-operator | allows to search for Chebyshev or Tschebyscheff. "Quasi* map*" py: 1989 The resulting documents have publication year 1989. so: Eur* J* Mat* Soc* cc: 14 Search for publications in a particular source with a Mathematics Subject Classification code (cc) in 14. "Partial diff* eq*" ! elliptic The not-operator ! eliminates all results containing the word elliptic. dt: b & au: Hilbert The document type is set to books; alternatively: j for journal articles, a for book articles. py: 2000-2015 cc: (94A | 11T) Number ranges are accepted. Terms can be grouped within (parentheses). la: chinese Find documents in a given language. ISO 639-1 language codes can also be used. Operators a & b logic and a | b logic or !ab logic not abc* right wildcard "ab c" phrase (ab c) parentheses Fields any anywhere an internal document identifier au author, editor ai internal author identifier ti title la language so source ab review, abstract py publication year rv reviewer cc MSC code ut uncontrolled term dt document type (j: journal article; b: book; a: book article) Eigenvalue and stability of singular differential delay systems. (English) Zbl 1063.34052 The author is concerned with the relationship between eigenvalues and stability for linear delay differential algebraic equations with constant coefficients and of the form $$E\dot x(t)=Ax(t)+Bx(t-\tau)$$ with a singular matrix $E$. 
It is shown that if the matrix pencil $(A,E)$ is regular and $BEE^d=EE^dB$ (where $E^d$ denotes the Drazin inverse), the familiar results including the exponential estimate remain valid. MSC: 34K06 Linear functional-differential equations 34K20 Stability theory of functional-differential equations 34A09 Implicit equations, differential-algebraic equations Full Text: References: [1] Hale, J. K.: Introduction to functional differential equations. (1992) · Zbl 0785.35050 [2] Zheng, Z.: Theory of functional differential equations. (1994) [3] Wei, J.: The degenerate differential systems with delay. (1998) · Zbl 0938.34068 [4] Wei, J.; Zheng, Z.: The general solution for the degenerate differential system with delay. Acta math. Sinica 42, 769-780 (1999) · Zbl 1024.34051 [5] Wei, J.; Zheng, Z.: The constant variation formula and the general solution of degenerate neutral differential systems. Acta math. Appl. sinica 21, 562-570 (1998) · Zbl 0962.34060 [6] Wei, J.; Zheng, Z.: The general solution of degenerate difference systems with delay. J. math. Study 31, 44-50 (1998) · Zbl 0926.39003 [7] Wei, J.; Zheng, Z.: The algebraic criteria for the all-delay stability of two-dimensional degenerate differential systems with delay. Chinese quart. J. math. 13, 87-93 (1998) · Zbl 0938.34068 [8] Wei, J.; Zheng, Z.: On the degenerate differential systems with delay. Ann. differential equations 14, 204-211 (1998) · Zbl 0967.34060 [9] Wei, J.; Wang, Z.: The controllability of singular control systems. J. hunan univ. 26, 6-9 (1999) · Zbl 0967.93010 [10] Wei, J.; Zheng, Z.: The solvability of the degenerate differential systems with delay. Chinese quart. J. math. 15, 1-7 (2000) · Zbl 0992.34044 [11] Wei, J.; Zheng, Z.: The V-functional method for the stability of degenerate differential systems with delays. Ann. differential equations 17, 10-20 (2001) · Zbl 0995.34067 [12] Wei, J.; Song, W. 
Z.; Fei, S.: The function-controllability of the nonlinear control systems with state and control delay. (2000) · Zbl 0964.93012 [13] Dugard, L.; Verriest, E. I.: Stability and control of time-delay systems. (1998) · Zbl 0901.00019 [14] Jie, C.: On computing the maximal delay intervals for stability of linear delay systems. IEEE transl. Automat. control 40, 1087-1093 (1995) · Zbl 0840.93074 [15] Mao, W.; Sun, Y.; Cao, Y.: Output feedback stabilization of linear systems with time-varying delayed states and controls. Control theory appl. 15, 972-974 (1998) [16] Cheng, C.; Sun, Y.: Designing of the state feedback stabilizing controller of uncertain dynamic systems with time-varying delays. Acta automat. Sinica 24, 81-84 (1998) [17] Dai, L.: Singular control systems. (1989) · Zbl 0669.93034 [18] Xu, S.; Yang, C.: Stabilization of discrete-time singular systems: a matrix inequalities approach. Automatica 35, 1613-1617 (1999) · Zbl 0959.93048 [19] Wei, J.; Zheng, Z.; Xu, J.: The exponential estimation of the solution of degenerate differential system with delay. J. math. 21, No. 4, 425-428 (2001) · Zbl 0999.34067 [20] Wei, J.; Song, W. Z.: Controllability of singular systems with control delay. Automatica 37, 1873-1877 (2001) · Zbl 1058.93012 [21] Grispos, E.; Kalogeropoulos, G.; Stratis, I. G.: On generalized linear singular delay systems. J. math. Anal. appl. 245, 430-446 (2000) · Zbl 0955.34065 [22] Zhou, X.; Wei, J.: Eigenvalue distribution of degenerated NFDE with delay. Math. appl. 15, 48-51 (2002) · Zbl 1024.34071 [23] Wei, J.: Variation formula of time varying singular delay differential systems. J. math. 24, 161-166 (2003) · Zbl 1040.34075
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7731718420982361, "perplexity": 8265.585748370415}, "config": {"markdown_headings": false, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-18/segments/1461860127407.71/warc/CC-MAIN-20160428161527-00221-ip-10-239-7-51.ec2.internal.warc.gz"}
https://brilliant.org/problems/an-algebra-problem-by-dominick-hing/
# An algebra problem by Dominick Hing Algebra Level pending How many times do the graphs of $y = x$ and $y = x^2$ intersect? ×
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.29054638743400574, "perplexity": 5040.943590655504}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-04/segments/1484560280730.27/warc/CC-MAIN-20170116095120-00252-ip-10-171-10-70.ec2.internal.warc.gz"}
http://mathhelpforum.com/algebra/66265-rule-sim-eqs.html
# Math Help - Rule of sim eqs? 1. ## Rule of sim eqs? Hello. When eliminating the x or ys is there a rule for which you take away from each other when the signs are different. example equation A B becomes C D Do you always take the larger number away? always take the C away from D or vice versa? or is it determined by the multipliers used in A and B originally? C-D or D-C or does it change? Thanks Roger 2. Originally Posted by rogerroger Hello. When eliminating the x or ys is there a rule for which you take away from each other when the signs are different. example equation A B becomes C D Do you always take the larger number away? always take the C away from D or vice versa? or is it determined by the multipliers used in A and B originally? C-D or D-C or does it change? Thanks Roger It depends. For example: x+y = 5 (1) x-y = 4. (2) If you wanted to get rid of the xs, then you subtract 1 from 2, OR subtract 2 from 1 since both xs are positive in both equations. When you have two positive numbers, and you want to get 0, then you have to subtract one from the other. To get rid of the ys you have to ADD (1) to (2), OR subtract (2) from (1), because one is positive and one is negative. When you have 1 negative number, one positive number, you have to subtract the negative from the positive, or add the negative to the positive. In another example: x-y = 5 (1) -x-y = 4 (2) Here, if you want to get rid of the xs, you use the rules from above! But if you want to get rid of ys, then you have to subtract (1) from (2), OR (2) from (1), since they are both negative. And indeed, if you had two equations in which x and y have different coefficients, then you multiply both or one of the equations by whatever is necessary to get a common coefficient. 3. Don't memorize "rules"- think about what you are doing. 
If I see the equations, x+ 2y= 4 and 3x- 2y= 5, I would notice that "y" in the first equation has coefficient 2 and "y" in the second has coefficient -2 and so if I add, 2y+ (-2y)= 0, I eliminate the "y". Adding the two equations gives x+ 3x+ 2y- 2y= 4+ 5 or 4x= 9. If I had different numbers I could multiply by other numbers perhaps in order to be able to eliminate a variable. But the important thing is to think about the result I want to achieve, a single equation with only one variable, rather than think about a "rule" I have to follow. Mathematics has a lot of rules it is easy to follow blindly. But mathematics is about ideas, not rules.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8620871305465698, "perplexity": 472.4277189395008}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-15/segments/1398223206647.11/warc/CC-MAIN-20140423032006-00225-ip-10-147-4-33.ec2.internal.warc.gz"}
http://blogs.scienceforums.net/swansont/archives/category/tyagfiti
## Archive for the 'TYAGFITI' Category Does the Dog Die? Do you turn off Old Yeller before the end so you can pretend that he lived a long and happy life? Did a cute pet on a movie poster make you think it would be a fun comedy but it turned out to be a pet-with-a-terminal-illness tearjerker instead? Are you unable to enjoy the human body count in a horror movie because you’re wondering whether the dog’s going to kick the bucket? Have you ever Googled “Does the [dog/cat/horse/Klingon targ] die in [movie title]?” I imagine this is useful if you are trying to avoid tears and wailing in young ‘uns. Funny thing is that there are movies where concern for an animal’s well-being is far from the main focus, and might be considered a little strange, like The Godfather. Or there’s Indiana Jones and the Last Crusade, which has a happy face because Indiana the dog is OK, but acknowledges that rats are torched and birds collide with a plane. ### More Tubespawn Another Twitter-cartoon marriage made possible by dem tubes: @Peanutweeter @Peanutweeter matches kinda random Twitter posts with somewhat less than random Peanuts® comic strips by Charles Schulz. Genius. 140 characters fits nicely into a cartoon balloon. While I want to carry Stephen Hawking on my back, like “Master Blaster”. He’s the brains, and I’m the brawn until I get tired or he pees on me. is funny, it’s even better when Linus says it, gesturing maniacally. ### And Al Saw That it was Good For this to make sense, you need to see the xkcd cartoon “Heaven” (as usual, click to go there so you can see the hover tag) This being the internet (thank you, Al Gore), someone went and made this into a real game (hey, what were the odds?) Every so often, a large piece comes along and fits into the existing landscape. 
The cartoon is basically the opposite of Hell, of course, which also has a “playable” version ### Overheard in the Lab of the Day A colleague was whistling the marching tune one hears in The Bridge on the River Kwai, and after I asked him if we were suddenly in the British Army, I wondered what the name of the song was. Luckily, there’s a way to find such things out, called the internet (which is apparently a series of tubes.) Turns out it’s called The Colonel Bogey March, and the Wikipedia entry implies it has some interesting (not G-rated) lyrics, which it does. ### NonOedipal Snakes On a NonOedipal Plane. But here’s the thing: if someone asked me five minutes ago what tangled-up snakes demented checkerboards and crazy twisty surfaces have in common, what would you have answered? This is why I love mathematics. The moment when you realize that something seemingly arbitrary and confusing is actually part of something. It’s better than the cleverest possible ending to any crime show or mystery novel, because that’s only the beginning. There’s more wonderful doodling at her website, or on youtube. All full of math-y goodness, puns, denunciation of poor teaching, stop-action video and a remarkable lack of commas. (Does she ever inhale?) You can show these to people, and it’s possible they won’t know they learned some math. Sneaky. ### Chirp Finally signed up for “The Twitter.” When I first learned of it, I thought it would just be a compendium of noise, since the threshold to tweet is so low. And this is precisely why I don’t do Facebook very much — I am just not all that interested in the level of minutia of my friends’ lives, and I shudder to think they are that interested in mine (or feel that they’re missing out because I don’t post such trivialities very often). But today I found out that Steve Martin is tweeting, so I signed up to follow that. 
I don’t want it to be a collection of “Boy, I could use more fiber in my diet” or “De-linting my belly button!” tweets. On the other hand, I do have these random thoughts, which I occasionally blog. That’s the kind of stupid stuff I’ll probably tweet. Probably. I understand it’s protocol to follow those who follow you, but … no. I’m not going to return the favor in order to be a statistic, or even to be polite. I am a physicist, and have no social skills. Follow only if you have some slight possible interest in the content. ### Restore Truthiness, Enhance Teachiness Any of you who watch The Colbert Report might have seen a snippet last week, in which Colbert mentioned a reddit-led push to have him hold a “Restore Truthiness” rally, as a rebuttal to Glenn Beck’s recent trip to the Lincoln Memorial. Well, since online petitions are easy, they are trying to show sincerity by putting their money where their mouth is, via a cause that is supported by several of us in the physics/science blogging community. See, anyone can join a reddit or Facebook group or sign a petition. It takes, like, one minute and doesn’t demonstrate much effort. So the rally movement has been looking for ways to show that they’re serious, that they’re willing to lift a finger to make this happen. And an idea has just been hatched: pony up some cash to one of Stephen’s favorite charities. Stephen Colbert is a board member of a non-profit called DonorsChoose.org. It’s a place where schoolteachers can make a request for the supplies they need and aren’t getting. As the name suggests, donors get to choose which specific teacher they want to support (lazy donors can just let the charity decide). If “Restore Truthiness” can raise a large sum of money, it will be a fantastic show of strength. And even if it fails as a publicity stunt, it’ll still make a difference in our world. Restoring Truthiness Giving Page (More than 2,000 donors and $80,000 raised as of writing this. Wow.) 
Update: 3300 donors, and over $135k at ~1730 EDT, obliterating their goal of $101,010 by 10/10/10. In a day. Holy crap. Update II: Colbert responds I almost had a pregnant when I saw what you had done at DonorsChoose.org for classrooms around the country. I am humbled and honored (a rare combination for me), and find myself wishing there was a Look of Approval. ### Transmogrify Your Life Calvin & Hobbes search engine It’s not perfect, because sometimes scientific progress goes, “Boink!” ### The Thing Ted Stevens Wasn't Talking about A series of tubes: A live map of the London Underground (which is not a political movement; I checked) This map shows all trains on the London Underground network in approximately real time. The yellow pins are stations (click for a local map of that station), the red pins trains. TYAGFITI (Thank you, Al Gore, for inventing the internet)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.27148711681365967, "perplexity": 3151.3713340271847}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-17/segments/1492917122174.32/warc/CC-MAIN-20170423031202-00118-ip-10-145-167-34.ec2.internal.warc.gz"}