text
stringlengths
14
5.77M
meta
dict
__index_level_0__
int64
0
9.97k
import logging
import os
import pexpect
import unittest
import tempfile
import sys
import re
import functools


class TestError(Exception):
    """Raised when a smoke-test run fails.

    Carries the unittest result object so callers can inspect the
    individual errors and failures.
    """

    def __init__(self, result):
        self.result = result


class LoggerWriter(object):
    """Minimal file-like sink that echoes pexpect's read log to stdout."""

    def __init__(self):
        pass

    def write(self, data):
        # pexpect hands us raw chunks; strip trailing whitespace so the
        # echoed output is not double-spaced.
        print(data.rstrip())

    def flush(self):
        # Nothing is buffered, so flushing is a no-op.
        pass


def spawn(command):
    """Spawn `command` under pexpect, echoing everything it prints."""
    child = pexpect.spawn(command)
    child.logfile_read = LoggerWriter()
    return child


def sinan(command):
    """Decorator factory: run the sinan task `command` and hand the
    spawned pexpect child to the wrapped test method.

    The project version is read out of sinan.config so the matching
    _build/sinan/lib/sinan-<vsn>/ebin directory can be put on the
    Erlang code path before invoking the task.
    """
    def check_accepts(f):
        @functools.wraps(f)
        def new_f(*args, **kwds):
            print("Running Command %s in %s" % (command, os.getcwd()))
            self = args[0]
            with open(os.path.join(self.sinan_dir, "sinan.config"), "r") as fl:
                data = fl.read()
            vsn = re.search(r"""{project_vsn, "(.+)"}""", data).group(1)
            ebin = os.path.join(self.sinan_dir, "_build", "sinan", "lib",
                                "sinan-" + vsn, "ebin")
            child_cmd = ("erl -noshell -pa %s "
                         " -s sinan manual_start"
                         " -s sinan main"
                         " -extra %s" % (ebin, command))
            print(child_cmd)
            child = spawn(child_cmd)
            res = f(self, child, *(args[1:]), **kwds)
            print("Finished %s successfully" % command)
            return res
        return new_f
    return check_accepts


class AppDesc(object):
    """Value object describing the project that `sinan gen` will create."""

    def __init__(self, user_name=None, email=None, copyright_holder=None,
                 project_name=None, project_version=None, app_names=None):
        self.user_name = user_name
        self.email = email
        self.copyright_holder = copyright_holder
        self.project_name = project_name
        self.project_version = project_version
        self.app_names = app_names


def run_tests(class_obj):
    """Run every test of `class_obj`; raise TestError on any failure."""
    cases = unittest.defaultTestLoader.loadTestsFromTestCase(class_obj)
    result = unittest.TextTestRunner().run(cases)
    if len(result.errors) > 0 or len(result.failures) > 0:
        raise TestError(result)


class SmokeTest(unittest.TestCase):
    def get_project_root(self, cwd):
        return os.path.abspath(cwd)

    def setUp(self):
        # Work in a throwaway directory; sinan itself is expected to live
        # in the directory the suite was started from.
        self.release_name = None
        self.release_version = None
        self.smokedir = tempfile.mkdtemp(prefix='smoke_test_')
        self.current_dir = os.getcwd()
        self.sinan_dir = self.current_dir
        sys.path.append(self.current_dir)
        os.chdir(self.smokedir)

    def tearDown(self):
        os.chdir(self.current_dir)

    def assert_dirs_exist(self, base, *dirs):
        """Assert each entry in `dirs` (a name or a list of path
        segments) exists under `base` as a directory."""
        for d in dirs:
            if isinstance(d, list):
                check_dir = os.path.join(base, *d)
            else:
                check_dir = os.path.join(base, d)
            self.assertTrue(os.path.isdir(check_dir))

    def assert_files_exist(self, base, *files):
        """Assert each entry in `files` (a name or a list of path
        segments) exists under `base` as a regular file."""
        for f in files:
            if isinstance(f, list):
                check_file = os.path.join(base, *f)
            else:
                check_file = os.path.join(base, f)
            self.assertTrue(os.path.isfile(check_file))

    def do_apply(self, fun_list, arg):
        """Thread `arg` through the named methods, returning the result."""
        res = arg
        for n in fun_list:
            f = getattr(self, n)
            res = f(res)
        return res

    @sinan("gen")
    def run_gen(self, child, appdesc):
        """Drive sinan's interactive `gen` dialogue from `appdesc`."""
        child.expect("your name> ")
        child.sendline(appdesc.user_name)
        child.expect("your email> ")
        child.sendline(appdesc.email)
        child.expect('copyright holder \("%s"\)> ' % appdesc.user_name)
        child.sendline()
        child.expect('project name> ')
        child.sendline(appdesc.project_name)
        child.expect('project version> ')
        child.sendline(appdesc.project_version)
        child.expect('Please specify the ERTS version \(".*"\)> ')
        child.sendline()
        child.expect('Is this a single application project \("n"\)> ')
        child.sendline()
        child.expect("app> ")
        child.sendline(appdesc.app_names[0])
        for n in appdesc.app_names[1:]:
            child.expect('app \(""\)> ')
            child.sendline(n)
        child.expect('app \(""\)> ')
        child.sendline()
        child.expect('\("y"\)> ')
        child.sendline()
        child.expect("Project was created, you should be good to go!")
        child.expect(pexpect.EOF)
        return appdesc

    def verify_gen(self, a):
        """Check the on-disk layout produced by run_gen."""
        projdir = os.path.join(os.getcwd(), a.project_name)
        self.assert_dirs_exist(projdir, "config", "lib")
        self.assert_files_exist(projdir,
                                ["config", "sys.config"],
                                "sinan.config")
        for n in a.app_names:
            ppath = os.path.join(projdir, "lib", n)
            self.assert_dirs_exist(ppath, "ebin", "src", "include", "doc")
            self.assert_files_exist(ppath,
                                    ["src", n + "_app.erl"],
                                    ["src", n + "_sup.erl"],
                                    ["src", n + ".app.src"])
        return a

    # gen a new project in the test dir
def do_gen(self, appdesc): return self.do_apply(["run_gen", "verify_gen"], appdesc) def build_validate(self, child, appdesc): child.expect(pexpect.EOF) build_tmp = self.get_build_root_path() build_dir = os.path.join(*(build_tmp)) self.assertTrue(build_dir) for n in appdesc.app_names: app_dir = os.path.join(build_dir, "lib", "%s-0.1.0" % n) print app_dir self.assert_dirs_exist(app_dir, "ebin", "src", "include", "doc") self.assert_files_exist(app_dir, ["src", n + "_sup.erl"], ["src", n + "_app.erl"], ["ebin", n + "_sup.beam"], ["ebin", n + "_app.beam"]) return appdesc # build the project @sinan("build") def do_build(self, child, appdesc): return self.build_validate(child, appdesc) # clean the project @sinan("clean") def do_clean(self, child, appdesc): child.expect(pexpect.EOF) self.assertTrue(not os.path.isdir(os.path.join(os.getcwd(), "_build"))) return appdesc # test the project @sinan("test") def do_t(self, child, appdesc): child.expect(pexpect.EOF) return appdesc # release @sinan("release") def do_release(self, child, appdesc): child.expect(pexpect.EOF) version = appdesc.project_version name = appdesc.project_name build_tmp = self.get_build_root_path() build_tmp.append("releases"), build_tmp.append(version) version_dir = os.path.join(*build_tmp) print("Checking version directory at %s " % version_dir) self.assert_files_exist(version_dir, "%s.boot" % name, "%s.rel" % name, "%s.script" % name, "sys.config") return appdesc # dist (check the tarball) @sinan("dist") def do_dist(self, child, appdesc): child.expect(pexpect.EOF) build_tmp = self.get_release_root_path() build_tmp.append("tar") build_tmp.append("%s-%s.tar.gz" % (appdesc.project_name, appdesc.project_version)) tar_file = os.path.join(*build_tmp) print tar_file self.assertTrue(os.path.isfile(tar_file)) return appdesc def do_run(self, appdesc): self.current_app_desc = appdesc a = self.do_gen(appdesc) self.project_dir = os.path.join(self.smokedir, a.project_name) os.chdir(os.path.join(self.project_dir)) 
self.do_apply(["do_build", "do_clean", "do_build", "do_t", "do_release", "do_dist"], a) def get_build_root_path(self, project_dir=None, release_name=None, release_version=None): release_root = self.get_release_root_path(project_dir) if not release_name and not self.release_name: release_name = self.current_app_desc.project_name elif not release_name: release_name = self.release_name if not release_version and not self.release_version: release_version = self.current_app_desc.project_version elif not release_version: release_version = self.release_version release_root.append(release_name) return release_root def get_release_root_path(self, project_dir=None): if not project_dir: project_dir = self.project_dir return [project_dir, "_build"]
{ "redpajama_set_name": "RedPajamaGithub" }
1,886
Q: TypeError: string indices must be integers with pandas datareader I want to print out SPY's stock data; however, it keeps showing "TypeError: string indices must be integers" import pandas_datareader.data as web spy = web.get_data_yahoo('SPY',start='2022-12-23',end='2022-10-24') print(spy) A: I would solve it this way: import pandas from pandas_datareader import data as pdr import yfinance as yfin yfin.pdr_override() spy = pdr.get_data_yahoo('SPY', start='2022-10-24', end='2022-12-23') print(spy) I think it will work.
{ "redpajama_set_name": "RedPajamaStackExchange" }
8,696
The World Behind the Veil: What Listening to Women Taught This Man nietzschesbreeches — June 29, 2014 This is a guest piece by Mazen Abdallah, an ex-Muslim, comedian, and teacher. He is an American of Syrian-Lebanese origin who lives in Lebanon, and would like to talk about how reading the Ex-Hijabi Photo Journal–which you should go check out if you haven't already–has influenced his perception of the culture around him, and the drastic differences in the ways female and male bodies are perceived. I've never really asked veiled women or ex-veiled women about their experiences with the veil. There are a number of reasons for that. The first is that I come from a culture where the veil was totally normal. So asking someone about it would be really weird, it'd be like asking why someone wears shoes. The second reason was that I assumed I knew the story already. To me, there were two categories: Women who were forced to veil and women who did so by choice. I never really thought past it at all. Over time I saw the nuances more and more, but for some reason I didn't really ask anyone for the full story. I debated the veil's societal role, I passionately argued with people about the rights of women, but I never stopped and asked a woman 'Hey, what is/was it like for you to wear a veil'. Even when I thought I was this progressive, cultured guy advocating the rights and critiquing a society that would curtail the freedoms of women, I didn't make an effort to actually understand the lives of women who had worn the veil for any reason whatsoever. To me, it boiled down to 'someone is forcing you to do something that you do not want to do' and it became this basic matter of personal freedoms. But there was so much more that I wasn't seeing. The fact is, many women develop a complex relationship with the veil because it represents so many different things: identity, family, spirituality, personal development. It was so much more than either doing something or not doing it. 
First of all I realized that, a lot of the time, it wasn't necessarily forced upon the children by their parents. Some women decided to wear it as part of a philosophical decision in their exploration of Islam. Some were emotionally blackmailed, pressured by their families and their communities. Some came into contact with pro-veil ideology. Others wore it to fit in. That's one of the things I learned: The veil means different things to different people. But one common narrative came about as a result of it. I realized how much emphasis was placed by Islamic culture on conservative dress and being presentable in a certain way. Every kid is forced to do things by their parents. Like, put on this sweater before you go outside, do your homework, etc. At the end of the day, that's what parents do, they put their feet down. So if you think about it that way, maybe the veil isn't so bad. But when I started reading Ex Hijabi Fashion, I realized that parents don't just walk in, hand the kid a scarf and tell them they're wearing it now. They're giving them a philosophy, an ideology. They're telling their girls that they need to cover themselves up, to be modest, to avoid attracting attention from boys. In some cases they'll get in the heads of these girls and make them feel shame because of their bodies. I was forced to do a great many things when I was a kid. I'm a grown-ass man and my mom still puts her foot down. But I was never made to feel conscious of my body or exposing it. I never really looked that much into the veil. To me, it was about covering up your parts so that men wouldn't be tempted by you. And once the veil came off, boom, not religious anymore, not veiled anymore, problem solved, let's move on. But the women I read about on Ex-Hijabi fashion had gone through so much more than covering up. They had been made to look at their own bodies in a shameful way. To feel self-conscious and uncomfortable in their own skin. 
Even some that had veiled of their own volition would start to feel this way about themselves. We all have body issues, hell, I have a bunch of my own. But I never felt this need or desire to cover myself up and obscure a part of me. Like, I probably should. I'm overweight, and fairly conscious about my man-tits, but past that I have like no problem taking off my clothes. Even if I'm in company, I end up taking off my shirt or (if I've been drinking) at some point my pants and I have no problem with it. Obviously guys have a threshold for that sort of thing so I'm eventually asked to put my clothes right back on, but past that I don't really mind having them off. And I realize, I've felt embarrassment about my body plenty of times, like you would with a house you haven't properly cleaned up. But I've never really felt shame. I've never really felt that it was wrong of me to expose my body. I laughed like a madman every time I made a dick of myself in public in a way that involved nudity, and it just didn't matter. And hell, a lot of guys I know were also like that, whipping their dicks out for comic effect or mooning each other. We never had anyone tell us we should be ashamed of our bodies. The veil isn't the problem here, the problem is the culture that the veil emerges from. What surprised me in some cases was that the family wouldn't really be particular about the veil, but they would have their own strict set of modesty rules that shamed women. You didn't need to have a veil on to feel shame. Ultimately my eyes were opened to the diverse range of experiences women faced with the veil. It opened my eyes to the way that being asked to cover up and be modest, demure and conservative affected them and changed their outlook. It opened my eyes to the fact that veiling sometimes had little to do with their families but had more to do with their own body image or ideology. 
And I think, before we start talking about the veil and what it means in society and who can wear it and 'oh look at this fatwa', we should maybe ask women what they think. Giant thanks to Mazen! If you've been wondering where I've been these past couple of weeks, I've been focusing a lot of my energies on the Ex-Hijabi Fashion Photo Journal. Regular posts to resume shortly. I love you all! -Marwa/Hiba Why I now have a 'Donate' button on this blog. The Etiology of Cultural Hegemony I don't oppose the hijab because I was forced; I oppose the hijab because it sucks nietzschesbreeches GenderHuman Rights Women's Rights vs Anti- Muslim Bigotry: An Unfortunate Tension GenderHuman RightsUncategorized Honor Violence, Qandeel Baloch, and the Straw that Broke my Heart
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
8,955
Joe Mixon NFL Winners and Losers: Joe Burrow is all the way back and the Bengals' future is bright American football quarterback It was lost amid Justin Herbert's great finish to last season, but Joe Burrow was practically even with Herbert before his knee injury. And we've been reminded this season how hard it can be for rookie quarterbacks to play well. Burrow validated the Cincinnati Bengals' decision to take him first overall in the first half of last season, but the problem was that knee injury. Burrow tore multiple ligaments, making his return less than certain. He'd be back on the field but there was no guarantee he'd ever be the same. Ask a Washington fan about Robert Griffin III sometime. Those concerns have been erased. Not that there was any lingering doubt after Burrow's solid start to the season, but it was obvious Burrow was all the way back on his first touchdown in a 41-10 rout of the Pittsburgh Steelers. He rolled to his left and took off running, made some moves in the open field and got into the end zone. It was his first rushing touchdown this season. He didn't look like a quarterback coming off a serious knee injury. The questions about the Bengals coming into the season weren't limited to Burrow's health. Zac Taylor was 6-25-1 through his first two seasons as head coach. The Bengals had some talent but it was hard to predict big things for them. It's the Bengals, after all. A lot of those doubts are being erased too. Cincinnati looks like a team on the rise. The Bengals are in great position to make the playoffs and maybe win the division. There has been some inconsistency, but a few strong performances as well. The Bengals have already blown out division rivals Baltimore and Pittsburgh this season. They swept the Steelers and were clearly the better team in both games. The skill-position talent is fantastic. Ja'Marr Chase is a rookie of the year candidate. Tee Higgins is good too; he had 114 yards and a touchdown against the Steelers. 
Joe Mixon had a monster game Sunday. He became the first running back in 20 years to rush for 100 yards in the first half against the Steelers, according to ESPN Stats and Info. Mixon finished with 165 yards rushing and two touchdowns. The Bengals are going to get better in future years, too. They can focus resources on the offensive line, which is an area of potential growth, and reinforcing a defense that has been much better than expected. And Burrow will get better too. He is throwing too many interceptions this season. Part of that might be playing behind a below-average line. He's also a very young quarterback who will continue to improve and cut out mistakes. Burrow makes enough great plays, like a nice 32-yard touchdown to Higgins on Sunday, that there's very little question he's going to be a good quarterback for many years. A foundation is in place for Cincinnati to be very good. We've seen what they're capable of in the wins over the Ravens and Steelers. The biggest question for any NFL team is having a quarterback in place. The Bengals couldn't be sure they had that quarterback due to Burrow's devastating knee injury last season. But that box is checked, again. Joe Burrow of the Cincinnati Bengals celebrates after running for a touchdown against the Steelers. (Photo by Justin Casterline/Getty Images) Here are the winners and losers from Week 12 of the NFL season: Joe Barry: Barry, the Green Bay Packers' defensive coordinator, had an interesting plan against the Los Angeles Rams. They rarely blitzed. It worked. Matthew Stafford struggled as the Packers sat back in coverage, Cooper Kupp had his quietest day of the season and the Packers defense made the key play of the game with a pick-six late in the third quarter of a big 36-28 win. Barry has had a good season running the Packers defense. That side of the ball has dealt with key injuries and they have continued to play well most weeks. Barry has a lot to do with that. 
The Packers' offense is good as usual and the defense might be a reason Green Bay takes the next step this postseason. Sunday's win was a huge step in the Packers' quest to get the No. 1 seed in the NFC and a bye. Patrick Surtain II: The Denver Broncos' decision to pick Surtain over Justin Fields will be questioned as long as Fields develops and the Broncos don't find an answer at quarterback. However, one thing that won't be argued is that Surtain is a very good player. Surtain had two key interceptions, one in the end zone and another that went off Austin Ekeler's hands that Surtain picked and returned for a touchdown to seal the Broncos' 28-13 win over the Los Angeles Chargers. The Broncos have been up and down but they're hanging around in the playoff race at 6-5. Surtain's fine rookie season has played a role in that. Tampa Bay Buccaneers defense: When the Buccaneers brought back all their starters from the Super Bowl championship team, that wasn't just Tom Brady and his friends on offense. The Bucs have a championship defense and that's what saved them in a 38-31 win over the Indianapolis Colts on Sunday. The Colts had all the momentum, leading 24-14. Then Shaq Barrett had a strip-sack of Carson Wentz, and that was a turning point. Another huge play was safety Antoine Winfield making a great interception on a deep pass to Michael Pittman Jr. For most of the game the Bucs' elite run defense turned NFL rushing leader Jonathan Taylor into a non-factor. The offense did enough, especially on its final drive. Brady slowly marched the Bucs downfield, and then Leonard Fournette broke a 28-yard touchdown run, his fourth touchdown of the game, with 20 seconds left.The offense did its part. But it was the defense that gave the Bucs a chance to put together that game-winning drive. New York Jets: Sunday's win by the Jets won't help their draft standing, but it's always good for a young team to experience winning. 
Yes, it was just the Houston Texans that the Jets beat 21-14 on Sunday, but it's still a win. Zach Wilson made a few plays, the defense that has been bad this season tightened up after giving up two early touchdowns, and the Jets improved to 3-8. It's not a win that will go in the time capsule, but it still matters. Maybe the Jets can pick up another win or two before the season is done, against teams better than the Texans. Cordarrelle Patterson: Patterson may be the most surprising player in the NFL this season. He was always a good kickoff returner but never did a ton on offense. The Atlanta Falcons beat the Jacksonville Jaguars 21-14, and Patterson was the key player. He had 102 yards rushing, 27 receiving and scored two touchdowns. Patterson was with four teams his first eight seasons and never had much impact on offense. At age 30, he has finally emerged as a fantastic offensive threat. Kirk Cousins: Cousins had his chances to lead the Minnesota Vikings to a win on Sunday. He just couldn't get it done. Aside from the embarrassing moment in which Cousins lined up under right guard for a key fourth-and-goal in the fourth quarter, causing the Vikings to call a timeout, he misfired frequently late in the game. On the final drive, with a chance to at least tie, Cousins threw wildly on third and fourth down and the San Francisco 49ers hung on to a 34-26 win. Minnesota's offense scored just one touchdown on five second-half possessions in a winnable game. One of those possessions ended with a bad Cousins interception deep in Vikings territory that the 49ers turned into a short touchdown. Cousins has had a good season. He gets criticized too often, and fans don't remember the times he actually has led the Vikings back to a win. But he didn't play well enough on Sunday. The Panthers' playoff outlook: It's hard to fault the Carolina Panthers for believing they could be a playoff team. They started 3-0. They traded for cornerbacks C.J. 
Henderson and Stephon Gilmore, being aggressive in what looked like a promising season. The hope for the playoffs is fading fast. The Panthers fell to 5-7 with a bad 33-10 loss at the Miami Dolphins. Cam Newton looked really bad. Newton was 3 of 15 with two interceptions in the first half. He was benched in the fourth quarter, with a 5.8 passer rating. His return to the Panthers has been a fun story, but Sunday's game renews questions about what he has left as a passer. The Panthers could still turn it on and make the playoffs. An expanded playoff field keeps hope alive. But it has been a long time since they've looked like a playoff team. Jalen Hurts: Hurts can play. He has had enough good games for the Philadelphia Eagles to prove that. The problem is that his bad games are really bad. Sunday was one. Hurts threw three interceptions and had less than 100 passing yards in a 13-7 loss against the New York Giants. It was an ugly performance. The worst of the interceptions came near the goal line at the end of the first half, when Hurts rolled out and tossed it right to Giants linebacker Tae Crowder. It's hard to tell which Eagles receiver Hurts was even throwing to. The Eagles still had a shot to win, but on fourth down Jalen Reagor dropped a pass in his hands near the goal line. Reagor is a certified first-round bust, but Eagles fans knew that already. The big question the rest of the way is Hurts and his future with the team. Hurts has probably earned the chance to be the Eagles starter in 2022. But that's not guaranteed, especially if he has more games like Sunday before the season is over. Titans offensive coaches: The Titans' best skill-position player is Dontrell Hilliard. Let that sink in. Hilliard was out of football for two months before Tennessee signed him to its practice squad in late October. It's nearly impossible to figure out how the Titans can generate offense. Julio Jones, A.J. Brown and Derrick Henry are all on injured reserve. 
The offense was top heavy coming into the season and paper thin without those three stars. Hilliard had a nice game but the Titans as a whole did little in a 36-13 loss to the New England Patriots. The Titans are still in fine shape in the AFC South, but it'll be a struggle each week to score points. The Bergen Record NFL playoff picks: Our expert predictions for every game against the spread Throughout the NFL season and playoffs, our staff across the USA TODAY NETWORK Atlantic Region makes picks for every game against the spread. 'Good chance' 49ers get first-round pick for Jimmy Garoppolo, Peter King says If the 49ers look to trade Jimmy Garoppolo in the offseason, Peter King believes there is a "good chance" they can get a first-round pick in return.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
624
#ifndef __UIRICHTEXT_H__ #define __UIRICHTEXT_H__ #include "ui/UIWidget.h" #include "ui/GUIExport.h" NS_CC_BEGIN /** * @addtogroup ui * @{ */ namespace ui { /** *@brief Rich text element base class. * It defines the basic common properties for all rich text element. */ class CC_GUI_DLL RichElement : public Ref { public: /** *@brief Rich element type. */ enum class Type { TEXT, IMAGE, CUSTOM }; /** * @brief Default constructor. * @js ctor * @lua new */ RichElement(){}; /** * @brief Default destructor. * @js NA * @lua NA */ virtual ~RichElement(){}; /** * @brief Initialize a rich element with different arguments. * * @param tag A integer tag value. * @param color A color in @see `Color3B`. * @param opacity A opacity value in `GLubyte`. * @return True if initialize success, false otherwise. */ bool init(int tag, const Color3B& color, GLubyte opacity); protected: Type _type; int _tag; Color3B _color; GLubyte _opacity; friend class RichText; }; /** *@brief Rich element for displaying text. */ class CC_GUI_DLL RichElementText : public RichElement { public: /** *@brief Default constructor. * @js ctor * @lua new */ RichElementText(){_type = Type::TEXT;}; /** *@brief Default destructor. * @js NA * @lua NA */ virtual ~RichElementText(){}; /** * @brief Initialize a RichElementText with various arguments. * * @param tag A integer tag value. * @param color A color in Color3B. * @param opacity A opacity in GLubyte. * @param text Content string. * @param fontName Content font name. * @param fontSize Content font size. * @return True if initialize scucess, false otherwise. */ bool init(int tag, const Color3B& color, GLubyte opacity, const std::string& text, const std::string& fontName, float fontSize); /** * @brief Create a RichElementText with various arguments. * * @param tag A integer tag value. * @param color A color in Color3B. * @param opacity A opacity in GLubyte. * @param text Content string. * @param fontName Content font name. * @param fontSize Content font size. 
* @return RichElementText instance. */ static RichElementText* create(int tag, const Color3B& color, GLubyte opacity, const std::string& text, const std::string& fontName, float fontSize); protected: std::string _text; std::string _fontName; float _fontSize; friend class RichText; }; /** *@brief Rich element for displaying images. */ class CC_GUI_DLL RichElementImage : public RichElement { public: /** * @brief Default constructor. * @js ctor * @lua new * */ RichElementImage(){_type = Type::IMAGE;}; /** * @brief Default destructor. * @js NA * @lua NA */ virtual ~RichElementImage(){}; /** * @brief Initialize a RichElementImage with various arguments. * * @param tag A integer tag value. * @param color A color in Color3B. * @param opacity A opacity in GLubyte. * @param filePath A image file name. * @return True if initialize success, false otherwise. */ bool init(int tag, const Color3B& color, GLubyte opacity, const std::string& filePath); /** * @brief Create a RichElementImage with various arguments. * * @param tag A integer tag value. * @param color A color in Color3B. * @param opacity A opacity in GLubyte. * @param filePath A image file name. * @return A RichElementImage instance. */ static RichElementImage* create(int tag, const Color3B& color, GLubyte opacity, const std::string& filePath); protected: std::string _filePath; Rect _textureRect; int _textureType; friend class RichText; }; /** *@brief Rich element for displaying custom node type. */ class CC_GUI_DLL RichElementCustomNode : public RichElement { public: /** * @brief Default constructor. * @js ctor * @lua new */ RichElementCustomNode(){_type = Type::CUSTOM;}; /** * @brief Default destructor. * @js NA * @lua NA */ virtual ~RichElementCustomNode(){CC_SAFE_RELEASE(_customNode);}; /** * @brief Initialize a RichElementCustomNode with various arguments. * * @param tag A integer tag value. * @param color A color in Color3B. * @param opacity A opacity in GLubyte. * @param customNode A custom node pointer. 
* @return True if initialize success, false otherwise. */ bool init(int tag, const Color3B& color, GLubyte opacity, Node* customNode); /** * @brief Create a RichElementCustomNode with various arguments. * * @param tag A integer tag value. * @param color A color in Color3B. * @param opacity A opacity in GLubyte. * @param customNode A custom node pointer. * @return A RichElementCustomNode instance. */ static RichElementCustomNode* create(int tag, const Color3B& color, GLubyte opacity, Node* customNode); protected: Node* _customNode; friend class RichText; }; /** *@brief A container for displaying various RichElements. * We could use it to display texts with images easily. */ class CC_GUI_DLL RichText : public Widget { public: /** * @brief Default constructor. * @js ctor * @lua new */ RichText(); /** * @brief Default destructor. * @js NA * @lua NA */ virtual ~RichText(); /** * @brief Create a empty RichText. * * @return RichText instance. */ static RichText* create(); /** * @brief Insert a RichElement at a given index. * * @param element A RichElement type. * @param index A given index. */ void insertElement(RichElement* element, int index); /** * @brief Add a RichElement at the end of RichText. * * @param element A RichElement instance. */ void pushBackElement(RichElement* element); /** * @brief Remove a RichElement at a given index. * * @param index A integer index value. */ void removeElement(int index); /** * @brief Remove specific RichElement. * * @param element A RichElement type. */ void removeElement(RichElement* element); /** * @brief Set vertical space between each RichElement. * * @param space Point in float. */ void setVerticalSpace(float space); /** * @brief Rearrange all RichElement in the RichText. * It's usually called internally. */ void formatText(); //override functions. 
virtual void ignoreContentAdaptWithSize(bool ignore) override; virtual std::string getDescription() const override; CC_CONSTRUCTOR_ACCESS: virtual bool init() override; protected: virtual void adaptRenderers() override; virtual void initRenderer() override; void pushToContainer(Node* renderer); void handleTextRenderer(const std::string& text, const std::string& fontName, float fontSize, const Color3B& color, GLubyte opacity); void handleImageRenderer(const std::string& fileParh, const Color3B& color, GLubyte opacity); void handleCustomRenderer(Node* renderer); void formarRenderers(); void addNewLine(); protected: bool _formatTextDirty; Vector<RichElement*> _richElements; std::vector<Vector<Node*>*> _elementRenders; float _leftSpaceWidth; float _verticalSpace; }; } // end of ui group /// @} NS_CC_END #endif /* defined(__UIRichText__) */
{ "redpajama_set_name": "RedPajamaGithub" }
9,452
{"url":"http:\/\/simulx.webpopix.org\/simulx\/userGuide\/regression.html","text":"R script: regression.R\n\n\n# 1 Introduction\n\nA regression variable is a variable $$x$$ which is a given function of time, which is not defined in the model but which is used in the model.\n\nA regression variable is defined in the R script as a vector $$(x_1, x_2, \\ldots, x_m)$$ together with a vector of times $$(t_1, t_2, \\ldots, t_m)$$, where $$x_j=x(t_j)$$ is the value of $$x$$ at time $$t_j$$.\nThen, this regression variable is used as an input of the Mlxtran code.\n\n$$x$$ is only defined at time points $$t_1, t_2, \\ldots, t_m$$ but $$x$$ is a function of time that should be defined for any $$t$$. Then, Mlxtran defines the function $$x$$ by intepolating the given values $$(x_1, x_2, \\ldots, x_m)$$. In the current version of Mlxtran, interpolation is performed by using the last given value:\n\n$x(t) = x_j \\quad \\text{for} \\ \\ t_j \\leq t < t_{j+1}$\n\n# 2 Examples\n\n## 2.1 Example 1\n\nConsider a Emax model where the effect $$E(t)$$ at time $$t$$ is function of the concentration $$C(t)$$: $E(t) = E_{\\rm max} \\frac{C(t)}{EC_{50} + C(t)}$ Assume that $$C$$ is given at times $$t_1, t_2, \\ldots , t_m$$. We therefore use $$C$$ as a regression variable in the Mlxtran code model\/regression1a.txt\n\n[LONGITUDINAL]\ninput = {Emax, EC50, C}\nC = {use=regressor}\n\nEQUATION:\nE = Emax*C\/(C+EC50)\n\n\nA regression variable is defined as an input argument regression of simulx. 
It is a list with three elements: name, time and value.\n\nAssume in this example that $x(t) = e^{-0.1\\, t}$ is given for $$t=0,1,2,\\ldots,50$$.\n\nt <- seq(0,50,by=1)\nreg <- list(name='C',\ntime=t,\nvalue=exp(-0.1*t))\n\nout <- list(name='E',\ntime=t)\n\nres <- simulx( model = \"model\/regression1a.txt\",\nparameter = c(Emax=100, EC50=0.3),\nregressor = reg,\noutput = out)\n\nplot(ggplot(data=res$E) + geom_line(aes(x=time, y=E))) If we want to define the regression variable as an output of the model, then, we have to define a new variable which will be defined as an output of the model: [LONGITUDINAL] input = {Emax, EC50, C} C = {use=regressor} EQUATION: E = Emax*C\/(C+EC50) Cout = C library(gridExtra) out <- list(name=c('E','Cout'), time=t) res <- simulx( model = \"model\/regression1b.txt\", parameter = c(Emax=100, EC50=0.3), regressor = reg, output = out) names(res[2]) <- \"C\" names(res$C) <- c(\"time\",\"C\")\nplot1 <- ggplot(data=res$C) + geom_line(aes(x=time, y=C)) plot2 <- ggplot(data=res$E) + geom_line(aes(x=time, y=E))\ngrid.arrange(plot1, plot2, ncol=2)\n\n## 2.2 Example 2\n\nA regression variable can also be used as a state. 
Model regression2.txt assumes that there exist two states $$-1$$ and $$+1$$, such that the derivative of a variable $$f$$ depends on the state (here, $$\\deriv{f}=a$$ if $$x(t)=1$$ and $$\\deriv{f}=b$$ if $$x(t)=-1$$):\n\n[LONGITUDINAL]\ninput = {a, b, x}\nx = {use=regressor}\n\nEQUATION:\nif x==1\ndf = a\nelse\ndf = b\nend\nt0 = 0\nf_0 = 0\nddt_f = df\n\n\nIn this example, the state changes every 5 hours: function $$f$$ is therefore a piecewise linear function, which slope abruptly changes every 5 hours.\n\nx <- list(name='x',\ntime=c(0,5,10,20,25,30,40),\nvalue=c(1,-1,1,-1,1,-1,1))\n\nf <- list(name='f',\ntime=seq(0, 50, by=1))\n\nres <- simulx( model = \"model\/regression2.txt\",\nparameter = c(a=1, b=-0.5),\nregressor = x,\noutput = f)\n\nprint(ggplot(data=res$f) + geom_line(aes(x=time, y=f))) ## 2.3 Example 3 In this new model regression3a.txt, $$x$$ is used as rate function. [LONGITUDINAL] input = {k, f0, x} x = {use=regressor} EQUATION: t0 = 0 f_0 = f0 ddt_f = -k*f + x This rate function is a piecewise constant function which changes abruptly every 5 hours. x <- list(name='x', time=c(0,5,10,20,25,30,40), value=c(1,-1,1,-1,1,-1,1)) f <- list(name='f', time=seq(-5, 50, by=1)) res <- simulx( model = \"model\/regression3a.txt\", parameter = c(k=0.2, f0=0), regressor = x, output = f) print(ggplot(data=res$f) + geom_line(aes(x=time, y=f)))\n\nDifferent values of the same regression variable can be defined per group. 
In this example, the regression variable $$x$$ is defined every at different time points for groups 1 and 2 (see Defining groups: part I for more details about the use of groups with ):\n\nx1 <- list(name='x',\ntime=c(0,5,10,20,25,30,40),\nvalue=c(1,-1,1,-1,1,-1,1))\nx2 <- list(name='x',\ntime=c(0,4,14,24,34),\nvalue=c(1,-0.5,1.5,-1,0.2))\ng1 <- list(regressor = x1)\ng2 <- list(regressor = x2)\n\nf <- list(name='f',\ntime=seq(-5, 50, by=1))\n\nres <- simulx( model = \"model\/regression3a.txt\",\nparameter = c(k=0.2, f0=0),\ngroup = list(g1,g2),\noutput = f)\n\nprint(ggplot(data=res$f) + geom_line(aes(x=time, y=f, colour=id)) + theme(legend.position=c(0.9, 0.85))) Alternatively, individual values of the same regression variables can be defined in a datafile (converted here into a data frame). See Using data files and data frames for more details. x <- inlineDataFrame(\" id time x 1 0 1 1 5 -1 1 10 1 1 20 -1 1 25 1 1 30 -1 1 40 1 2 0 1.0 2 4 -0.5 2 14 1.5 2 24 -1.0 2 34 0.2 \") f <- list(name='f', time=seq(-5, 50, by=1)) res <- simulx( model = \"model\/regression3a.txt\", parameter = c(k=0.2, f0=0), regressor = x, output = f) print(ggplot(data=res$f) + geom_line(aes(x=time, y=f, colour=id)) + theme(legend.position=c(0.9, 0.85)))\n\nA regression variable is used to define a function of time $$f$$. 
This function computed at some given time points $$t_1, t_2, \\ldots, t_n$$ can then be used as a prediction for some continuous data $$y_1, y_2, \\ldots, y_n$$, as in the following model regression3b.txt:\n\n[LONGITUDINAL]\ninput = {k, f0, a, x}\nx = {use=regressor}\n\nEQUATION:\nt0 = 0\nf_0 = f0\nddt_f = -k*f + x\nDEFINITION:\ny = {distribution=normal, prediction=f, sd=a}\n\n\nThe regression variable $$x$$, the function $$f$$ and the longitudinal data $$y$$ can be defined at different times (see Continuous data for more details about the definition of continuous data model).\n\nx <- list(name='x',\ntime=c(0,5,10,20,25,30,40),\nvalue=c(1,-1,1,-1,1,-1,1))\n\ny <- list(name='y', time=seq(4, 48, by=1))\n\nres <- simulx( model = \"model\/regression3b.txt\",\nparameter = c(k=0.2, f0=0, a=0.5),\nregressor = x,\noutput = list(f,y))\n\nprint(ggplot() + geom_line(data=res$f, aes(x=time, y=f), colour=\"black\") + geom_point(data=res$y, aes(x=time, y=y), colour=\"red\"))\n\n## 2.4 Example 4\n\nSeveral regression variables can be used in the same model (regression4.txt).\n\n[LONGITUDINAL]\ninput = {k1, k2, f0, g0, x1, x2}\nx1 = {use=regressor}\nx2 = {use=regressor}\n\nEQUATION:\nt0 = 0\nf_0 = f0\ng_0 = g0\nddt_f = -k1*f + k2*g + x1\nddt_g = k1*f - k2*g + x2\n\n\nThese regression variables can be defined at different time points. 
Each of them will be interpolated using the same method (i.e.\u00a0using the last given value).\n\nx1 <- list(name='x1',\ntime=c(0,10,20,30,40),\nvalue=c(1,-1,1,-1,1)*0.5)\nx2 <- list(name='x2',\ntime=c(5,15,25,35),\nvalue=c(1,-1,1,-1)*0.3)\n\nfg <- list(name=c('f','g'),\ntime=seq(-5, 50, by=1))\n\nres <- simulx( model = \"model\/regression4.txt\",\nparameter = c(k1=0.2, k2=0.1, f0=0, g0=0),\nregressor = list(x1, x2),\noutput = fg)\n\nprint(ggplot() + geom_line(data=res$f, aes(x=time, y=f, colour=\"blue\")) + geom_line(data=res$g, aes(x=time, y=g, colour=\"red\")) +\nscale_colour_manual(name=\"\",values=c('blue'='blue','red'='red'),labels=c('f','g')) +\ntheme(legend.position=c(0.9, 0.85)))","date":"2021-09-20 01:23:07","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 2, \"mathjax_display_tex\": 2, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.6244193315505981, \"perplexity\": 3638.3533083765515}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2021-39\/segments\/1631780056974.30\/warc\/CC-MAIN-20210920010331-20210920040331-00479.warc.gz\"}"}
null
null
Q: Laravel Model Relationships show Admin details from post I'm creating a small social media where admin can posts to user's dashboard but having trouble with showing the admin details like profile picture to the posts this is the error i got Trying to get property 'profile_image' of non-object Check my code Post.php // my post model public function admin() { return $this->belongsTo('App\Admin'); } Admin.php // my admin model public function post() { return $this->hasMany('App\Post'); } now on my user dashboard i would try to access my admin details using this code {{ $post->admin->profile_image }} A: You get that error because $post->admin is null You must be check $post->admin empty or not {{ $post->admin ? $post->admin->profile_image : ''}}
{ "redpajama_set_name": "RedPajamaStackExchange" }
9,334
<selector xmlns:android="http://schemas.android.com/apk/res/android"> <item android:state_checked="false" android:state_enabled="false" android:color="#ff7e7e" /> <item android:state_checked="true" android:state_enabled="false" android:color="#e84848" /> <item android:state_selected="false" android:state_enabled="false" android:color="#ff7e7e" /> <item android:state_selected="true" android:state_enabled="false" android:color="#e84848" /> <item android:color="#cc0000" /> </selector>
{ "redpajama_set_name": "RedPajamaGithub" }
9,363
{"url":"http:\/\/math.stackexchange.com\/questions\/98267\/wave-equation-with-2-neuman-conditions","text":"# wave equation with 2 neuman conditions\n\nI am looking for a solution to a wave equation\n\n$\\frac{\\partial^2 u}{\\partial \\tau^2} = \\frac{\\partial^2 u}{\\partial \\xi^2}$\n\nin which $t_c\\tau = t$, $L\\xi = x$,\n\nand $t_c = L\/v_c$ is the characteristic time,\n\n$L$ is the sample thickness,\n\nand $v_c$ is the characteristic wave speed,\n\nwith an IC of\n\n$\\left [\\frac{\\partial u}{\\partial \\tau} \\right]_{x,t=0} = \\theta \\left (x, t=0 \\right)$\n\nand a BC of\n\n$\\left [\\frac{\\partial u}{\\partial \\xi} \\right]_{x=0,t} = \\phi \\left (x=0, t \\right)$\n\nI have tried the D' Alembert solution, but I get a function $u\\left(\\xi, \\tau \\right)$ that is a function of the integral of phi which I don't know since it is not analytic, and it also introduces two new unknowns, $f\\left (\\tau_0 \\right)$ and $g\\left (\\tau_0 \\right)$ and I'm actually trying to find $\\frac{\\partial u}{\\partial \\tau}$ and $\\frac{\\partial u}{\\partial \\xi}$ not u.\n\nI haven't tried separation of variables, Sturm-Liouville or Fourier transform yet.\n\nThis system is similar to Cauchy-Riemann equations.\n\n-\n\nI'm going to ignore all that rescaling and just treat the wave equation in $\\tau$ and $\\xi$.\n\nThe general solution of that wave equation is\n\n$$u(\\tau,\\xi)=f_+(\\tau+\\xi)+f_-(\\tau-\\xi)$$\n\nwith arbitrary functions $f_\\pm$. Then\n\n$$\\frac{\\partial u}{\\partial \\tau}=g_+(\\tau+\\xi)+g_-(\\tau-\\xi)$$\n\nand\n\n$$\\frac{\\partial u}{\\partial \\xi}=g_+(\\tau+\\xi)-g_-(\\tau-\\xi)\\;,$$\n\nwith $g_\\pm=f'_\\pm$, which are arbitrary since the $f_\\pm$ are arbitrary. 
Substituting your initial and boundary conditions (please don't use abbreviations like that; it's so much less effort for you to write it out than it is for all your readers to have to think about what it means) then yields (I'm not distinguishing between $x$ and $\\xi$ or $t$ and $\\tau$ here; you're going to have to add some scale factors)\n\n$$g_+(\\xi)+g_-(-\\xi)=\\theta(\\xi)$$\n\nand\n\n$$g_+(\\tau)-g_-(\\tau)=\\phi(\\tau)$$\n\n(where I've dropped the dummy zero argument on $\\theta$ and $\\phi$). Then solving for $g_\\pm$ yields\n\n$$g_+ = \\frac{\\theta+\\phi}2$$\n\nand\n\n$$g_- = \\frac{\\theta-\\phi}2\\;,$$\n\nso the derivatives you're interested in are\n\n$$\\frac{\\partial u}{\\partial \\tau}=\\frac{\\theta(\\tau+\\xi)+\\phi(\\tau+\\xi)+\\theta(\\tau-\\xi)-\\phi(\\tau-\\xi)}2$$\n\nand\n\n$$\\frac{\\partial u}{\\partial \\xi}=\\frac{\\theta(\\tau+\\xi)+\\phi(\\tau+\\xi)-\\theta(\\tau-\\xi)+\\phi(\\tau-\\xi)}2\\;.$$\n\nNote that I used your conditions for all values of the arguments. That may not be what you had in mind, since you called them initial and boundary conditions. If you're only interested in one quadrant of the $(\\tau,\\xi)$ plane and intended the conditions only to apply to the boundaries of that quadrant, then the system is underspecified; you can have arbitrary waves coming in from infinity and being reflected at the boundary, and that freedom corresponds to the freedom of choosing the values at the remaining boundaries.\n\n-\nThanks for your response. You were correct that I am only looking for u in the 1st quadrant. But maybe I should back up. I am actually looking for a solution to a system of equations of only first derivatives. But there are two fields: theta(xi,tau) and phi(xi,tau). So the system of equations actually looks like this: dtheta\/dtau = thetaphi and dphi\/dxi = thetaphi. I just thought that by substitution of variables I could convert it to the wave equation which might be easier to solve. 
\u2013\u00a0Mark Mikofski Jan 19 '12 at 19:21","date":"2016-06-30 07:05:45","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8385109901428223, \"perplexity\": 251.1984929571323}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2016-26\/segments\/1466783398209.20\/warc\/CC-MAIN-20160624154958-00023-ip-10-164-35-72.ec2.internal.warc.gz\"}"}
null
null
Q: how to append react component with appendChild method I have to use the following component more than 1 time. const OnePen = (props) => { return ( <div className="cerd" key={props.ID}> <div className=" card-body"> <h2 className="card-title">{props.title}</h2> <p className="card-text">{props.desc}</p> </div> </div> ); }; export default OnePen; Using the above component, the following code is working fine and doing the perfect job. import OnePen from "./OnePen"; const PensList = ({ pens }) => { return ( <> <div id="mainPen" className=" wrapper"> {pens.map((pen) => ( **<OnePen ID={pen.id} title={pen.title} desc={pen.description}></OnePen>** ))} </div> </> ); }; export default PensList; However, the following code does not work as it says the type of element appending with appendChild should be Node. when I create the same component with react-create-element it works fine but I don't have to do it again and again and I want to reuse the same component. Here is the code with the problem... commented code does the job but I don't have to create it like that.. 
I have to append const handleSubmit = (e) => { e.preventDefault(); const pen = { title, description }; axios.post("http://localhost:5000/addPen", pen).then((res) => { if (res.status === 200) { // const div1 = document.createElement("div"); // div1.className = "cerd"; // const key = document.createAttribute("key"); // key.value = res.data._id; // const div2 = document.createElement("div"); // div2.className = "card-body"; // const h2 = document.createElement("h2"); // h2.className = "card-title"; // h2.innerHTML = res.data.title; // const p = document.createElement("p"); // p.className = "card-text"; // p.innerHTML = res.data.description; // div2.appendChild(h2); // div2.appendChild(p); // div1.appendChild(div2); **document .getElementById("mainPen") .appendChild( <OnePen ID={res.data._id} title={res.data.title} desc={res.data.description}></OnePen> );** } }); }; return ( //some code ); }; export default CreatePen; Thanks for considrations A: This is how you could approach this. If you have an API, create an endpoint to fetch data from (all pens or a specific pen). Then, you could make a component where you fetch the data with axios.get() and put it in a state variable. If you put this code inside the useEffect() hook, the data will be fetched on load of the component. // state for your pens const [pens, setPens] = useState([]); // fetch on load useEffect(() => { axios .get("http://localhost:5000/getPens") // set to your endpoint .then((response) => { setPens(response.data); }) .catch((error) => { console.log(error); }); }, []); Then, you could render the pens by mapping through them like you did before to display all of them, or you could display a single pen. /*********** * Render ***********/ const pensDisplay = pens.map((p, i) => { return ( <div key={i}> {/* for example */} <p>{p.size}</p> </div> ); }); return ( <div> <h3>Pens</h3> {pensDisplay} </div> ); In React, you should avoid using appendChild() and such and try doing as much as possible in the "React" way.
{ "redpajama_set_name": "RedPajamaStackExchange" }
5,484
\section{INTRODUCTION} A remarkable variety of unexpected phenomena arises when magnetic impurities are introduced in a metal. One of the most striking examples is that of Gold (Au) metal, which is, in its pure form, a good conductor displaying decreasing electrical resistance with decreasing temperature. A few parts per million (ppm) of magnetic Iron impurities in Au, however, cause the resistance to rise logarithmically at a material-specific Kondo impurity temperature $T_{K}$ \cite{AuFe}. This so-called single-ion Kondo effect reflects the incoherent scattering of conduction electrons by the magnetic impurities introduced into the host \cite{Kondo}. In addition to introducing only a few ppm of magnetic impurities, it is also possible to synthesize materials in which there is a periodic lattice of Kondo ``impurities" coupled to the surrounding sea of itinerant electrons. At temperatures well above $T_{K}$, conduction electrons are scattered incoherently by the periodic array of Kondo ``impurities", but at much lower temperatures, translational invariance of the ``impurities" requires that a strongly renormalized Bloch state develop in which scattering is coherent \cite{ReviewKondo}. A good example of these behaviors is found when La is replaced systematically by magnetic (Kondo) Ce ions in the non-magnetic host LaCoIn$_{5}$ \cite{Nakatsuji2002}. In the Kondo-lattice limit CeCoIn$_{5}$, the low temperature resistivity is very small, and the compound hosts unconventional superconductivity in which the effective mass of electrons in the renormalized conduction bands is large \cite{dhva}. If a small number of Ce atoms in CeCoIn$_{5}$ now is replaced by non-magnetic La ions, a Kondo-impurity effect develops on the La ions. 
The absence of a Ce ion in the periodic Kondo lattice creates a ``Kondo-hole" such that La acts as a Kondo impurity and incoherently scatters electrons in the renormalized heavy conduction bands \cite{KHEric} In the series Ce$_{1-x}$La$_{x}$CoIn$_{5}$, the system evolves at low temperatures as a function of $x$ from a coherent Kondo lattice ($x=0$) to a a collection of incoherently scattering Kondo impurities at $x \sim 0.4$. Interestingly, this cross-over coincides with the percolation limit of a 2D square lattice on which the La/Ce ions sit. Magnetic doping has not been extensively studied in CeCoIn$_{5}$ or other members within the Ce$_{m}M_{n}$In$_{3m+2n}$ ($M$ = transition metals Co, Rh, Ir, Pd, Pt; $n=0,1,2$; $m=1,2,3$) family of which it is a part. Recently, however, Nd-doping in CeCoIn$_{5}$ at concentrations $5-10$\% has been shown to induce an unexpected magnetic state inside the zero-field superconducting (SC) phase \cite{CeNdPetrovic}. Remarkably, the propagation vector and moment size of the incommensurate magnetic order in Ce$_{0.95}$Nd$_{0.05}$CoIn$_{5}$ are identical to those observed in the field-induced magnetically ordered phase in pure CeCoIn$_{5}$ ($Q$-phase) \cite{CeNdNeutrons,QPhase}. These results indicate that the Nd ions are fundamentally changing the electronic system and not acting simply as a Kondo hole. To our knowledge, there is no report on the effects of magnetic doping in the antiferromagnetic (AFM) member CeRhIn$_{5}$ up to date. In the following we address the open questions: (i) how will Nd interact with the AFM order of CeRhIn$_{5}$? (ii) will there be a ``Kondo-hole" effect? We note that ``Kondo-hole" behavior has been observed in Ce$_{1-x}$La$_{x}$RhIn$_{5}$ where AFM order decreases linearly with La. In the limit $T_{N} \rightarrow 0$, the critical La concentration, $x_{c}\sim 40\%$, also reflects the percolation limit of the 2D square lattice \cite{CeLaPagliuso}. 
In this work, we report the first study of magnetic doping in CeRhIn$_{5}$ by means of X-ray diffraction, microprobe, magnetization, heat capacity, and electrical resistivity measurements. Our data show that the AFM ordering temperature of CeRhIn$_{5}$ ($T_{N}^{\mathrm{Ce}} = 3.8$~K) decreases linearly with Nd concentration, $x_{\mathrm{Nd}}$, and extrapolates to zero at a critical Nd concentration of $x_{c} \sim 30\%$. Hence, in the dilute regime, Nd ions behave as a free paramagnetic impurity, i.e. a moment-bearing ``Kondo-hole" in the Ce system. The fact that $x_{c}$ is below the percolation limit indicates that there is another mechanism frustrating the magnetic order of the Ce sublattice. We argue that this mechanism is the crystal-field frustration due to the different spin configurations of CeRhIn$_{5}$ (easy $c$-axis magnetization but with ordered moments in-plane) and NdRhIn$_{5}$ (Ising spins along c-axis). In fact, around $x_{\mathrm{Nd}} \sim 0.2$, the Ising AFM order of the Nd sublattice is stabilized and $T^{\mathrm{Nd}}_{N}$ increases up to $11$~K in pure NdRhIn$_{5}$. \section{EXPERIMENTAL DETAILS} Single crystalline samples of Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$ ($x~=~0,0.05,0.1,0.15,0.2,0.3,0.5,0.7,0.9,1$) were grown by the In-flux technique. The crystallographic structure was verified by X-ray powder diffraction at room temperature. In addition, several samples were characterized by elemental analysis using a commercial Energy Dispersive Spectroscopy (EDS) microprobe. Magnetization measurements were performed using a commercial superconducting quantum interference device (SQUID). The specific heat was measured using a commercial small mass calorimeter that employs a quasi-adiabatic thermal relaxation technique. The in-plane electrical resistivity was obtained using a low-frequency ac resistance bridge and a four-contact configuration. 
\section{RESULTS} Figure~\ref{fig:Fig1}a shows the actual Nd concentration obtained by EDS ($x_{\mathrm{EDS}}$) as a function of the nominal Nd concentration $x_{\mathrm{nominal}}$. The smooth and monotonic relationship between the $x_{\mathrm{EDS}}$ and $x_{\mathrm{nominal}}$ indicates that Nd is being incorporated in the lattice. Further, the small error bars, $\Delta x$, point to a rather homogeneous distribution of Nd. In the extremes of the series, $x_{EDS}$ has an error bar of $\Delta x \sim 0.02$. For Nd concentrations around $50\%$, a larger variation of $\Delta x = 0.05$ is observed, which is expected for concentrations in the middle of the series. We note, however, that $\Delta x$ is the standard deviation accounting for different samples from the same batch and not for a single sample. On average, the variation within a single crystal ($\sim 0.01$) is smaller than the standard deviation. These results indicate that Nd substitutes Ce homogeneously instead of producing an intergrown of NdRhIn$_{5}$. Herein, we will refer to the actual EDS concentration. \begin{figure}[!ht] \begin{center} \hspace{-0.35cm} \includegraphics[width=1.\columnwidth]{Fig1} \vspace{-0.5cm} \end{center} \caption{a) Actual concentration measured by EDS, $x_{EDS}$, as a function of nominal concentration, $x_{\mathrm{nominal}}$ in the series Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$. b) Tetragonal lattice parameters as a function of $x_{EDS}$ along the series Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$.} \label{fig:Fig1} \end{figure} Figure~\ref{fig:Fig1}b shows the lattice parameters obtained by powder X-ray diffraction as a function of Nd concentration. The X-ray powder patterns show that all members of the series crystallize in the tetragonal HoCoGa$_{5}$ structure and no additional peaks are observed. A smooth decrease is found in both lattice parameters $a$ and $c$, in agreement with Vegard's law. 
This result implies that the volume of the unit cell is decreasing with Nd concentration, suggesting that Nd doping produces positive chemical pressure. Using the bulk modulus of CeRhIn$_{5}$, we estimate that a rigid shift of the lattice parameters from CeRhIn$_{5}$ to Ce$_{0.95}$Nd$_{0.05}$RhIn$_{5}$ corresponds to $\Delta P = 0.25$~GPa of applied pressure. From the phase diagram of CeRhIn$_{5}$ under pressure \cite{TusonNature}, this $\Delta P$ would correspond to an increase of $T_{N}$ by $0.1$~K. We will see below that the AFM order actually is suppressed in Ce$_{0.95}$Nd$_{0.05}$RhIn$_{5}$, indicating that chemical pressure is not the main tuning parameter determining $T_{N}$. Figures~\ref{fig:Fig2}a and b show the $T$-dependence of the magnetic susceptibility, $\chi(T)$, for a field of $1$~kOe applied along the $c$-axis and $ab$-plane, respectively. For low Nd concentrations ($x_{\mathrm{Nd}}=0.05,\,0.14$), there is no evidence of $T_{N}$ in the $\chi_{c}(T)$ data, i.e., when $H||c$-axis. This result is somewhat unexpected because AFM order is observed by a clear peak in heat capacity measurements of both CeRhIn$_{5}$ and Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$. Instead of an expected peak in $\chi(T)$, we observe a low-$T$ Curie-tail, suggesting that the Nd ions are free paramagnetic impurities embedded in the Kondo lattice. When $H||ab$-plane, however, $\chi_{ab}(T)$ displays a very similar behavior when compared to pure CeRhIn$_{5}$: there is a maximum in $\chi(T)$ followed by a kink at $T_{N}^{\mathrm{Ce}}$. We attribute this difference to the fact that the spins in NdRhIn$_{5}$ point along the $c$-axis and the magnetic susceptibility along this direction is much larger than the in-plane susceptibility. Thus, $\chi_{ab}(T)$ data reveals a linear decrease of $T_{N}^{\mathrm{Ce}}=3.8$~K with $x_{\mathrm{Nd}}$ up to $x_{\mathrm{Nd}}=0.14$. 
Between $x_{\mathrm{Nd}}=0.14$ and $x_{\mathrm{Nd}}=0.23$, the transition temperature starts to increase again, suggesting that the AFM order due to Nd ions starts to develop at $T_{N}^{\mathrm{Nd}}$. Though not obvious in these data, $\chi_{ab}(T)$ reaches a maximum at $T^{\mathrm{max}}_{\chi} > T_N^{\mathrm{Ce}}$ in CeRhIn$_{5}$ and lightly Nd-doped samples. The temperature $T^{\mathrm{max}}_{\chi}$ also decreases with $x_{\mathrm{Nd}}$, from $\sim 7.5$~K in pure CeRhIn$_{5}$ to $\sim3.2$~K at $x_{\mathrm{Nd}}=0.23$. Evidence for $T^{\mathrm{max}}_{\chi}$, however, is lost for $x_{\mathrm{Nd}} > 0.23$ due to the dominant contribution from the Nd AFM order. We will return to this analysis when discussing the phase diagram of Fig. 5. Finally, for higher Nd concentrations, both $\chi_{c}(T)$ and $\chi_{ab}(T)$ show AFM behavior of a typical local moment system. \begin{figure} \begin{center} \hspace{-0.65cm} \includegraphics[width=0.85\columnwidth,keepaspectratio]{Fig2} \vspace{-0.5cm} \end{center} \caption{a) Temperature dependence of the magnetic susceptibility, $\chi_{c}(T)$, of representative samples in the Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$ series in a field of $1$~kOe applied along the $c$-axis. Inset shows the inverse susceptibility of the polycrystalline average $vs$ temperature. Solid lines are linear fits to the data. b) Temperature dependence of the magnetic susceptibility, $\chi_{ab}(T)$, for the same samples in a field of $1$~kOe applied along the $ab$-plane. Inset shows the Curie-Weiss temperature, $\theta$, for all compositions of Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$.} \label{fig:Fig2} \end{figure} From fits of the polycrystalline average of the data (inset of Fig.~\ref{fig:Fig2}a) to a Curie-Weiss law, we obtain effective magnetic moments of 2.5(1) $\mu_{B}$, 2.7(1) $\mu_{B}$, 3.2(1) $\mu_{B}$, and 3.7(1) $\mu_{B}$ for $x_{\mathrm{Nd}}=0.05, 0.14, 0.47, 0.9$, respectively. 
These calculated values are in good agreement with the theoretical values of 2.59 $\mu_{B}$, 2.69 $\mu_{B}$, 3.05 $\mu_{B}$, and 3.52 $\mu_{B}$ , respectively. We also obtain the paramagnetic Curie-Weiss temperature, $\theta_{\mathrm{poly}}$, which averages out crystal electrical field (CEF) effects. The inset of Fig.~\ref{fig:Fig2}b shows $\theta_{\mathrm{poly}}$ as well as $\theta_{c}$ and $\theta_{ab}$. In a molecular field approximation, $\theta_{\mathrm{poly}}$ is proportional to the effective exchange interaction, $J$, between rare-earth ions. The fact that $\theta_{\mathrm{poly}}$ is negative is in agreement with the AFM correlations found in the series. A reduction of $\theta_{\mathrm{poly}}$ is observed going from CeRhIn$_{5}$ ($\theta_{\mathrm{poly}}=-31$~K) to NdRhIn$_{5}$ ($\theta_{\mathrm{poly}}=-17$~K), which suggests within a molecular field model that $J$ also decreases along the series. As a consequence, this reduction in $J$ would be expected to decrease the AFM ordering temperature. The experimental data, however, shows the opposite behavior: $T_{N}$ in NdRhIn$_{5}$ ($T_{N}^{\mathrm{Nd}}=11$~K) is almost three times larger than in CeRhIn$_{5}$ ($T_{N}^{\mathrm{Ce}}=3.8$~K). Moreover, in a Kondo latice like CeRhIn$_{5}$, $\theta_{\mathrm{poly}}$ also includes the AFM Kondo exchange that tends to reduce $T_{N}$ relative to that expected solely from the indirect Ruderman-Kittel-Kasuya-Yosida (RKKY) interaction \cite{Doniach}. Because there is no Kondo effect in NdRhIn$_{5}$, the variation in $\theta_{\mathrm{poly}}$ with $x_{\mathrm{Nd}}$ implies a suppression of the Kondo contribution and increased dominance of the RKKY interaction. This is reflected in the ratio $T_{N}/\theta_{\mathrm{poly}}$, which is $0.12$ in CeRhIn$_{5}$ and $0.65$ in NdRhIn$_{5}$. 
As illustrated in the inset of Fig.~\ref{fig:Fig2}b, $\theta_{\mathrm{poly}}$ reaches a plateau between $x_{\mathrm{Nd}}=0.23$ and $0.47$, suggesting that Kondo interactions are essentially quenched before $x_{\mathrm{Nd}}=0.47$. Consequently, one might expect $T_N$ to increase initially as Nd replaces Ce and then to remain approximately constant for $x_{\mathrm{Nd}}>0.47$. As we will come to, this is not the case and $T_{N}$ is a non-monotonic function of Nd content. The above discussion indicates that there is another relevant mechanism determining the magnetic ordering in the series Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$. From the nearly constant values of $\theta_{c}$ and pronounced change of $\theta_{ab}$, which anisotropy is a consequence of CEF effects, it is reasonable to expect that CEF effects play an important role. In fact, from the high-temperature expansion of $\chi(T)$ we can readily observe that the main tetragonal CEF parameter, $B_{2}^{0} \propto (\theta_{ab}-\theta_{c})$, systematically decreases with Nd concentration. \begin{figure}[!ht] \begin{center} \includegraphics[width=0.85\columnwidth,keepaspectratio]{Fig3} \vspace{-0.7cm} \end{center} \caption{a) Field dependence of the magnetization at $1.8$~K for fields along the $c$-axis. Data for $x_{\mathrm{Nd}}=0$ coincide with that for $x_{\mathrm{Nd}}=0.9$ for $H \leq 50$~kOe. b) Field dependence of the magnetization at $1.8$~K for fields along the $ab$-plane.} \label{fig:Fig3} \end{figure} Figures~\ref{fig:Fig3}a and b show the $H$-dependence of the magnetization, $M(H)$, at 1.8~K for fields applied along the $c$-axis and $ab$-plane, respectively. Although $M_{c}(H)$ for CeRhIn$_{5}$ displays a linear response with field, at low Nd concentrations ($x_{\mathrm{Nd}}=0.05,\,0.14$) there is a non-linear behavior that resembles a Brillouin function. 
This supports our interpretation of the origin of the low-$T$ Curie tail in $\chi_{c}$ for low Nd content, namely that Nd ions at low concentrations act as free paramagnetic entities. Because the Brillouin-like contribution to $M_{c}(H)$ is substantially larger than expected from just a simple free Nd moment, this behavior implies that Nd moments also are locally inducing free-moment like character on neighboring Ce atoms. This is most pronounced for $H||c$ due to the much higher susceptibility of Nd moments along this direction. At light Nd doping, then, Nd acts as a rather different type of ``Kondo hole" compared to that induced by non-magnetic La substitution for Ce. The Nd ions carry a net magnetic moment that is not quenched by the Kondo-impurity effect. At high $x_{\mathrm{Nd}}$, $M_{c}(H)$ displays a field-induced transition to a spin-polarized state, as observed in NdRhIn$_{5}$. When the field is along the ab-plane, pure CeRhIn$_{5}$ also displays a weak field-induced anomaly in $M_{ab}(H)$ ($H_{c} \sim 22$~kOe), which signals a change in ordering wavevector \cite{Flouquet} and is suppressed with $x_{\mathrm{Nd}}$. \begin{figure}[!ht] \begin{center} \hspace{-0.75cm} \includegraphics[width=0.85\columnwidth,keepaspectratio]{Fig4} \end{center} \vspace{-0.7cm} \caption{a) Temperature dependence of the specific heat, $C/T$, of LaRhIn$_{5}$, CeRhIn$_{5}$ and representative samples of the series Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$. b) Magnetic contribution to the specific heat, $C_{\mathrm{mag}}/T$, as a function of temperature. Inset shows the entropy per Ce normalized by $R$ln$2$.} \label{fig:Fig4} \end{figure} Figure~\ref{fig:Fig4}a shows the temperature dependence of the heat capacity over temperature, $C/T$, for four representative Nd concentrations. LaRhIn$_{5}$, the non-magnetic member, and pure CeRhIn$_{5}$ also are included. The sharp peak at $T_{N}=3.8$~K displayed by CeRhIn$_{5}$ first decreases linearly with Nd concentrations up to $x_{\mathrm{Nd}}=0.14$. 
At $x_{\mathrm{Nd}}=0.14$, the transition at $T_{N}$ starts to broaden and further increase in $x_{\mathrm{Nd}}$ reveals an enhancement of $T_{N}$, in agreement with $\chi(T)$ data. Figure~\ref{fig:Fig4}b shows the magnetic contribution to the heat capacity, $C_{\mathrm{mag}}/T$, after subtracting LaRhIn$_{5}$ from the data. The transition temperature at which $C_{\mathrm{mag}}/T$ peaks is marked by the arrows. As the temperature is lowered further, an upturn is observed for all crystals with finite $x_{\mathrm{Nd}}$, including NdRhIn$_{5}$, suggesting that the Nd ions are responsible for it. Reasonably, the upturn may be associated with the nuclear moment of Nd ions, and it can be fit well by a sum of both electronic ($\propto \gamma$) and nuclear ($\propto T^{-3}$) terms \cite{SchottkyNuclear}, consistent with the presence of a nuclear Schottky contribution. The magnetic entropy as a function of temperature is obtained by integrating $C_{\mathrm{mag}}/T$ over $T$. The inset of Figure~\ref{fig:Fig4}b shows the $T$-dependence of the magnetic entropy recovered per Ce ion. The entropy is normalized by $R$ln2, which is the entropy of the ground state doublet. In pure CeRhIn$_{5}$ (black bottom curve), the magnetic entropy increases with $T$ followed by a kink at $T_{N}$. We observe an increase in the recovered entropy below $T_{N}$ even when a very small amount of Nd is introduced (e.g., $x_{\mathrm{Nd}}=0.05$). Increasing the concentration to $x_{\mathrm{Nd}}=0.14$ yields a further entropy increase. This result indicates that the magnetic entropy does not scale with the Ce concentration, in turn suggesting that the extra magnetic entropy comes from the free paramagnetic Nd ions. \begin{figure}[!ht] \begin{center} \hspace{-0.7cm} \includegraphics[width=1.05\columnwidth,keepaspectratio]{Fig5} \end{center} \caption{a) In-plane electrical resistivity, $\rho_{ab}(T)$, of Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$ as a function of temperature. b) Low temperature $\rho_{ab}(T)$ data. 
Arrows mark $T_{N}$.} \label{fig:Fig5} \end{figure} Finally, we discuss the temperature dependence of the in-plane electrical resistivity, $\rho_{ab}(T)$, of Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$. Figure~\ref{fig:Fig5}a shows $\rho_{ab}(T)$ for samples with $x_{\mathrm{Nd}}<0.5$. At $x_{\mathrm{Nd}}=0.05$, $\rho_{ab}(T)$ is very similar in magnitude and $T$-dependence to that of pure CeRhIn$_{5}$. In particular, the broad peak at $\sim 40$~K indicates the crossover from incoherent Kondo scattering at high temperatures to the heavy-electron state at low temperatures. As $x_{\mathrm{Nd}}$ is increased, $\rho_{ab}(T)$ decreases monotonically with temperature and the initial peak turns into a broad feature around $70$~K when $x_{\mathrm{Nd}}=0.47$. We note that the second CEF excited state of NdRhIn$_{5}$ is near 68~K, suggesting that the broad feature in $\rho_{ab}(T)$ is likely associated with CEF depopulation \cite{OnukiRRhIn5}. This evolution is consistent with an increase in the local character of the $4f$ system. Further, the low-temperature data shown in Fig.~\ref{fig:Fig5}b display an increase in $\rho_{0}$. Typically disorder scattering would be expected to be a maximum near $x=0.5$, but this is not the case. As shown in Ref.~\cite{OnukiRRhIn5}, the residual resistivity of pure NdRhIn$_{5}$ is much lower than that of our Ce$_{0.1}$Nd$_{0.9}$RhIn$_{5}$ crystal. This difference implies that spin-disorder scattering plays a significant role in determining $\rho_{0}$ in this series. \section{DISCUSSION} In Figure~\ref{fig:Fig6} we summarize our results in a $T-x$ phase diagram, in which two distinct regimes become clear. The first one, at low Nd concentrations, presents a linear decrease of $T_{N}$ with $x_{\mathrm{Nd}}$. Interestingly, a linear dependence of $T_{N}$ also has been observed in Ce$_{1-x}$La$_{x}$RhIn$_{5}$, where La creates a ``Kondo hole'' in the system via dilution. 
In the La-doping case, however, $T_{N}$ extrapolates to $T=0$ at a critical concentration of $x_{c}\sim 40\%$, which is the percolation limit of the $2D$ lattice. Here, Nd-doped CeRhIn$_{5}$ displays a smaller $x_{c}$ of $\sim 30\%$, indicating that there is an additional mechanism that frustrates N\'eel order in the Ce sublattice \cite{CeLaPagliuso}. \begin{figure}[!ht] \begin{center} \hspace{-0.75cm} \includegraphics[width=1\columnwidth,keepaspectratio]{Fig6} \end{center} \caption{$T-x$ phase diagram of the series Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$.} \label{fig:Fig6} \end{figure} It has been shown $-$ both theoretically and experimentally $-$ that $T_{N}$ in tetragonal structures is enhanced with respect to their cubic $R$In$_{3}$ ($R=$ rare-earth ions) counterparts whenever $R$ has an Ising magnetic structure, i.e., spins polarized along the $c$-axis \cite{PG2006,SerranoTb}. This is due to the fact that the tetragonal CEF parameters in these structures favor a ground state with Ising symmetry, as supported by the fact that the $c$-axis susceptibility is larger than the $ab$-plane susceptibility in members whose $R$ element has finite orbital momentum. Because NdRhIn$_{5}$ displays commensurate Ising-like order below $T_{N}=11$~K, it is reasonable to assume that Nd$^{3+}$ ions will retain their Ising-like character when doped into the Ce sites \cite{NdRhIn5MagStruc}. CeRhIn$_{5}$, however, has an incommensurate magnetic structure with spins perpendicular to the $c$-axis \cite{CeRhIn5MagStruc}. Hence, a crystal-field frustration of the in-plane order in CeRhIn$_{5}$ is induced by Nd$^{3+}$ Ising spins. As a consequence, $T_{N}^{\mathrm{Ce}}$ of the Ce sublattice extrapolates to zero before the percolation limit and $T_{N}^{\mathrm{Nd}}$ of the Nd sublattice is stabilized. \vspace{1cm} \section{CONCLUSIONS} In summary, we synthesized single crystals of Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$ using the In-flux technique. 
X-ray diffraction and microprobe measurements show a smooth evolution of lattice parameters and Nd concentration, respectively. Across the doping series, there is a complex interplay among Kondo-like impurity physics, magnetic exchange and crystal-field effects as the Nd content changes. At low $x_{\mathrm{Nd}}$, there is an unusual type of magnetic ``Kondo hole'' and $T^{\mathrm{Ce}}_{N}$ decreases linearly with $x_{\mathrm{Nd}}$. The extrapolation of $T^{\mathrm{Ce}}_{N}$ to zero temperature occurs below the 2D percolation limit due to crystal-field frustration effects. Around $x_{\mathrm{Nd}} \sim 0.2$, the Ising AFM order from Nd ions is stabilized and $T^{\mathrm{Nd}}_{N}$ increases up to $11$~K in pure NdRhIn$_{5}$. Further investigation of the Ce$_{1-x}$Nd$_{x}$RhIn$_{5}$ series under pressure will be valuable to understand this interplay in the superconducting state. \begin{acknowledgments} Work at Los Alamos was performed under the auspices of the U.S. Department of Energy, Office of Basic Energy Sciences, Division of Materials Science and Engineering. P. F. S. R. acknowledges: (i) a Director's Postdoctoral Fellowship through the LANL LDRD program; (ii) FAPESP Grant 2013/20181-0. \end{acknowledgments}
{ "redpajama_set_name": "RedPajamaArXiv" }
8,222
Pholeomyia excelsior är en tvåvingeart som först beskrevs av Becker 1907. Pholeomyia excelsior ingår i släktet Pholeomyia och familjen sprickflugor. Artens utbredningsområde är Bolivia. Inga underarter finns listade i Catalogue of Life. Källor Sprickflugor excelsior
{ "redpajama_set_name": "RedPajamaWikipedia" }
3,080
Panama Exhibit Site Just another LHL Cloud Sites site Who Was Who What Was What Maps & Blueprints Explore the Timeline A Popular Account of the Railroad The Panama Railroad as depicted on the cover of Harper's New Monthly Magazine, (vol. 18, no. 103, January 1859), with an annotated page from A.B. Nichols' copy of the same issue. Harper's New Monthly Magazine published an early account of the Panama Railroad a few years after the railroad was completed. Armchair readers who had not been to Panama would have been captivated by the images and descriptions of the railroad and the sites that one could expect to see on a trip across the Isthmus. But to A.B. Nichols, who was familiar with Panama and the railroad from his years of work on the canal, there were many inaccuracies. He made snide comments in the margins of his copy to point them out. Whoever saw "a Panama monkey nearly as large as a man?" he wrote next to one illustration. Panama Railroad at the Summit, 1855 Early view of Culebra, which was known as Summit during construction of the railroad. From R. Tomes, Panama in 1855 : An account of the Panama Rail-Road. New York, 1855. The Panama Railroad was completed at Summit in 1855, a point about 10 miles northwest of Panama City on the Pacific side. The last rail was laid there on January 27, and the completed line immediately attracted passengers. Robert Tomes was one of them. He published an account of his trip on the new railroad during that first year of its operation. Summit, or Culebra as it was later called, had been for some time the terminus of the railroad while under construction. Its principal feature, Tomes reported, was a "groggery", where passengers bought bad brandy before continuing by mule to Panama City. Once the railway line was completed, Summit became only a railway station where passengers had barely enough time to buy a few oranges from street venders at exorbitant prices before completing the journey. Front St. 
in Colón, the Atlantic terminus of the railroad, in the early 1880s. From A.B. Nichols Notebooks. View in Digital Collection » As completed in 1855, the Panama Railroad line was about 47 ½ miles long. It started in the town of Aspinwall on the Atlantic side of the Isthmus. Since the town was built as a terminus for the railroad, its main street was the railway line, named Front Street, as it was adjacent to the bay. When Ferdinand de Lesseps and other travelers arrived in Panama, they usually stepped off the ship at Aspinwall, or Colón as it was renamed. They could walk directly from the wharf to the train tracks on Front Street, get on the next train and begin the trip across Panama. Blueprint plan of the town of Aspinwall or Colón, 1863. From A.B. Nichols Notebooks. View in Digital Collection » The Atlantic terminus of the Panama Railroad was named in honor of one of the railroad's founders, William H. Aspinwall, but the name was later changed to Colón, the Spanish form of Christopher Columbus's last name. For a while, the town had a dual identity, as shown on the early blueprint, which allows for both names. Railroad people preferred Aspinwall, while the Columbian government chose Colón. First Steamship to Cross the Isthmus The Explorer in the Colorado River, reassembled after crossing Panama by rail. Illustration by J.J. Young from a sketch by H.B. Möllhausen. From J.C. Ives, Report upon the Colorado River of the West, Explored in 1857 and 1858. Washington, 1861. The first ship to cross the Isthmus of Panama was a paddlewheel steamship, the Explorer. It was hauled across in pieces by the Panama Railroad Company. The 50-foot iron steamer was ordered from a Philadelphia firm and built in sections for transport by rail. The eight sections of the hull, the disassembled pieces of the engine and paddlewheel, and the unwieldy boiler weighing over three tons, were an awkward mass of freight for the new railroad. 
The pieces reached the Atlantic terminus on August 20, 1857. Transporting them across the Isthmus, in Ives' estimation, was a "source of more trouble than profit." Barbacoas Bridge Barbacoas Bridge across the Chagres River in 1879. From A.B. Nichols Notebooks. View in Digital Collection » The Panama Railroad crossed the Chagres River at Barbacoas, an Indian word meaning "bridge." It is about 23 miles from Colón and halfway to Panama City. The bridge was a massive wrought iron structure, more than 600 feet in length and rising 40 feet above the river on stone piers. In November 1879, only a few weeks before Ferdinand de Lesseps arrived in Panama for the ground-breaking ceremony, torrential rains caused the river to rise 46 feet and engulf the bridge in the worst flood on record. When de Lesseps arrived at the bridge with the party of visiting dignitaries, the train stopped and passengers had to walk across on planks that covered the gaps to transfer to another train on the other side. The bridge had been out of service for freight and passengers for five weeks, but in the official record of the visit the incident received only passing mention as an unexplained delay at Barbacoas. Stephens' Tree Stephens' Tree, a landmark along the railroad, next to the double track in 1912. From A.B. Nichols Notebooks. View in Digital Collection » Stephens' Tree as depicted for armchair travelers in Harper's New Monthly Magazine, vol. 18, no. 103, January 1859. View in Digital Collection » Stephens' Tree, named in honor of one the founders of the Panama Railroad, John L. Stephens, was a landmark along the line. Pictures of it occur in early travel accounts from the 1850s, shortly after the railroad was completed. In those pictures, the tree is shown next to a single track. The double track shown here was one of the improvements made after the Americans began their efforts in Panama. John F. 
Stevens, the American engineer who came to lead the project in 1905, found that the Panama Railroad he inherited was hopelessly out-of-date and described it as a "phantom railroad." Double tracks were installed across the Isthmus, dock capacity was increased, heavier rail was installed, and bridges were strengthened. By December 1905, the line had been rehabilitated, and Stephens' Tree had been preserved through it all. "Mogul" Locomotives A locomotive for the Isthmian Canal Commission, supplied by the Baldwin Locomotive Works of Philadelphia. From A. B. Nichols Notebooks. View in Digital Collection » One of the first priorities of John F. Stevens when he became chief engineer of the canal project in 1905 was the rehabilitation of the Panama railroad. Within weeks of his arrival he placed an order for 120 Mogul locomotives, which had 2 leading wheels and 6 drive wheels. More orders followed, and these Mogul locomotives became the principal type of locomotive used in the Canal Zone during construction. The Isthmian Canal Commission placed large orders with several builders for Moguls, including the Baldwin Locomotive Works in Philadelphia. A distinctive feature of them all, regardless of manufacturer, was the long and low sloping tender, designed for greater ease in reverse running. They were also designed to avoid toppling over because of the uneven and rough roadbeds over which they had to run. An Idealized Plan for Excavation Artist Richard Rummel's depiction of a proposal for excavating the Culebra Cut, based on a design by Capt. Fremont Hill of New York. From A.B. Nichols Notebooks. View in Digital Collection » The railroad was always seen as integral to any plans for building a canal in Panama, as it was necessary for transporting not only people but equipment, supplies, and excavated dirt. This undated painting shows an idealized plan for excavating the difficult Culebra Cut in the mountains at the continental divide in Panama. 
Excavation from the top is being done by the use of cableways to haul away and dump the spoil. A massive rail operation is meanwhile hauling away dirt and rock from below, carrying the spoil from a tunnel after it would have been loosened by explosives. Eventually, the two operations would meet to create the huge cut through the mountains that was necessary for the canal. The Interoceanic Ship Railway A steamer in transit across the Isthmus of Tehuantepec in Mexico, as proposed by James B. Eads. From Scientific American, December 27, 1884. In 1880, American engineer James B. Eads, reacting to the 1879 proposal from Paris for a Panama Canal, proposed a giant railroad to haul ships across Mexico instead. There were many advantages, including cost, speed of construction, and even the speed of transit from sea to sea. Eads' plan was to lift ships out of the water on a huge cradle, dispersing the weight on multiple rails and hundreds of wheels. Huge locomotives, five times as large as any then in existence, would pull the ship across the Isthmus at 10 miles per hour, completing the entire transit in 16 hours. The U.S. Senate approved the plan in 1887, but the House bill to support it was blocked and never came up for a vote. Lifting pontoon and cradle for Eads' proposed ship railway. From Scientific American, December 27, 1884. Floating turntable for Eads' proposed ship railway. From Scientific American, December 27, 1884. The Isthmus of Tehuantepec Route of Eads' ship railroad across the Isthmus of Tehuantepec. From W.F. Johnson, Four centuries of the Panama Canal, New York, 1907. Central America has two narrow necks of land that separate Atlantic and Pacific waters. The Isthmus of Panama is 50 miles across, while Mexico's Isthmus of Tehuantepec is 134 miles. While Panama is wet and muddy, the Isthmus of Tehuantepec is dry. 
Relocating the Panama Railroad Gamboa Bridge in March 1911, when the Chagres River was still well below the new bridge as construction continued on the dam at Gatun. From A.B. Nichols Notebooks. View in Digital Collection » Gamboa is located on the Chagres River, at a point near the Continental Divide where the river feeds into man-made Lake Gatun. The original Gamboa Bridge across the Chagres was abandoned, since the creation of Lake Gatun plunged the bridge and most of the original railroad to the bottom of the lake. The new bridge was designed and built to rise on piers that would hold it above the level of the new lake and river. The bridge is still in use, carrying traffic on the new Panama Railroad across the Chagres River, next to the Panama Canal. New Gamboa Bridge, 1908 The new Gamboa Bridge across the Chagres River in 1908. From A.B. Nichols Notebooks. View in Digital Collection » The foreground in this photograph of the Gamboa Bridge was destined to become part of Lake Gatun. The channel for the canal runs parallel to the bridge, and ships pass the bridge daily as they transit the Panama Canal. In 2012, a large cargo ship lost control at the point marked "canal" on the photograph and nearly collided with the bridge before it was stopped. Gamboa Bridge Today Image courtesy Kansas City Southern Railroad The Gamboa Bridge, in use today by the The Panama Canal Railway. Who Was A.B. Nichols | Photo Gallery The Lure of the Pacific | Photo Gallery Envisioning a Canal | Photo Gallery Prelude to the Railroad | Photo Gallery Panama Railroad | Photo Gallery The French Plan | Photo Gallery French Attempt | Photo Gallery Fighting the Fever | Photo Gallery Making the Dirt Fly | Photo Gallery Somebody Dying Every Day | Photo Gallery Life in the Zone | Photo Gallery Locks | Photo Gallery A Century of Progress | Photo Gallery Linda Hall Library Home © 2021 Panama Exhibit Site. All Rights Reserved. Built by Infusion LindaHall.org Digital Collection Privacy Policy
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
2,149
<?php
// Attach the SecurityGroups subpanel to the Bugs module detail view
// (row-level access control convention; presumably SugarCRM/SuiteCRM —
// this mirrors the standard SecurityGroups subpanel definition).
$layout_defs['Bugs']['subpanel_setup']['securitygroups'] = array(
    // Buttons rendered above the subpanel: a multi-select popup picker.
    'top_buttons' => array(
        array(
            'widget_class' => 'SubPanelTopSelectButton',
            'popup_module' => 'SecurityGroups',
            'mode'         => 'MultiSelect',
        ),
    ),
    'order'             => 900,              // position relative to other subpanels
    'sort_by'           => 'name',           // default list sort column
    'sort_order'        => 'asc',
    'module'            => 'SecurityGroups', // module backing the subpanel rows
    'refresh_page'      => 1,                // reload page after add/remove
    'subpanel_name'     => 'default',
    'get_subpanel_data' => 'SecurityGroups', // relationship/link supplying rows
    'add_subpanel_data' => 'securitygroup_id',
    'title_key'         => 'LBL_SECURITYGROUPS_SUBPANEL_TITLE',
);
{ "redpajama_set_name": "RedPajamaGithub" }
8,671
{"url":"https:\/\/pt.overleaf.com\/latex\/examples\/equations-for-paper\/zlkzlqjndnck","text":"AbstractI want to see if this publishes it in MathML so I can cut & paste from there to other formats.","date":"2021-09-19 11:41:09","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 1, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.4203028678894043, \"perplexity\": 2895.0696915434787}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2021-39\/segments\/1631780056856.4\/warc\/CC-MAIN-20210919095911-20210919125911-00274.warc.gz\"}"}
null
null
Watch Gang Subscription Box Sean Tirman Category: Style When it comes to men's style, a watch is probably the quintessential piece of everyday carry. But picking out quality watches that suit your tastes and budget can be a full-time job. Of course, you could circumvent the hassle and get your hands on a personal selection of high-quality timepieces with a subscription to Watch Gang. Watch Gang works directly with major brands to supply you with a new watch to keep every month based on your specific tastes. That close working relationship also gives them the ability to offer timepieces valued at up to five times the cost of the monthly subscription. And, with free membership to the site, you'll also be granted exclusive access to deals, trades, and sales in their private community, so you can perfect your collection all in one location. Best of all, a paid subscription also gets you automatically entered into a weekly raffle with bonus watches given away almost daily. Subscriptions start at just $30 a month. Purchase: $30+ Oris Honors The U.S. Navy's First Black Diver With A Limited-Edition Watch A bronze dive watch powered by a new in-house automatic movement. Nike & Notre Link Up For The First Time On A Dunk Hi Inspired By Classic Workwear Because of variations in the suede material, no two pairs are alike. Shinola's New Auto Runwell Watch Was Inspired By American Station Agents A tribute to a trade in which punctuality came at a premium. Brooklyn Circus & Todd Snyder Link Up For A Retro Boxing-Inspired Capsule Celebrating the role boxing has historically played in cultural black resistance. These REC Watches Feature Dials Made From Two Of Carroll Shelby's Mustangs Swiss-made timepieces derived from the legendary 'Green Hornet' & 'Little Red.'
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
6,045
The 1940 National Division was the 4th edition of the Turkish National Division. Fenerbahçe won their second title. Participating clubs Beşiktaş - Istanbul Football League, 1st Fenerbahçe - Istanbul Football League, 2nd Galatasaray - Istanbul Football League, 3rd Vefa - Istanbul Football League, 4th Gençlerbirliği - Ankara Football League, 1st Muhafızgücü - Ankara Football League, 2nd Altınordu - İzmir Football League, 1st Altay - İzmir Football League, 2nd League table Results References Erdoğan Arıpınar; Tevfik Ünsi Artun, Cem Atabeyoğlu, Nurhan Aydın, Ergun Hiçyılmaz, Haluk San, Orhan Vedat Sevinçli, Vala Somalı (June 1992). Türk Futbol Tarihi (1904-1991) vol.1, Page(81), Türkiye Futbol Federasyonu Yayınları. Turkish National Division Championship seasons 1939–40 in Turkish football Turkey
{ "redpajama_set_name": "RedPajamaWikipedia" }
2,036
Bible SMS cannot accept new subscribers from India carriers at this time since all SMS Gateways in India are regulated by the Telecom Regulatory Authority of India (TRAI). Prior to the TRAI regulations, Bible SMS was capable of sending unlimited free christian SMS messages to cell phone users in India. Due to Telecom Regulatory Authority of India (TRAI), SMS Gateways have a daily SMS Limit at the present time. Bible SMS is aware of this issue and hopes to find another route for allowing subscribers from India to register.
{ "redpajama_set_name": "RedPajamaC4" }
5,060
Pivotal Software this week announced it has made the Greenplum massively parallel processing (MPP) database available in beta on Kubernetes. Announced at the Greenplum Summit, the forthcoming version 6 of Greenplum will take advantage of multiple pods with a Kubernetes cluster to process database queries in parallel. Jacque Istok, vice president of data at Pivotal, says Greenplum 6 will be available on multiple distributions of Kubernetes, including the Pivotal Container Service (PKS) instance of Kubernetes that Pivotal developed with sister company VMware. DevOps teams will be able to take advantage of the BOSH framework originally developed to automate the deployment of the open source Cloud Foundry platform-as-a-service (PaaS) environment to deploy instances of Greenplum and Kubernetes, he says, noting the combination of BOSH and Kubernetes will have a critical role in also making it easier for database administrators to keep pace with rapidly changing requirements in the age of agile development and DevOps. At the same time, Pivotal continues to work on rearchitecting Greenplum to run on top of an open source instance of the Postgres relational database. Rather than reinventing every element of a distributed relational database from the ground up, the latest generally available version of Greenplum makes use of Postgres as the foundational element of the MPP database. Pivotal will then continue to focus its efforts on add MPP services on top of Postgres. The current Greenplum 5 offering is based on version 9.4 of Postgres. Pivotal this week also announced it will make available a commercially supported instance of Postgres alongside the current Greenplum 5 platform. Greenplum itself will be updated to stay current with more recent releases of Postgres, says Istok. Pivotal already makes available a commercially supported instance of the open source MySQL database, but since Oracle acquired MySQL, many IT organizations have been transitioning to Postgres. 
As Pivotal moves to essentially containerize the engines that make up its MPP database using Postgres, there's a significant opportunity to expand adoption of MPP databases. Most databases today don't take full advantage of modern multi-core processor platforms. MPP databases increase utilization of those platforms by distributing processing engines across the platform. Istok says customers have been asking Pivotal to make Greenplum available on top of Kubernetes to make it easier for them to take advantage of a platform through which it becomes easier to manage MPP database engines running in multiple Kubernetes pods. Support for Kubernetes is part of a long-term Pivotal effort to unify transactional and operational analytics workloads. While these workloads today are still for the most part processed in isolation, more organizations now want to be able to process analytics in real-time alongside transactions to make more informed decisions in real time. Traditionally, however, the underlying infrastructure did not make it simple to process different types of workloads in parallel unless there was a complex proprietary MPP database in place, which tended to be very expensive to acquire, deploy and manage. Kubernetes and BOSH promise to reduce that complexity, while Postgres should help drive down overall costs. Naturally, it's too early to say whether there will be a resurgence of interest in MPP databases, but all elements required to spur increased adoption are starting to fall into place.
{ "redpajama_set_name": "RedPajamaC4" }
7,348
Q: To what extent are the different versions of $R^2$ comparable, with and without an intercept I have been trying to fit some data with a linear regression. I don't have any theoretical assumptions on the regression, and from what I know both an intercept or no intercept can be plausible results. In the evaluation of the goodness of fit, I came across two different definitions of $R^2$, the most general one $$ R^2 = 1 - \frac{SS_{res}}{SS_{tot}} $$ where $SS_{res}$ is the sum of squares of residuals and $SS_{tot}$ is the total sum of squares, and another definition that is justified only in the case of a fit with intercept $$ R^2 = \frac{SS_{reg}}{SS_{tot}} $$ where $SS_{reg}$ is the regression sum of squares and the condition $SS_{tot} = SS_{reg} + SS_{res}$ is verified. I know that some softwares (like R) use the second definition, with a "modified" version of $R^2$ in the case of no intercept, that can give conflictual results and this is a problem that has been already addressed. In another question I read that also for the first definition I should use a modified version of $R^2$ in the case of no intercept. What I want to understand is if there is a general definition of $R^2$ that is always applicable? If there isn't one, in what way can I compare the goodness of a fit with intercept and a fit without intercept, and ultimately to choose between the two? Thank you in advance A: $R^2$ compares your model to a naïve model that always predicts the same value. When your model includes an intercept, it makes sense that the naïve guess, ignoring the features, of the conditional mean would be the pooled/marginal mean of all $y$ observations: $\bar y$. If we ignore the features, the linear regression equation is: $$ y = \beta_0 + 0x_1 +\cdots + 0x_p =\beta_0 $$ OLS regression gives us $\hat\beta_0=\bar y$. This is how we wind up with the denominator of the usual $R^2$ equation. Every prediction is the same $\bar y$ value. 
$$ R^2_{usual} = 1-\dfrac{ \sum\bigg( y_i - \hat y_i \bigg)^2 }{ \sum\bigg( y_i - \bar y \bigg)^2 } $$ If we exclude an intercept from the model, then the model with no features is: $$ y = 0x_1 +\cdots + 0x_p = 0 $$ That is, the naïve model always predicts zero, and the comparison to the naïve model would reflect that fact. $$ R^2_{no\_intercept} = 1-\dfrac{ \sum\bigg( y_i - \hat y_i \bigg)^2 }{ \sum\bigg( y_i - 0 \bigg)^2 } $$ If you want a general way of thinking about $R^2$, I would go with the idea of comparing to a naïve model that has no features. It doesn't stop there. While this will wreck the typical interpretation of $R^2$ as the "proportion of variance explained" (which does not apply to the no-intercept version, anyway, and this interpretation vanishes in many settings with intercepts, too), you can use any baseline model in the denominator. Want to compare your fancy neural network to a linear model? $$ R^2_{sorta} = 1-\dfrac{ \sum\bigg( y_i - \hat y_{i, neural} \bigg)^2 }{ \sum\bigg( y_i - \hat y_{i, linear} \bigg)^2 } $$ This is equivalent to comparing the models on mean squared error, $MSE = \frac{1}{n}\sum\big( y_i - \hat y_i \big)^2$, however, and I do not see an advantage to $R^2_{sorta}$. Viewing the problem in terms of $R^2_{sorta}$, the $R^2_{usual}$ and $R^2_{no\_intercept}$ equations are comparing to baseline models that are totally naïve. Cardinal wrote a rather excellent answer related to this topic.
{ "redpajama_set_name": "RedPajamaStackExchange" }
799
# # First published in the United States in 2005 by Tuttle Publishing, an imprint of Periplus Editions (HK) Ltd., with editorial offices at 364 Innovation Drive, North Clarendon, Vermont 05759. Copyright © 2005 Periplus Editions All rights reserved. Nor part of this publication may be reproduced or utilized in any form or by any means, electronic or mechanical, including photocopying, recording, or by any information storage and retrieval system, without prior written permission from the publisher. Library of Congress Control Number: 2005920628 ISBN: 978-1-4629-0719-9 (ebook) IMPORTANT NOTE TO READERS: Training in the martial arts involves physical exertion, movements, and actions that can cause injury to you or others. Because the physical activities in this book may be too strenuous for some readers, you should check with a physician before you start your training. If you are ever in doubt about how to proceed or about whether a practice is safe for you, consult with a martial arts professional before proceeding. DISTRIBUTED BY North America, Latin America, and Europe Tuttle Publishing 364 Innovation Drive North Clarendon, VT 05759-9436 Tel: (802) 773-8930 Fax: (802) 773-6993 info@tuttlepublishing.com www.tuttlepublishing.com Japan Tuttle Publishing Yaekari Building, 3rd Floor 5-4-12 Ōsaki Shinagawa-ku Tokyo 141 0032 Tel: (03) 5437-0171 Fax: (03) 5437-0755 tuttle-sales@gol.com Asia Pacific Berkeley Books Pte. Ltd. 61 Tai Seng Avenue #02-12, Singapore 534167 Tel: (65) 6280-1330 Fax: (65) 6280-6290 inquiries@periplus.com.sg www.periplus.com First edition 08 07 06 05 10 9 8 7 6 5 4 3 2 1 Printed in Malaysia Illustrations by Stephanie Tok Design by Kathryn Sky-Peck TUTTLE PUBLISHING ® is a registered trademark of Tuttle Publishing. # CONTENTS 1 | What is Tai Chi? 
| 4 ---|---|--- Philosophy | 4 History and Styles | 5 Tai Chi Today | 6 2 | Getting Ready | 7 What You Should Wear | 7 Rules and Etiquette | 8 3 | The Tai Chi Class | 10 What Happens in a Typical Class | 10 How to Get the Most Out of Each Class | 11 4 | Warming Up | 13 Warm-Ups | 13 Breathing Warm-Ups | 16 Stretches | 16 Cooling Down | 18 5 | Stances | 20 Basic Body Movements | 20 Stances | 21 Zhang Zhuang | 22 Games with Zhang Zhuang | 23 Stance Sequence | 24 6 | Practicing Tai Chi | 29 Silk Reeling | 29 Push Hands | 32 Sparring | 35 7 | Conditioning | 36 Physical Conditioning | 36 Mental Conditioning: Meditation | 40 8 | Advancing in Tai Chi | 42 The Five Keys to Advancing in Tai Chi | 42 The Five Levels of Tai Chi | 43 Conclusion | 44 | Acknowledgements | 45 | About the Author | 47 # one WHAT IS TAI CHI? Tai Chi is a Chinese martial art that was created about 400 years ago. It was originally created for combat, but many people practice Tai Chi to improve their health, calm their mind, and better their life. ## PHILOSOPHY Tai Chi is based on the notion that all things have a way of balancing themselves through the laws of yin and yang. Yin and yang are opposites, and nothing exists without its opposite—up and down, left and right, front and back, hard and soft, fast and slow. Yin and yang are complex ideas. You may not understand them completely right at the beginning, but these important terms will become clearer to you as you practice Tai Chi. Yang is an active, giving energy. People also think of it as the light in the duo light/dark or the male in male/female. Yin is a passive, receiving energy. It is the dark in light/dark and the female in male/female. It is important to understand that each person has both yin and yang energy and yin and yang aspects. The flow of energy in a person's body is called qi. This energy is constantly changing—moving in and out of balance, ebbing and flowing. 
Like yin and yang, qi is something you will learn to understand better the longer you practice Tai Chi. Tai Chi balances your internal energy, your yin and yang. Tai Chi's exercises are made up of a series of complicated movements that embody the opposition of yin and yang. When you practice Tai Chi correctly, you will experience both effort and tranquility, agility and power. Tai Chi provides the benefits of exercise without the dangers of high-impact sports. It is famous for building energy and helping you recover from injuries. It can strengthen your joints so that you are less prone to injury. It can also increase the level of oxygen in your blood (which helps your athletic performance) without the stress of other training methods. Tai Chi will help you get your body and mind working together as one. Your breathing will improve. You'll have better balance (because of the motor skills you practice), better emotional health (from the peace and serenity you'll find in practicing the slow, measured movements), and heightened senses (from your concentration on the movements), and you'll be able to deal with pain better (through deep breathing and better body alignment). Along with these mental and physical benefits, Tai Chi is also a great method of self-defense. It uses the laws of physics and the principles of circular movement to help you balance the forces within you against the forces coming from an opponent. In Tai Chi, you deflect, absorb, and redirect forces brought against you. This makes it possible for you to defend yourself against an opponent with superior strength. In fact, the stronger your opponent, the more energy you have available in a countermove. Tai Chi also emphasizes evasive techniques—ways to skillfully avoid attacks—as a healthy alternative to combat in our often confrontational world. Tai Chi is one of the best things you can do for your body. It requires minimal physical effort and can help you achieve a relaxed, calm mind. 
Do you want to jump higher, run faster, and lift heavier objects? Tai Chi will help you develop those skills in a safe, long-lasting way. Tai Chi does all this, plus it looks cool! Yin, Yang and Communication Yin and yang represent complementary opposites. In the Tai Chi class, there are two possible forms of communication—yin and yang. If the teacher is speaking, he or she is yang. If you were speaking at the same time, then you would also be yang. Like two palms pressing against each other with equal strength, this can lead to a conflict—in any case, it makes it difficult to learn anything! If one person becomes yin and listens, you can have a conversation. In class, the teacher usually takes a yang role, while the students should adopt a yin approach to learning. ## HISTORY AND STYLES In order to get a better understanding of Tai Chi, let's take a quick look at its history. Tai Chi, or Tai Chi Chuan, translates literally as "Grand Ultimate Fist." Tai Chi started as a style of Chinese boxing. It was created in the late Ming dynasty (1600s) by the respected general, Chen Wangting, who combined Chinese medicine, the energy principles of qi, controlled breathing, and the theory of yin and yang with boxing methods developed by the famous general Qi Jiguang. The original Chen boxing style featured a balance of fast and slow—and hard and soft—movements that were both excellent for self-defense and promoted good health. Chen Wangting's family kept Tai Chi secret for several centuries. It seems likely that the Chen family form of Tai Chi is the original source for all the other styles (such as the very popular Yang style) that have developed since then. It's different from the others because it includes both soft and hard movements. Chen style is also unique in its use of "coiling" or "twining" (also known as "silk reeling") movements, or chan szu jing, that create enormous power. While there are many styles of Tai Chi, there are also different styles of Chen Tai Chi. 
The most enduring version was developed by Chen Changxing and is known today as the Lao Jia or "Old Frame." The Lao Jia style exhibits the long, flowing movements characteristic of Northern Chinese boxing. Different Styles? Different styles of Tai Chi have grown out of the original Chen style. All are based on similar concepts. Chen style retains its roots in martial arts, featuring a balance of both slow and fast movements. Yang style, the most popular, was founded by the first non-Chen family student, Yang Lu Chan. The style features more slow movements with an emphasis on an even rhythm and deep softness. The third most popular style is Wu. Derived from Yang style, it features highly symmetrical postures and also emphasizes slow movements. Because of the faster, more vigorous movements in Chen style, kids are especially attracted to Chen Tai Chi. The movements in this book are derived largely from Chen style, but help develop the proper skills for any Tai Chi discipline. Chen Fake, a Chen family grandmaster and genius from the middle of the last century, created the Xin Jia or "New Frame" version of the Lao Jia forms. While very popular today, Xin Jia is considered more difficult to learn because it uses a lot of twining movements. A third form of Chen Tai Chi, Xiao Jia or "Small Frame" style, features more compact movements and is gaining in popularity. Regardless of style, each version of Chen style sticks to the original concepts of chan szu jing, the silk reeling energy. ## TAI CHI TODAY It may look easy at first, or maybe not as exciting as some other sports, but learning Tai Chi's slow, measured movements can be both challenging and inspiring. The mental and physical benefits of Tai Chi greatly outweigh the difficulties. This book presents Tai Chi basics, such as stances and footwork, introduces Tai Chi training, and provides fun solo and partner Tai Chi exercises. 
We hope that you will enjoy learning about Tai Chi and applying the cooperative techniques it promotes. Words to Know Tai Chi —Grand Ultimate, (pronounced "tie chee") Tai Chi Chuan —Grand Ultimate Fist, (pronounced "tie chee chwan") Yin & Yang —The opposites of all things, dark/light, light/heavy, left/right, slow/fast, empty/full. Qi —Life force, the internal energy that fuels all living things. (pronounced "jee") Chen Wangting —The Ming dynasty (late 1600s) founder of Tai Chi. Chan szu jing — Silk reeling energy, (pronounced "chaan zhu jing") Zhang zhuang —Standing with intent and fullness. Tui shou —Push hands, (pronounced "twee shou") # two GETTING READY In this chapter we'll discuss how you can prepare yourself for a Tai Chi class. Not only should you know what to wear, but it's also important to understand the attitude and etiquette you need to bring to each class. ## WHAT YOU SHOULD WEAR What you wear to class depends on the school you go to, so it's important to ask your teacher what he or she prefers the students to wear. Some teachers take a more traditional approach: in these cases the student would wear a designated uniform. Other teachers are a bit more relaxed about uniforms but require a school T-shirt that might be used as a class uniform. Most teachers recommend some kind of cotton outfit. Regardless of dress code, you should wear clothing that is comfortable and loose-fitting because it is important that your clothing does not get in the way of your movement. The clothing you wear to Tai Chi class should be no different from most exercise clothing, which allows the body to move freely. Here are some suggestions. Top The top should not be too tight. It should be a relaxed fitting shirt that allows for a full range of motion. A traditional Chinese martial arts top can be worn, but sweatshirts and T-shirts are fine as well. Cotton fabrics are usually a good choice because they absorb moisture. Rayon-cotton blend fabrics also have a nice, smooth feel. 
Bottoms Your bottoms should be made of a similar cotton or cotton-rayon blend. They can be long pants or even shorts. They should fit nicely at your waist and should have enough room in the thigh, hip, and groin areas for easy movement. These bottoms should allow space for spreading your legs apart, because this type of movement is necessary for Tai Chi. Undergarments For boys, snug-fitting underwear will provide proper support. Any athletic sports wear is suitable for girls, but a good sports bra or snug T-shirt is also a good choice. Shoes Shoes should be low-cut. Indoor soccer sneakers, table tennis sneakers, or even skateboard shoes are recommended. These types of shoes usually have flat soles and provide the right traction; their light weight also makes them ideal for Tai Chi practice. Flat-soled sneakers provide more stability for the constant foot pivoting and stomping required by Chen Tai Chi. Basketball or running shoes have too much insole padding and a curved exterior rubber heel that prevents the natural feel your feet need when practicing. Other martial arts shoes, such as kung fu shoes, are flat-soled but probably do not have enough arch support for the foot stomping executed in Chen Tai Chi. ## RULES AND ETIQUETTE Rules and etiquette vary from teacher to teacher. But there is one constant: you must always show respect for your teacher and classmates. Here are a few rules of etiquette for proper behavior in class. How to Act toward the Teacher Teachers are given the highest level of respect in any martial arts school. They are usually greeted by their students with a salute. You perform this salute by placing your left palm (open and flat) over your right clenched fist and then bowing by leaning slightly forward. Paying attention to your teacher's instructions is important, because Tai Chi can be very complex and has many small details hidden within the movements. 
Above all, you should always demonstrate a high level of respect for your teacher—expressing your appreciation for the privilege of learning this specialized art. How to Act toward One Another You should treat other students in your class as you like to be treated yourself. A great deal of camaraderie—a strong sense of brotherhood and sisterhood—develops in most martial arts schools. There is also a strong sense of hierarchy that reflects your ability and commitment. Class members may be identified as older brother, younger brother, sister, uncle, and so on. Your teacher determines your rank, based on your hard work, ability, and persistence. A second-year student, for example, will usually be senior to someone with only one year of training experience. Unlike Karate, which uses colored belts to illustrate hierarchy, Tai Chi schools don't have formal symbols or insignia. Those chosen, however, share a strong bond of profound respect for their teacher, one another, and their art. When you salute, you are showing more than respect for your teacher and Tai Chi. The fist represents power and strength, while the open palm represents humility and respect. It is important to have both of these qualities at all times. # three THE TAI CHI CLASS Traditional Tai Chi schools are centered on a main training hall. This room should be wide—as most Tai Chi forms travel from right to left. There should be plenty of room to move in all directions. The centerpiece of the main training hall is usually an area devoted to a display of the school's teaching lineage. Here students can show respect to the ancestors of their chosen Tai Chi style. The formal arrangement of the classroom should serve as a constant reminder that you should demonstrate respect for the room and your teacher when you enter. ## WHAT HAPPENS IN A TYPICAL CLASS Classes usually start with standing practice (zhang zhuang, see page 22). 
These are stationary exercises in which you hold a specific posture for an extended period of time. They can help you develop proper body posture, leg strength, and the upper body relaxation necessary to perform Tai Chi movements. They both set the tone for the whole class and help you focus on your practice. They also sharpen your mental concentration, develop your experience of qi (internal energy), and stimulate the circulation of this energy. Zhang zhuang is probably the most important part of your training, because it builds the foundation for strength as well as focus. After standing practice, most classes move into warm-ups and stretching (see page 13). The more complicated "silk reeling" or chan szu jing exercises (see page 29) would be the last step in a warm-up sequence. These are motor skill sequences taken from the traditional choreographed forms. You'll also practice the basic movements of Tai Chi. After these exercises are completed, most classes will practice forms (or sequences), followed by partner exercises or sparring such as tui shou (push hands). When your class ends, it is customary to line up with your classmates facing your teacher. You will repeat the salute you learned a few pages ago—place your right fist against your left palm and bow. The beauty of Tai Chi is that each person develops at his or her own pace. There is no rush to learn quickly; you should simply follow your own path and schedule. If you try your best, you will get the desired result. ## HOW TO GET THE MOST OUT OF EACH CLASS Though your teacher will play a crucial role in your development of Tai Chi, it is also important that you take responsibility for yourself and your own learning. Here are some important things to remember that will help your Tai Chi. Stay Safe The safety aspects of Tai Chi are part of the actual movements—if you are respectful of the movements and your fellow classmates, you shouldn't get hurt. 
Because you practice sequences at slow speeds—focusing on the accuracy of your movements—the risk of injury is relatively low. Sparring is also relatively safe—the objective is to unbalance your opponent, rather than strike him or her. You'll learn how to fall safely to the ground—along with controlling your partner—as part of the push hands exercise. Make sure to keep the practice area clear of objects that might get in the way or injure someone. Tai Chi stresses nonconfrontational behavior. Bruce Lee said, "If you don't get hit, you don't have a reason to hit." This is important to remember as you learn Tai Chi. Start Small As you begin your journey toward learning Tai Chi, remember that you are traveling without knowing where you are going. Until you've practiced the movements many times, it is impossible to know how it feels to do them correctly. Take the time to take small steps. As you feel your body getting stronger, slowly set your goals higher, and make your movements bold and more expressive. The Main Elements of a Tai Chi Class 1. Zhang zhuang 2. Warm-ups and stretching 3. Chan szu jing 4. Basic Tai Chi movements 5. Form or sequence practice 6. Partner exercises Stick to Your Routine Try not to change around too much. Once you've learned something, practice it daily in a routine that stays the same. You can try variations of Tai Chi games so you don't get bored, but always give time to the same routine—so that you can see and feel the improvement as you progress. Don't Overdo It As always, listen to your body when you learn something new. Pay careful attention to the way your body is reacting. Sometimes we get carried away with practice, and don't pay attention to the way our bodies feel. Make an effort to notice when something doesn't feel natural or hurts. It could mean that your body is being overworked or that you're practicing incorrectly. If this happens, you should back off. 
You won't be able to practice if you're hurt, and you won't improve if you can't practice. Be Aware of Your Body Your body has natural ranges of motion. When you go beyond them, your body will feel pain. Be careful not to bend so far that you can't move into another position without straining. If you can't move comfortably, you've probably gone too far. Be Patient Don't try to advance too quickly. Tai Chi requires much more patience than other martial arts, because it's so complicated. You have to develop many different skills slowly—as you increase in knowledge and understanding. The journey is a long one, but think of it as a pleasant cruise, because when you get to where you're going, the parts that seem most difficult will seem clear, easy, and fun! Try Your Best The beauty of Tai Chi is that everyone develops at their own pace. Because the forms are practiced slowly, you can track the gradual progress you make. Tai Chi fosters progressive development: as you learn the postures, your body becomes stronger. There is no rush to learn quickly; you should simply follow your own path and schedule. If you try your best, you will get the desired result. Growth in Tai Chi is personal. Progress is measured by perseverance and patience, both of which you'll develop as you learn the basics of Tai Chi. Have Fun Don't forget to laugh at yourself when you make mistakes. Making mistakes is part of learning, and it's important to find the humor in why you couldn't stand on one leg longer than your friend. If you're competing with each other, make sure it's a cooperative game rather than being about winning. Push hands practice, for example, is about cooperation, not about who gets defeated. As you get better, you'll see that a good match is one where both you and your partner go for a long time without stopping! So have fun! # four WARMING UP You should start every Tai Chi class with warm-ups. Getting your body ready will help you to avoid injuries. 
And warming up gets you mentally prepared, which will help you get the most out of your class. ## WARM-UPS These simple warm-ups will get your body moving and ready for stretching. Warming up your body before doing any stretching will help you avoid injuries—it's better for your muscles if you're at least breaking a sweat. Any form of body movement can be used, but here are a few exercises you can try. KNEE ROTATION—FEET APART You start this exercise by opening your legs shoulders' width apart and placing your hands directly under your knees. While supporting your knees with your hands, begin to circle both knees in opposite directions, first moving them out to in and then in to out. Repeat this exercise 8–12 times. Knee rotation—feet apart KNEE ROTATION—FEET TOGETHER Here you repeat the same hand placement as for the feet apart knee rotation, only now both knees circle to the right—and then to the left—at the same time. Knee rotation—feet together SIMULTANEOUS WRIST AND ANKLE ROTATION Place the toes of your right foot on the floor to the side of your body while lacing the fingers of both hands together. Rotate your clasped hands while also rotating your ankle, changing the direction of the rotations after 8–10 repetitions. Simultaneous wrist and ankle rotation TRUNK TURNS Start with your feet shoulders' width apart. Relax your arms to the sides of your body. Twist your waist from left to right while allowing your arms to freely swing across your chest area. In this version of the exercise, your feet are facing forward and are firmly planted on the ground. Trunk turns In the second version of the exercise, shift your weight from one leg to the other as you lift the toes of each foot off the floor, pivoting on your heel, one side at a time. Continue with the same arm movement as you twist your trunk. 
SHOULDER SLAPS Still with your feet shoulders' width apart, swing your arms freely across your chest as if you were hugging yourself, then swing them back as far as you can. Shoulder slaps BACK SLAPS Swing your arms up and over your shoulders, slapping the backs of your shoulders with your fingers. Then, swing your arms back down and behind you. Swing them back to the front at the end of every swing. Back slaps WRIST COILING With your legs shoulders' width apart, place the back of your right wrist on top of your right hip and circle your hand toward your navel, completely twisting your hand around at the wrist. Repeat with the other hand. Then, twist your hands forward, then backward for 8–10 repetitions with each hand. Try to move your hips and elbows in the same direction of your wrist as it rotates, with your knees bent. Wrist coiling ## BREATHING WARM-UPS These breathing warm-ups will help to focus your mind for Tai Chi practice. They'll also help to generate the internal energy, or qi, you need for the Tai Chi movements. PALMS ON BELLY Try this simple technique to make sure that you're breathing fully and deeply. Place both palms with the fingers spread widely on your lower abdomen. Place your tongue on the roof of your mouth, and inhale and exhale through your nose. As you inhale try to push your hands and fingers apart with each inhalation. When you exhale, close your fingers, pushing the air out. Your hand movements will imitate the inflating and deflating of a balloon. You should do this in the zhang zhuang standing position (see page 22); it can also be practiced lying down or seated. Palms on belly DIAPHRAGM BREATHING Once you feel comfortable with the first breathing exercise, try it without your hands on your belly. Instead, imagine that your hands are still resting on your lower abdomen. Practice pushing the belly out and in. This can be done with your hands hanging at your sides. 
## STRETCHES Now that your body is fully warmed up, you can begin your stretches. It's important to remember never to stretch past your limit. Listen to your body. If you're uncomfortable, adjust to a more comfortable position. Perform the following stretching exercises in the order they're listed in, because each exercise is progressively harder. SQUAT STRETCH Stand with your feet shoulders' width apart. Slowly sink into a sitting position. Your feet should be flat on the ground from toe to heel. Your toes should point somewhat outward. You can place your hands on the floor directly in front of you for balance. Hold this position for 30 seconds, and then rise slowly, using your hands to push off the floor, if necessary. Squat stretch HAMSTRING ROWS Stand with your legs about three feet apart. Fold your arms across each other, and extend your elbows reaching down in front of your body. Allow your arms to slide down toward your knees, coming as close to your shins as possible, and then slowly bounce up to repeat. This is similar to rowing a boat. You should bring your arms down and up in a circular fashion, going from out to in and then from in to out. Slowly "row" at least five times. Hamstring rows SLIDE STRETCH Start from a standing position. Shift your weight to your right leg and sink down as far as you can, extending your left leg out to your side. Place both hands in front of your body, leaning slightly forward. Your body weight should be supported equally on both arms. Gently shift your stance to the left leg as you extend the right leg to the other side. Hold the posture for 20 seconds, then switch to your other leg. Slide stretch HEAD TO KNEE Stand with your legs about three feet apart. Reach toward your right leg with your arms and torso, grabbing your lower leg for support. Use your arms to pull your body down toward your leg. When grabbing the support leg, be sure to reach for the calf muscle. 
Keep your forehead facing the leg so that you don't strain your neck. Hold this position for 20 seconds, then repeat the exercise on the opposite side of your body. Head to knee CIRCLE ARMS OUTSIDE IN Stand with your feet together and your arms at your sides. Raise your arms up and over your head slowly in a large circular motion. Inhale deeply from your diaphragm as your arms rise. Now, place your palms together over your head and lower your hands down in front of your body with the palms together. As your palms pass your waist, allow them to separate and move next to your legs, with your palms facing your outer thighs. Repeat this twice. Circle arms outside in ## COOLING DOWN It's almost as important to cool down properly as it is to warm up. At the end of any practice, whether it covers forms, silk reeling, standing, or push hands, the breathing sequence is an excellent closing exercise. CIRCLE ARMS INSIDE OUT Continue from the first posture, standing with your feet together and arms at the sides of the body. Allow your palms to come together forming a cup under your navel, as if you're cupping water in the center of your hands. Slowly raise your arms up and over your head. Inhale deeply from your diaphragm as you raise your arms. Now release your palms over your head and lower your hands down to the sides of your body with the palms gently going down, ending by your legs with your palms facing your thighs. Circle arms inside out CIRCLE ARMS OUTSIDE FRONT Continue from the second posture described above. With your feet together and your arms at the sides of your body, allow your palms to come together forming a cup under your navel, as if you're cupping water in the center of your hands. Gently raise your arms forward and upward to your chest. Gently turn the palms of your hands inward so that they face you. Open your palms over your head and lower your hands down to the sides of your body. 
Your palms should move gently down to the sides of your legs with your palms facing your thighs. Next, inhale deeply from your diaphragm and raise your arms again. Turn your palms over so that they face the ground as your hands gently descend down past your waist and to the sides of your body. Circle arms outside front # five STANCES Once you've warmed up, you'll start to practice your stances. Perfecting these stances will give you the strength and coordination you need to progress in Tai Chi. For example, we recommend practicing the stances in this chapter before trying the silk reeling exercises. ## BASIC BODY MOVEMENTS Tai Chi stances are made up of several subtle body movements. Let's get started by discussing some of these essential elements. Stepping It's important to remember to step with your heel first when moving forward and to step your toe first when moving backward. When stepping to the side, step with your heel first and pivot your foot until both toes face generally forward. You should feel very solid and stable in these stances. As you step, don't try to move too fast. Allow your weight to transfer only after you have secured your step. Arm Movements Your arms should always be relaxed so that you can move from one position to another with ease. As you move your arms, be sure that you're coordinating your upper body with your lower. Be alert for tension in your shoulders, and relax them by slowly dropping your elbows. Leg Movements Your legs should always feel comfortable with the direction they are bending in. If something feels unnatural as your legs move from one place to another, adjust your position until the move feels right, and then continue. Test your leg strength by extending and retracting your legs in different stances in the forms or silk reeling exercises. Head Position Your head controls the direction of your body, so watch its position. Looking down, for example, can throw you off-balance. Keep your head up. 
It's good for your alignment and general balance. Use the bean bag games that follow to develop good posture and body alignment. Key parts of the body in Tai Chi movement Waist controls Eyes direct Hands follow Hips and Groin Your hips and groin usually open (your legs spread apart) as you bend your knees and sink (lower) your body. Try placing your fingers on your hips as you bend at the waist to feel your hips sinking. As you get more experienced, you'll feel that the movements come not from the knees, but actually from the hip sinking and groin opening, as if you were sitting on a chair. Note that when you're moving, your groin usually opens, and when you are settling into a posture, it closes. Knees As you bend your knees, be sure they do not pass the front of your toes. If you bend your knees too far, your body will feel strained. This is your body talking to you—saying that you have gone too far! Adjust your feet if you have trouble moving the leg that is not bearing your weight. Your knees should bend naturally and comfortably. Hands The hands play a very special part in Tai Chi movements. They guide the body, showing it where to go. Think of your hands as engines of a train, pulling your body along. As you use your hands to guide and pull your body through their motions, be sure to keep all of the cars connected. Your hands should always be relaxed and soft. Eyes Your eyes also play a key role in Tai Chi training. They direct your body where to go and how fast. They should always be alert and sharp. As you move your hands, your eyes should follow your movement attentively, always remaining coordinated with the speed and direction of your movements. ## STANCES Proper stances are largely responsible for the power for which Tai Chi is famous. This is especially true if you practice your stances diligently early in your training. 
When performing all the stances, be sure that your hips are open and your feet are flat—as if you were grabbing the ground with your feet. In the single-leg stance be sure that your support leg is always slightly bent. You should always feel as if you can move easily from one stance to another. If this is difficult, it might mean that you have stepped too widely and should take a narrower stance. FRONT TO BACK STANCES With your hands on your waist, transfer your weight from your right leg to your left leg. Pivot your right foot slightly forward as you transfer your weight to your left leg. Your back leg should be directly behind your right shoulder. Keep your weight on your right leg and hold this posture for 30 seconds. After 30 seconds simply shift your weight to your right leg and hold for 30 seconds. Front to back stances SIDE TO SIDE STANCES With your hands still on your waist, transfer your weight to the right leg and step to the left side of the body. Keep your weight on the right leg while holding this posture. With your hands still on your waist, transfer your weight from the right leg to the left leg. Keep your weight on the left using the same weight distribution as in the other previous side stance posture. Also hold this posture for 30 seconds. Next bend both legs as if you're sitting on a stool with most of the weight on the right leg. Keep the weight on the right leg and hold this posture for 30 seconds. Repeat. Side to side stances ## ZHANG ZHUANG Zhang zhuang, or the basic standing exercise, is one of the first things taught in Tai Chi. It requires a great amount of focus, concentration, and patience to execute properly. When done correctly, zhang zhuang practice produces a steady focus, while building internal strength and proper body alignment. 
ZHANG ZHUANG REQUIREMENTS AND TECHNIQUE Stand with your head erect, feet shoulders' width apart, legs slightly bent, and arms reaching out with bent elbows, just below chest height, as if gently hugging someone. Touch the roof of your mouth with your tongue, while keeping your mouth gently closed. Do not allow your weight to rest too much on your heels or the balls of your feet. Keep your ankles straight, maintaining an even distribution of weight on your feet. Relax your chest and shoulders, allowing your elbows to drop slightly, with your palms facing your chest. Zhang zhuang While standing in the zhang zhuang position, focus on an object, any object, about 10 feet in front of you and at eye level. Use that object as a focal point to concentrate on. Let your knees bend in response to the relaxation of your hips and waist. However, it is very important that you not let your knees bend too far forward past your toes, but rather keep them over the middle of your foot, just in front of your ankle. Your thigh muscles should be engaged throughout the exercise. Try to feel your spine suspended and open, allowing its natural curves to manifest themselves. Your shoulder blades will gradually curve away from your spine as you sink into the posture. It is common for the body to move by itself as the muscles relax. Come out of this posture slowly, letting your hands sink down and your legs slowly straighten. Savor the peaceful sense of grounding and absorb the energy it creates. The goal in practicing zhang zhuang is not to hold the posture for as long as possible. Instead, you should begin with a 5-minute pose. Add 1 minute every week, up to a maximum of 20 minutes for beginners. ## GAMES WITH ZHANG ZHUANG You can use beanbags as a fun training tool that helps you pay close attention to the proper postures that are crucial in Tai Chi. The beanbag becomes an extension of your body that must stay "connected" as you move through proper positions. 
If your body is in the wrong position, the bag will let you know by falling off your head! BEANBAG ON HEAD Starting from the zhang zhuang position, place a small beanbag on top of your head. Try to keep it in place for 1 minute without dropping the bag. As you improve, you can add more time to this exercise. BEANBAGS ON HANDS While standing in the zhang zhuang position, place one beanbag on the back of each hand, resting the beanbag on top of your thumb and first finger (index finger). Your hands should be positioned as if you are holding a cup—imagine how a cup would fit into the shape of your hand. Try to keep your arms extended while maintaining a slight bend in your elbows. Hold this position for 1 minute without dropping the beanbags. As you improve, you can add more time to this exercise. Pay attention to your shoulder tension. Be sure to relax your shoulders so that they are not shrugging upward. Also, as noted, focus on an object at eye level that's about 10 feet in front of you. ## STANCE SEQUENCE This routine will help you understand how the stances blend together into basic sequences of movement. This sequence should be practiced slowly with emphasis on precise movements and proper alignment. Tai Chi teachers often use yin and yang to represent different sides of the body—yang indicates the weight-bearing side of the body, while yin indicates the opposite or non-weight-bearing side. Stance sequence step 1 1. Pivot your right foot. 2. Step forward with your left foot and "sit" into a back stance. (See the description of the back stance on page 21.) 3. Transfer your weight forward onto your left leg so that you end up in a front stance. 4. Bring your right leg forward, placing the ball of your foot slightly ahead of your left foot. Make sure that your right leg is directly in front of your right shoulder and not in front of your left foot. Stance sequence step 5 5. Raise your right knee up to waist height, balancing on your left leg. 
Stance sequence step 6 6. Place your right heel down on the floor in front of your right shoulder. Stance sequence step 7 7. Turn your torso to the right, making sure that your shoulders and hips are facing in the same direction. Bend your knees into a cross-legged sitting position. Stance sequence step 8 8. From the sitting position step out to the left side of your body with your left foot, keeping your weight on your right leg. 9. Transfer your weight to your left leg very slowly. Stance sequence step 10 10. Step with your right leg behind your left leg and turn to the right. Make a half turn so that your body faces the opposite direction. Stance sequence step 11 11. Step back with your left leg as you extend both hands forward, with your palms facing up. Stance sequence step 12 12. Step back with your right leg as you bring your hands back to your waist, standing with your feet together. Stance sequence step 13a Stance sequence step 13b 13. Circle your arms up to your sides, then down, until they reach each side of your body. 14. Repeat the sequence in reverse, to train the opposite side of your body. In all Tai Chi movements, stay loose and don't hold any tension in your body. The guided imagery exercises on page 41 can help. The practice of Tai Chi is built around sequences of repeated movements. The body builds a physical memory of these movements as you practice them. As you improve, you will be able to sense when your body moves perfectly and when it is "off." This is one physical benefit of learning Tai Chi—knowing when your body is connected and when it is not. Once you can feel and tell the difference, you are well on your way to understanding the true nature and value of Tai Chi. Picturing your body as a tree is an effective way to practice stances. Imagine that your feet are the roots, your legs and torso are the trunk of the tree, your arms and head branches, and finally your fingertips are the leaves. 
When wind hits a tree, it absorbs the wind in its leaves by swaying left or right, deflecting strong winds and usually surviving storms. The more the wind blows, the more power the tree stores. This is true of Tai Chi as well: the more you practice, the stronger you get. Remember to step with your heel first when moving forward and with your toe first when moving backward. Keep your arms relaxed so that you can move from one position to another with ease. Remember that your legs should always feel comfortable with the direction they are bending in. Keep your head up. Keep your hands on your hips and feel your hips sinking when you bend. Remember that your knees should bend naturally and comfortably. Remember that your hands guide the body. Keep your eyes alert and sharp. # six PRACTICING TAI CHI After building a foundation with stances, your training will progress to silk reeling exercises. These help develop the connection between the leg strength developed during stance training and whole-body movements. ## SILK REELING In silk reeling the idea is to connect all parts of your body, leading with your waist as you move your weight from one part of the body to another. Think of your upper and lower body as two bottles—one full of water, the other empty. As you fill the empty bottle (upper body) with water (transfer your weight), the water needs time to pour into the other bottle. Your job is to use your waist to balance this pouring and avoid any breaks in your movement. When the full bottle (lower body) pours its water completely into the empty bottle (upper body), your waist will have transferred your energy from your lower to your upper body as you move. This image also describes the transfer of weight and energy from the left to right sides of your body. SINGLE-ARM SILK REELING 1. Place your left hand on your left hip as you sink your weight onto your left foot. 
Single-arm silk reeling step 1 As you sink, raise your right arm so that your palm is facing up with your fingers pointing away from your body to the left corner. Now, step out to the right side of your body, with your feet wider than shoulders' width apart. Keep your weight on your left leg. 2. Next, start to "wipe" your right arm to your right as you transfer your weight to the right side of your body. Continue the circle until you come back to the right side of your body once again. Now switch your arms by placing your right hand on your right waist and extending your left hand so that it is in front of your right shoulder. Repeat the same circular movement on the left side of your body. Single-arm silk reeling step 2 DOUBLE-ARM SILK REELING 1. Starting with your feet together, pivot your right foot to your right, then shift your weight to your right leg. Now, step to your left with your left leg, with your left heel touching the floor behind your left ear. Gently bring your arms out to the right side of the body as if pushing to the right. Your arms should be chest high. Double-arm silk reeling step 1 2. With your weight on your right leg, side wipe both arms to your left. As they pass your right hip, start to transfer your weight to your left leg, guiding your hands in front of your body so that they glide from right to left. Double-arm silk reeling step 2 3. Now gently glide your hands back and continue to circle them back to your original position. To move to the other side of your body, simply pivot your left foot to your left while your weight is on your right foot, and continue with another wipe. Double-arm silk reeling step 3a Double-arm silk reeling step 3b The essence of Tai Chi practice is the development of chan szu jing (silk reeling energy). Both in theory and in practice, chan szu jing can be considered a tool for refining your Tai Chi movements. 
As you work at developing the qualities of chan szu jing, remember that your goal is to build efficient movement, directed by your waist and powered by your legs. ## PUSH HANDS Push hands (tui shou) is one of the best examples of the concept of yin and yang in action. It is, in fact, a physical manifestation of yin and yang. When you practice push hands be sure to pay attention to the direction of your internal energy. Try to imagine where you are sending your force as you circle your arm. Be sure to coordinate the movement of your limbs with your waist, connecting your whole body to the movement. In partner push hands try to feel the direction of your opponent's force. Also try to stay aware of the constant shifting of your weight during pushing and absorbing movements. SINGLE-ARM PUSH HANDS Stand with your feet together and your arms gently at your sides. Shift your weight to your left leg, pivot your right foot out, and then transfer your weight back to your right leg. Next extend your left leg forward, stepping by placing your heel down first. Your foot should be placed forward, diagonally and to the left. Now, extend your left arm out so that it is chest high, with your fingers facing up and your palm facing in. Your right hand is placed on your hip. Gently shift your weight from your right leg to your left leg as you circle your extended hand horizontally. Continue to circle your arm by bending your elbow as you transfer your weight back, and extending it as you go forward. Repeat this five times. Single-arm push hands Then, switch to the other side of your body by pivoting your left foot out, transferring your weight onto that leg, and stepping forward with your right foot. Be sure that your right leg is not directly in front of your left leg. It should be to the right, just outside your left heel in a forward diagonal position. Repeat this five to ten times. To finish, simply step back, bringing your leg back to your original standing position. 
Place your arms at your sides. Remember: when you extend your arm, your palm should face forward. When you pull your arm back, your palm should face your chest. DOUBLE-ARM PUSH HANDS Start as in the previous exercise, but sweep both your arms across your torso at chest height. Your right leg should move forward at the same time that you sweep your arms across your body. Your arms should move simultaneously, in the same position. It should feel as if you're moving just one arm. PUSH HANDS WITH A PARTNER This exercise is the highlight of Tai Chi. When you do it, you learn about the absorption and redirection of qi. Your emphasis should not be on who can push the hardest—but on who can neutralize or counter their partner the best. It can also be played as a sort of game, by assigning each of you a role as either yin (absorbing) or yang (pushing). DOUBLE-PALM PUSH While facing your partner, push both of your palms against the palms of the other person, standing with your feet shoulders' width apart. This game teaches you the principles of absorbing incoming forces. The goal is to avoid falling off-balance, while sinking your hips. It's fun to try to regain your balance. You can even keep score by awarding points to the one whose feet stay in their original place. DOUBLE-PALM STRIKE While facing each other and using the same positioning as in the double-palm push, strike both of your palms against your partner's as you stand with your feet shoulders' width apart. This is a variation on the first game, but requires a different approach to absorbing the incoming force. The goal again is to sink to retain your balance. Double-palm push and strike SIDE PULLING With you and your partner facing in opposite directions, stand in a wide stance, holding each other's left hand at your left sides. Once in the stance, try to upset your partner's balance by pushing forward or pulling back. If your partner loses his or her balance, switch sides (hands) and try again. 
Side pulling PUSH HANDS WITH BALL Using a ball in Tai Chi games can help you develop manipulation skills. This is another great example of seeing yin/yang in action within your Tai Chi practice. This game can be played with any ball the size of a volleyball or basketball. Start by facing your partner, arm's length apart with your feet shoulders' width apart. Next, step forward with your right foot, and extend your right palm. The ball is placed between your hand and your partner's hand. Now, begin circling the ball in all possible directions without losing control. Push it back and forth and up and down until someone loses contact with the ball and it drops. Each pair can be a team—and the team who keeps the ball "in play" the longest is the winner. Push hands with ball In push hands, your elbows should stay near your hips. Another variation would be to use only your inner wrist while trying to maintain contact with the ball. This variation is much more difficult, so you should start with the palm version. A third version involves using only your forearms to rotate the ball without dropping it. Still another variation of this game is to toss the ball using just your forearms. Your partner then has to catch the ball using only his or her forearms. In this variation, you and your partner should stand about 6 feet apart. Increase the distance as you get more accurate in the game. ## SPARRING Tai Chi is usually considered a "moving meditation," but it has its roots in martial arts, and advanced practitioners study sparring techniques. Sparring practice usually starts from the base of tui shou. Traditionally, once you have perfected five increasingly sophisticated levels of push hands, you'll begin to practice sparring. Live sparring usually begins out of the free-stepping learned in push hands training, which is usually introduced at the intermediate level. Fundamental (Chen style) Tai Chi sparring is akin to upright wrestling. 
It then progresses to close-quarters striking before moving toward advanced combat practice with an emphasis on body and joint locking. For even more fun, develop yin and yang teams. When you notice one team getting better at their role than the other, switch roles. You'll need to practice Tai Chi for a while before you start training in sparring, but the pushing and pulling exercises in this chapter are an important part of the necessary skills. Tai Chi helps you get your mind and body working as one. You'll have: Better breathing from practicing deep breathing Better balance from working on your motor skills Better emotional health from the peace and serenity that comes from the slow, measured movements Heightened senses from the focused concentration you apply to your practice Better pain management from better body alignment and deep breathing # seven CONDITIONING To succeed in Tai Chi, it's essential to practice. However, along with practicing the movements and postures, it's important to develop your mind and body. The following conditioning exercises will help you strengthen your body and focus your mind. ## PHYSICAL CONDITIONING Tai Chi has unique physical requirements, so most teachers include strength and conditioning exercises in their classes. These exercises will help you develop a combination of balance and strength. They are crucial for strengthening your legs, which are a very important part of Tai Chi. The following exercises address different body parts. Legs, arms, and abdominal strength are all important. STANDING SINGLE-LEG BALANCE Start with your feet shoulders' width apart. Transfer your weight to your right leg, and slowly bring your left knee toward your chest. Grab your knee with your left hand and your ankle with your right hand. Pull your leg as close to your body as you can. Try to hold this position for 30 seconds. Slowly lower your leg to the shoulders' width position. Repeat with the opposite leg. 
Standing single-leg balance TAI CHI SQUATS: SOLO While standing in the zhang zhuang position (see page 22), try to slowly lower your body to a full squat position while maintaining erect posture. Your hands should be extended in front of you as if you're standing in the basic zhang zhuang position. Give yourself 20 seconds to squat and 20 seconds to stand back up. For fun, you can try this with a beanbag on your head. The objective is to keep your head straight and not drop the beanbag. Solo Squats TAI CHI SQUATS: WITH A PARTNER Sit facing your partner and place your feet together so that your toes are touching. Reach across and grab each other's wrists firmly. Once you both have a firm grip, try to stand slowly at the same time. When you both reach a full standing position, try to lower yourselves back to the sitting position without making a sound. This exercise should also be done with a 20-second count. Partner squats WALKING PUSH-UPS: SOLO Stand with your feet together. Squat down to a full squatting position, placing your hands on the floor in front of you. "Walk" forward on your hands to a fully stretched out position (like a push-up position). Lower your body until your chest touches the floor once. Push your body back off the floor, then "walk" back to your original squat position. Stand back up. Repeat the exercise. This time with two push-ups, then stand. Add an extra push-up for every walk-out you do. This exercise not only builds your upper body strength, but also helps you learn about weight distribution as you constantly shift your weight from one arm to the other. You can feel yin and yang working together while practicing this exercise. Walking push-ups: solo 1 Walking push-ups: solo 2 WALKING PUSH-UPS: WITH A PARTNER Start from a lying down position—on your stomach with your hands near the sides of your chest. Your partner should also be on the ground in front of you, with the top of his head facing the top of yours. 
Push up until your arms are fully extended, then walk back to a squat position. Stand up slowly. Next, squat down to a full squatting position placing your hands on the floor in front of you. Walk out to a fully stretched out position (the push-up position) and do one push-up. You and your partner should try to do each of these steps at the same time. Once you're both in the full push-up position, try to lift your right hand off the ground, and give your partner's right hand a quick slap. Lower yourself to the floor, walk with your hands back into the original squatting position, then stand. Repeat with your partner, adding an extra push-up for every walk-out you do. Walking push-ups: with a partner TRI-UPS Lie on your back, and extend your arms directly over your head. Raise your right knee up to your right shoulder as you bring your upper body up to meet your knee. Lie back down, then come up again, this time with your left knee to your left shoulder. The third time raise both knees to meet both shoulders, then lie back down. This counts as one tri-up. Try to do five. On the fifth one, try to balance on your buttocks, holding your legs out with your hands reaching in front of your body. Count to five, then lie back down with your arms on the floor extended back over your head. This exercise builds strength in both the upper and lower abdominal muscles. Again, you can also physically feel the yin and yang of this exercise. Tri-ups Variation For fun, gently pound on your stomach, like Tarzan or King Kong, before returning from the balance position to the lying down position. Tri-ups ## MENTAL CONDITIONING: MEDITATION Meditation is quite difficult, even for adults, especially still meditation (meditation without movement). The following meditation activities will help you focus your mind. These activities are designed both to build a mind-body connection—very important in Tai Chi—and to help you explore your imagination. 
GUIDED IMAGERY While standing in the zhang zhuang position, close your eyes and take an imaginary trip though your body. The key phrase here is "relax my. . . ." Start by thinking "relax my. . . ." Complete the thought with the name of a specific body part. Focus your thoughts on the body part you just named. Start from your feet, then work up to your knees, then your hips, toward your arms, shoulders, elbows, wrists, and fingers, back to your neck, then to the top of your head. Finally, just listen—focus on the sounds of wherever you are—the park, a classroom, and so on. As you listen, start taking the same trip through your body, this time backward, until you finally get all the way back to your feet. Once there, slowly open your eyes. Your trip is complete. Be sure to come out of your posture very slowly. As an alternative: Imagine that as you breathe in your breath is going to a specific part of your body. Then as you breathe out any tension in that area is carried out of you as you exhale. Start with your feet and work your way up to the top of your head. Guided imagery COUNTING Sometimes it's difficult to concentrate or focus on clearing your mind in meditation. So, instead of clearing it, why not fill it? This method will help you focus on something specific. It can be done sitting on a chair with your back upright or in the zhang zhuang position. To start, imagine a screen showing an image inside your forehead. Start with the image of the number 1, flashing in your head for one second. Follow this with the image of the number 2. Count—image by image, number by number—up to number 10. Once you hit 10, start your count again—from the number 1. Breathing is important, so try to coordinate your breaths—your inhalations and exhalations—with each count. Keep your tongue against the roof of your mouth while doing this. # eight ADVANCING IN TAI CHI Advancing in Tai Chi takes dedication and a positive attitude. It also takes a well-rounded approach to the art. 
For example, if you focus on just the positions, you'll probably lose some of the mental focus that complements the physical movements. Here are five main keys to remember for your Tai Chi practice. Follow these guidelines and you're sure to go far in Tai Chi. ## THE FIVE KEYS TO ADVANCING IN TAI CHI Practice As with all things in life, if you want to become good at something, you have to practice. A good method is to take a newly learned form or movement and repeat it ten times in a row, doing it slower and slower each time. Part of the power of Tai Chi comes from moving slowly so that your muscles can move very fast when you need to. But it also helps build up a physical memory, allowing your body to store the movement into its memory banks. Breathing During practice you should try to connect your breathing to the movement. This takes a lot of concentration and timing. So, don't be discouraged if you don't get it at first. It requires you to pay attention to when your body is moving and when it is stopping. You should place your tongue on the roof of your mouth and press your lips together gently. Your breathing should be deep and go in and out through your nostrils. As you breathe pay attention to the rising and settling of your stomach. Overall, when pushing or exerting, exhale. When pulling or relaxing, inhale. Repetition The purpose of repetition is not to simply repeat movement but to improve on every attempt the sequence being practiced. Each attempt gradually improves on the previous one. The trick is to pay attention to how much better each one feels and looks. Always remember that practice does not make perfect—perfect practice makes perfect. Focus Always be aware of details such as balance and connection. When you really focus on the details of what you are doing, your Tai Chi becomes more clear and precise. Try not to let your mind wander. It's very easy to get caught up in the form after you know a routine. 
The hard part is to keep focused on your feet and hands moving as one. This is the goal for all practitioners of Tai Chi. Understanding Think about what you're doing as you do it. Be like a scientist trying to figure out why something works. As you practice slowly you'll find that many things you never noticed start to appear. It's only when you understand the how and why that you'll really appreciate what you just learned. ## THE FIVE LEVELS OF TAI CHI Studying Tai Chi is like going from elementary school through to college. The process of acquiring knowledge is a gradual one. Without the fundamental knowledge acquired in elementary school and high school, the student cannot absorb the lessons taught in college. Learning Tai Chi is the same. New knowledge and understanding is built on what is acquired in previous stages. If you violate this principle and attempt to jump ahead prematurely, you will only lengthen your learning process. Even though traditional Chinese martial arts don't use a belt system, as in Karate, there are well-defined, clear-cut stages of progression that students must pass through. Chen Tai Chi specifically has five skill levels that are pursued by the long-term student. Summarized below, the first four levels distill classic (Chen style) Tai Chi principles applicable to all Tai Chi styles.* There are objective standards and criteria for each of these levels. The following section introduces the technical skills achieved at each of these levels. This should help you assess your current level and know what areas you need to work on to advance to the next level. Level 1: Form and Posture In the beginning, your body is not well connected. As you develop strength through good postures, you will see less angular and disconnected movement. The stance practices and games will help you in this area of development, since leg strength is crucial at this stage of your training. 
Over time you'll experience improvement in the direction and position of your movement and limbs, while achieving correct postures. Level 2: Body and Mind Synthesis Here, you'll have to be more mindful of your practice by carefully watching your body's movement. Thinking of where your weight is and concentrating on slow movements are good ways to get your mind and body to work as one. At this stage you'll understand your movements better, and your body will begin to make sense of what is happening during practice. You should be able to fine-tune your postures and understand how to express power and speed and control. Because you are stronger, you may begin to challenge yourself through some of the games in your training. You should begin to feel comfortable correcting yourself at this point. Level 3: Thinking of Circles At this level you should think of all the movements you've been practicing as circles—some big, some small. As you get better, these circles get smaller until no one can see them, but you can feel them. At this advanced level, you'll move from thinking about big circles to thinking about small circles. Your movements should be continuous and without weakness in any part of your body. Your movements should be sure and natural. Level 4: Qi Flow At this stage you will begin to feel an intrinsic heat not fueled by strenuous exercise. This internal energy is known as qi. As your movements smooth out and your postures steady, you'll begin to sense an internal warmth, especially in your hands. At this stage you should focus on relaxing your body, by hollowing your chest and slightly tucking your buttocks in, while keeping your spine straight. These more subtle physical movements will enhance the flow of qi and your sensation of it. Level 5: Balancing Hard and Soft As you progress through the first four levels, your movements will soften, but not be limp. Your Tai Chi will resemble a balance of hard and soft, fast and slow, bent and straight. 
When you're able to move while balancing different types of energy in your body, your Tai Chi will exhibit a balance of forces—a balance of yin and yang. ## CONCLUSION To advance in Tai Chi, adhere to the principles of yin and yang in your practice. Whenever you notice the physical changes that will occur in your Tai Chi, you'll know that you're ready for the next stage. Understanding the five levels of Tai Chi will help guide your practice and provide you with hints that will signal when you are able to progress to higher levels. ## Footnote * Howard Choy and Ahtee Chia, "Family Transmitted Chen Style Tai Chi Chuan" Inside Kung Fu, May 1992, pp. 41–43, 80. # ACKNOWLEDGMENTS Jose Figueroa would like to thank the following people for their contributions that made this book possible. > Models: > Natalie Figueroa > Cheikh Fall > > Hair and make up: > Awilda & Nadia Figueroa > Natasha Bolado > > Photography & layout: > Johnny Rodriquez (jrvisions.com) The following places and people opened their hearts and minds to the idea of Tai Chi for kids and the value of such a program. P.S. 121 / Scan New York Lewis Zuckerman and Rene Avery, for being amazing mentors in the development and growth of our youth. P.S./M.S. 20 Rita Sollow Schneyman and staff of P.S./M.S. 20 in the Bronx, for believing that the gift of Tai Chi enhances the academic and social performance of children. And to all of the children who taught me how to truly teach from the heart. Manhattan School for Children Susan Rappaport, Alysa Essenfeld, and Anna Chen, as well as the magnificent staff, parents, and kids that made this program such a success and joy. Thank you all for helping refine this unique curriculum. A SPECIAL THANKS goes out to all of my mentors and teachers. Thank you, Master Derrick Trent, my first Tai Chi teacher, for introducing me to the legacy that is Tai Chi. To Master Ren Guang Yi for his powerful inspiration, guidance, and support on the development of this complex art. 
And thanks to my kung fu brothers Greg Pinney and the late Dr. Joseph Cheu for your knowledge and mutual love for Tai Chi. AND A VERY SPECIAL THANKS goes to Stephan Berwick, who has inspired me to achieve goals that I never thought possible. This book could not have been done without your guidance, support and deep expertise in the martial arts. You represent the highest qualities of a Martial Artist. Both warrior and scholar, you do it from the heart, not for fame or money, but for the love of Kung Fu and the splendor it brings. Many of my lessons with you were much more about the true meaning of Kung Fu and what it truly represents to the practitioner. In your voice I can hear the passion and love for the art. Your lessons to me were much more about inspiration than anything else. What stays with me is your ability to be so humble about your development in the martial arts. For this reason I'm proud to have you as my co-writer, mentor and life friend. You are a modern Shih Fu who refrains from the title Master, but is truly worthy of it. # ABOUT THE AUTHORS JOSE FIGUEROA Jose Figueroa's more than fifteen years in Chinese martial arts includes unrivaled success as America's premier internal Chinese martial arts competitor. Founder of the Tai Chi Holistic Network, Mr. Figueroa is a senior student of Master Ren Guang Yi. He has won numerous grand championships and first place titles at every major Chinese martial arts tournament in the United States. As a national champion, he traveled to China in 1998 with Master Ren to train in Chen village and compete in the International Taiji Competition held in Wenxian, Henan, China. With a BS in Physical Education, Mr. Figueroa has designed innovative physical education curricula based solely on Chinese martial arts for the NY Board of Education, Wavehill community, Equinox health club, and the Omega Institute. From 1996-2004, under mentoring by Mr. Berwick, Mr. 
Figueroa emerged as one of New York's favored theater choreographers. For his pioneering work with jazz playwright Fred Ho, Mr. Figueroa won the 2000 NY Foundation for the Arts Gregory Millard fellowship for choreography, based on his use of Chinese martial arts for theater combat choreography. STEPHAN BERWICK Stephan Berwick, a winner of the 1st International Chen Style Taijiquan Association Excellence Award, has almost 30 years experience in Chinese martial arts. A widely published martial arts scholar and practitioner, Stephan is a senior disciple of Master Ren Guang Yi and is also trained by Grandmaster Chen Xiaowang. Certified at the Sha'anxi Athletic Technical Institute in Xi'an China, under Masters Zhao Changjun and Bai Wenxiang, Stephan later conducted primary research on Chen Taijiquan at Taiji's birthplace, Chenjiagou. Stephan was originally mentored by Master Bow Sim Mark and performed in Hong Kong action films with Donnie Yen under director Yuen Wo Ping. As a Chen Taiji specialist, Stephan instructs a wide variety of students — from the physically challenged to experienced defense professionals. For more information on Stephan's Washington DC-area Taiji program, please visit <http://www.truetaichi.com>. #
{ "redpajama_set_name": "RedPajamaBook" }
5,638
\section{Introduction} There has been widespread activity in braneworld gravity in recent times\cite{maartens}. The braneworld scenario of our universe opens up the fascinating possibility of the existence of large extra spatial dimensions by ensuring that the standard model fields are confined to the $3$-brane, whereas gravity could also propagate into the higher dimensional bulk. Several braneworld models have been studied in the literature, the most popular among them being the Arkani-Hamed, Dimopoulos and Dvali (ADD) model\cite{arkani}, and the two Randall-Sundrum (RS) models\cite{randall1,randall2}. In the ADD model\cite{arkani} there could be $n$ large compact extra dimensions with radius $l$, with $n\ge 2$ providing a possible resolution of the hierarchy problem of particle physics. The RS-I model\cite{randall1} is motivated from similar considerations and consists of two opposite tension branes, with our universe stipulated to be the negative tension brane. In the RS-II model\cite{randall2} which has been the inspiration for an extensively developed braneworld cosmology\cite{brax}, the AdS5 bulk can have infinite size. Confinement of the standard model fields is achieved by a positive tension brane, and a negative cosmological constant for the AdS5 bulk with curvature radius $l$. The resultant modification of the Newtonian gravitational potential in braneworld models\cite{garrtan,giddings2} is of the order $1/r^3$ at distances $r \ge l$ where $l$ is the scale of the extra dimension(s). The failure of current experiments using torsion pendulums and mechanical oscillators to observe departures from Newtonian gravity at small scales has set the upper limit of $l$ in the sub-millimeter region, i.e., $l \le 0.2 {\mathrm mm}$\cite{long}. It has been recently realized that the possibility of observing signatures of modified gravity in braneworld models with large extra dimensions exists in particle accelerator experiments. 
This is due to the fact that in braneworld gravity the fundamental $5$-dimensional Planck scale $M_5$ can be much below the $4$-dimensional Planck scale $M_4$. If the scale of the extra dimension is not much below the limit obtained from table-top experiments\cite{long}, then the corresponding $M_5$ could be as high as the order of a TeV. Thus it is possible for mini $5$-dimensional black holes to be produced in particle collisions with centre of mass energy of TeV order\cite{giddings}. The production cross-sections of various higher dimensional black holes in future accelerators such as the LHC have been studied and possible signatures in the form of properties of end products of the Hawking evaporation from these black holes have been enlisted\cite{dimopoulos}. High energy cosmic ray showers could similarly produce small higher dimensional black holes, the possible signatures from which have also been studied\cite{feng}. It hence appears that observational signatures of braneworld gravity may be a distinct possibility in the near future from several avenues. The analysis of gravitational field equations on the brane is conceptually complicated due to the fact that the propagation of gravity into the bulk does not permit the treatment of the brane gravitational field equations as a closed form system\cite{shiromizu}. This makes the task of studying gravitational collapse on the brane rather difficult\cite{germani}. Garriga and Tanaka\cite{garrtan} first incorporated the effect of the Kaluza-Klein modes on the metric outside a spherically symmetric and static matter distribution on the brane in the form of the $1/r^3$ correction to the gravitational potential. Since then though no exact solution of the full $5$-dimensional bulk field equations has been found, various solutions representing braneworld black holes have been obtained based on different configurations for the projected $5$-dimensional Weyl tensor on the brane. 
Among them the one of the Reissner-Nordstrom type with negative tidal charge\cite{dadhich} originating from the Weyl term has been discussed in details in the literature. The projection onto the brane of the $5$-dimensional Schwarzschild solution given by the Myers-Perry metric\cite{myers} could effectively describe a small braneworld black hole of size $r \le l$. The properties of such black holes have been investigated in details\cite{argyres}. The mechanism of Hawking evaporation has been formulated into the brane and the bulk as well\cite{kanti}, and special features have been observed in the interaction of $5$-dimensional black holes with branes\cite{frolov1}. Braneworld black holes have been the subject of recent phenomenological interest in the arena of cosmology. Primordial black holes have potentially diverse ramifications on several eras of cosmological evolution. If braneworld black holes are formed in the early universe, their effect on subsequent dynamics could be not only varied but also widely different from those of primordial Schwarzschild black holes in standard cosmology. This is due to the reasons that braneworld black holes have different properties compared to ordinary $4$-dimensional black holes, and also due to the entirely modified evolution of the very early stages of the universe in the braneworld scenario\cite{maartens,brax}. In particular, the Hubble expansion is modified by the presence of a term proportional to the square of the energy density on the right hand side of the Friedmann equation, which dominates the dynamics during the very early high energy phase. Primordial braneworld black holes that could be produced due to the collapse of horizon-sized density perturbations, have a lower temperature and evaporate slowly compared to the standard $4$-dimensional black holes as a consequence of their different geometry\cite{guedens}. 
Furthermore, the modified braneworld cosmological evolution of the universe enables accretion from the surrounding radiation to be effective towards increasing the mass and longevity of the black holes\cite{majumdar2,clancy}. Primordial black holes could survive till many different eras in such a scenario, thereby contributing to the energy density of the universe. The Hawking evaporation products at the end of their life-cycles could have a significant bearing on several cosmological processes. Observational results such as the background gamma ray spectrum could hence be used to impose constraints on the initial mass spectrum of the black holes. These could be again significantly modified\cite{liddle} compared to those that have been obtained for primordial Schwarzschild black holes in standard cosmology. The prospect of survival of primordial black holes up to present times in the braneworld scenario naturally raises the question as to whether they could be a significant fraction of cold dark matter in galactic haloes, and also what role they could have in structure formation. In order to address these issues it is first important to obtain observational evidence of their existence and to determine their mass ranges. The two likely avenues for obtaining observational signatures from black holes that may be present in our galactic halo are through gravitational waves from coalescing black hole binaries, and through gravitational lensing of light sources by the black holes. It has been shown that energy exchange between neighbouring black holes that are formed in the high energy braneworld era facilitates the later formation of black hole binaries through gravitational interaction\cite{majumdar3}. It has been also argued that binaries of primordial black holes in the braneworld scenario could emit gravitational waves observable by future detectors\cite{inoue}. The study of gravitational lensing by braneworld black holes has been very recently undertaken. 
The deflection of light propagating on the brane due to bulk effects has been calculated\cite{frolov3}. The expressions for the weak field bending angle of light in certain braneworld metrics have been obtained\cite{kar}. Further, the various lensing quantities for one possible braneworld black hole geometry have been obtained and compared to those for the Schwarzschild black hole in the weak field limit\cite{majumdar4}. The richer phenomenology of strong field gravitational lensing such as the positions and magnifications of relativistic images is being investigated\cite{eiroa} in several braneworld geometries, and the values of observational parameters computed for a candidate lens\cite{whisker}. The plan of this review is as follows. In the next section we describe some candidate geometries for braneworld black holes. The arbitrariness of the projected bulk Weyl term on the brane is responsible for the existence of a number of possible solutions. We discuss some distinctive properties of a few of them. The aim of this article is to highlight the progress made in understanding the impact of having braneworld black holes in cosmological and astrophysical processes. In order to make this review a bit self-contained we provide a brief description of the essential features of the cosmology of the braneworld scenario in section~3. The stage is then set for a somewhat detailed analysis of the cosmological evolution of primordial braneworld black holes in section~4. Here we try to emphasize the key differences from the consequences of a population of primordial Schwarzschild black holes in standard cosmology. We begin section~5 with a skeletal description of the theoretical framework of gravitational lensing. We then present some recent results on braneworld lensing quantities and observables for some of the geometries described earlier. 
The underlying spirit here is the attempt to possibly discriminate between different gravity and braneworld gravity models by future observations. A summary is presented and some concluding remarks are made in section~6. \section{Spherically symmetric and static vacuum solutions on the brane} Obtaining the gravitational field due to a localized matter distribution on the brane has been an involved and challenging task right since the inception of braneworld models. This question is the forebearer of the problem of finding the final state of gravitational collapse on the brane, which is of central importance concerning the existence of black hole solutions in the braneworld scenario. The process of gravitational collapse in the braneworld scenario is much more complicated compared to general relativity because in the former case whereas matter is confined to the brane, the gravitational field can also access the extra dimension. The corrections to the Newtonian potential of a point mass $M$ at large distances due to the extra dimension were calculated to be\cite{randall1,garrtan} \begin{equation} V(r) = \frac{2M}{M_4^2 r}\biggl(1+\frac{2l^2}{3r^2}\biggr) \label{gravpot} \end{equation} The failure of current experiments to detect such corrections at sub-millimeter scales has set the upper limit on the curvature radius $l$ of the $5$-th dimension as $l \le 0.2 {\mathrm mm}$\cite{long}. The effect of Kaluza-Klein modes on the metric exterior to a static and spherically symmetric matter distribution on the brane was considered by Garriga and Tanaka\cite{garrtan}. 
They obtained a solution in the weak field limit given by \begin{equation} dS_4^2 = -\biggl(1-\frac{2M}{M_4^2r} + \frac{4Ml^2}{3M_4^2r^3}\biggr)dt^2 + \biggl(1 + \frac{2M}{M_4^2r} +\frac{2Ml^2}{3M_4^2r^3}\biggr)(dr^2 + r^2 d\Omega^2) \label{gartanmetric} \end{equation} Note that this solution is quite different from the Schwarzschild metric and that the gravitational potential obtained from this metric (\ref{gartanmetric}) has $1/r^3$ corrections (\ref{gravpot}) compared to the Newtonian potential. Further perturbative studies\cite{giddings2,sasaki} have also established that the first weak field correction to the Newtonian potential on the brane is proportional to $1/r^3$. The projected Weyl term $E_{\mu\nu}$ on the brane carries the imprint of Kaluza-Klein modes that could be relevant in the process of gravitational collapse. If the Weyl term vanishes, then the standard Schwarzschild solution in $4$ dimensions can be assumed as the simplest black hole solution on the brane by `stacking' it into the extra dimension. Such a vacuum solution of the $4$-dimensional Einstein equation is of the `black string' type\cite{chamblin}, and can be generalised to the case of a cosmological constant in $4$ dimensions as well\cite{anderson}. Subsequently, it was shown that the black string is unstable to large-scale perturbations\cite{gregory}. Another solution to the vacuum $4$-dimensional Einstein field equations is obtained by setting the $4$-dimensional cosmological constant to zero (as in Eq.(\ref{cosmconst}), thus obtaining a relation between the brane tension and the AdS radius (\ref{tenserad})). 
Since the projected Weyl tensor on the brane is divergence free for the vacuum case, one gets for static solutions a closed system of equations given by\cite{shiromizu} \begin{eqnarray} R_{\mu\nu} &=& -E_{\mu\nu}\nonumber \\ R_{\mu}^{\mu} &=& 0\nonumber \\ \nabla^{\mu}E_{\mu\nu} &=& 0 \label{fieldsoln} \end{eqnarray} Dadhich et al\cite{dadhich} have prescribed the mapping of the $4$-dimensional general relativity solution with traceless energy momentum tensor of the Einstein-Maxwell type to a vacuum braneworld solution in $5$ dimensions with the correspondence \begin{equation} \kappa^2 T_{\mu\nu} \leftrightarrow -E_{\mu\nu} \label{corres} \end{equation} An exact black hole solution to the effective field equations on the brane of the Reissner-Nordstrom type was given with the above correspondence (\ref{corres}) as\cite{dadhich} \begin{equation} dS_4^2=-\left(1-\frac{2M}{M_4^2r}+\frac{Q}{r^2}\right)dt^2\\+ \left(1-\frac{2M}{M_4^2r}+\frac{Q}{r^2}\right)^{-1}dr^2\\ +r^2(d\Omega^2) \label{tidalmetric} \end{equation} where $Q < 0$ is not the electric charge of the conventional Reissner-Nordstrom metric, but the negative `tidal charge' arising from the projection on to the brane of the gravitational field in the bulk. Since the black hole mass $M$ is the source of the bulk Weyl field, the tidal charge $Q$ could be viewed as the reflection back on the brane of the gravitational field of $M$ by the negative AdS5 bulk cosmological constant. In the limit $r < l$, it can be shown that\cite{dadhich,dadhich1} \begin{equation} Q = -\frac{Ml}{M_4^2} \label{masscharge} \end{equation} The bulk tidal charge thus strengthens the gravitational field of the black hole. It has been further argued\cite{dadhich2} that since the back reaction of the bulk onto the brane strengthens gravity on the brane, the formation of a black hole as result of gravitational collapse is favored as against a naked singularity. 
The metric with negative tidal charge (\ref{tidalmetric}) has a spacelike singularity and one horizon given by \begin{equation} r_h = \frac{M}{M_4^2}\Biggl(1+ \sqrt{1 - \frac{QM_4^4}{M^2}}\Biggr) \end{equation} which is larger than the Schwarzschild horizon. So the bulk effects are seen to increase the entropy and decrease the temperature of the black hole. A more general class of spherically symmetric and static solutions to the field equations with a $5$-dimensional cosmological constant can be derived by considering a general line element of the type \begin{equation} ds^2 = -A(r)dt^2 + B(r)dr^2 + r^2(d\Omega^2) \label{general} \end{equation} and relaxing the condition $A(r)=B^{-1}(r)$ used while obtaining the Schwarzschild or the Reissner-Nordstrom metrics. Casadio et al\cite{casadio} obtained two types of solutions by fixing either $A(r)$ or $B(r)$, and then demanding the correct $1/r$ asymptotic behaviour for the other in terms of the post Newtonian (PPN) parametrization. In the first case, the choice $A(r)= 1-2M/(M_4^2r)$ leads to the metric \begin{equation} ds_4^2= -(1-\frac{2M}{M_4^2r})dt^2 + \frac{1-\frac{3M}{2M_4^2r}} {(1-\frac{2M}{M_4^2r}) \left(1-\frac{M(4\beta-1)}{2M_4^2r}\right)}dr^2 + r^2(d\Omega^2) \label{casad1} \end{equation} in terms of the PPN parameter $\beta$ which impacts the deflection and time delay of light\cite{will}. Note that the above metric was also derived as a possible geometry outside a star on the brane\cite{germani1}. The solution (\ref{casad1}) is of the temporal Schwarzschild form having a horizon $r_h = 2M/M_4^2$. The corresponding Hawking temperature is given by\cite{casadio} \begin{equation} T_{BH} = \frac{\sqrt{1-6(\beta-1)}}{8\pi M} \end{equation} Thus, in comparison with Schwarzschild black holes, the black hole (\ref{casad1}) will be either hotter or colder depending upon the sign of $(\beta - 1)$. 
Alternately, the choice for $B(r)$ of the form $B^{-1}(r) = 1- 2\gamma M/(M_4^2r)$, in terms of the PPN parameter $\gamma$, yields the line element \begin{equation} dS_4^2 = -\frac{1}{\gamma^2}\Biggl(\gamma - 1 + \sqrt{1 - \frac{2M}{M_4^2r}} \Biggr)^2 dt^2 + \frac{dr^2}{1-\frac{2M}{M_4^2r}} + r^2(d\Omega^2) \label{casad2} \end{equation} This form of the metric represents a nonsingular wormhole, and has been discussed earlier in the literature in the context of $4$-dimensional general relativity\cite{dadhich3}. Wormhole solutions in the braneworld context have been discussed by Bronnikov et al\cite{bronnikov}. Furthermore, a class of static, spherically symmetric and non-singular braneworld solutions with horizon have been obtained\cite{dadhich4} by relaxing the vanishing scalar curvature condition (\ref{fieldsoln}) used to obtain the solutions (\ref{tidalmetric}),(\ref{casad1}) and (\ref{casad2}). Stationary solutions representing charged rotating black holes have also been found recently\cite{aliev}. The arbitrariness of the projected bulk Weyl term $E_{\mu\nu}$ and its geometric origin is at the root of the variety of braneworld black hole and wormhole solutions since both the functions $A(r)$ and $B(r)$ in Eq.(\ref{general}) have to be determined by it\cite{visser}. A specific configuration for the Weyl term with a negative equation of state has been considered and the resultant geometry with a singular horizon has been worked out to provide one more example of a possible braneworld black hole solution\cite{gregory2}. The black hole solution (\ref{tidalmetric}) exhibits a $1/r^2$ correction to the Newtonian potential on the brane in contrast to the weak field correction of $1/r^3$ as in the solution (\ref{gartanmetric}). The solution with tidal charge (\ref{tidalmetric}) is reflective of the short distance or strong gravity limit where the $1/r^2$ correction to the gravitational potential may even dominate over the $1/r$. 
This corresponds to the fact that at short distances, braneworld gravity is truly $5$-dimensional. For short distances $r \ll l$ it is natural to consider the $5$-dimensional Schwarzschild solution as a braneworld black hole candidate given by\cite{myers} \begin{equation} ds_5^2 = -\left(1-\frac{r_{BH}^2}{r^2}\right)dt^2+ \left(1-\frac{r_{BH}^2}{r^2}\right)^{-1}dr^2\\+r^2\left(d\Omega_3^2\right) \label{hdbh} \end{equation} where the horizon size $r_{BH}$ is so small ($r_{BH} \ll l$) that the black hole effectively ``sees'' all the spatial dimensions on the same footing. A generalisation to higher dimensions\cite{argyres,nakao} of the hoop conjecture leads to the above form of the metric as a static solution to collapsing matter on the brane. Near the event horizon, the black hole would have no way of distinguishing between the bulk dimension and the braneworld ones. Numerical simulations for scales sufficiently small compared to the AdS scale $l$ seem to also support the existence of static solutions satisfying the AdS5 boundary conditions\cite{kudoh}. The induced $4$-dimensional metric on the brane near the event horizon of the $5$-dimensional black hole (\ref{hdbh}) is obtained by integrating out the extra dimension to be \begin{equation} dS_{4}^2=-\left(1-\frac{r_{BH}^2}{r^2}\right)dt^2+\left(1-\frac{r_{BH}^2}{r^2}\right)^{-1}dr^2\\+r^2\left(d\Omega^2\right) \label{smallmetric} \end{equation} This $4$-dimensional metric is different from the standard $4$-dimensional Schwarzschild solution as it reflects the $5$-dimensional character of the strong gravitational field near the black hole horizon in the form of the $1/r^2$ gravitational potential. It is however expected that far from the event horizon the metric (\ref{smallmetric}) would approach the standard $4$-dimensional Schwarzschild form, as was shown explicitly in $2+1$ dimensional braneworld framework\cite{emparan}. 
The properties of small black holes with the geometry given by Eq.(\ref{smallmetric}) have been studied extensively by Argyres et al\cite{argyres}. In general these black holes have lesser temperature and a longer lifetime compared to the standard $4$-dimensional Schwarzschild black holes. Also since the $5$-dimensional Planck mass could be much lower compared to the $4$-dimensional Planck mass ($M_5 \ll M_4$), these black holes could be produced in particle accelerators\cite{giddings,dimopoulos} and cosmic ray showers\cite{feng}. They could thus provide one avenue of testing higher dimensional or braneworld physics. Of course, the consequences of a population of primordial black holes of the type (\ref{smallmetric}) are potentially rich during many different cosmological eras, and these will be described in details in section 4. The Myers-Perry black hole\cite{myers} in $5$ dimensions (\ref{hdbh}) has been also used in the context of braneworld models to investigate various effects of its interaction with the brane and its radiation onto the bulk. Frolov and Stojkovic\cite{frolov1} have shown that a small black hole attached to the brane may leave the brane as the result of a recoil due to emission of quanta into the bulk. Such an effect leads to energy loss in the brane. This opens up the possibility of observing energy non-conservation in particle colliders which may be able to produce these black holes. Radiation by rotating $5$-dimensional black holes have also been studied and certain conditions have been found when such objects could be stationary\cite{frolov2}. The interaction of $5$-dimensional black holes with the brane described as a domain wall has interesting phenomenological features. In particular, the induced geometry on the brane due to a moving bulk black hole has been derived, and an apparent violation of the energy condition observed on the brane\cite{frolov3}. 
Specific features of the interaction of rotating black holes with the brane have been studied\cite{frolov4}. Energy flux through the horizons of various configurations of the black hole--domain wall system has also been investigated\cite{stojkovic1}. Before concluding this section, it needs to be emphasized that although several spherically symmetric and static brane black hole solutions with contributions from the bulk gravity effects have been found\cite{dadhich,casadio,germani1,dadhich4,visser,gregory2}, and further possibilities have been elucidated as belonging to a more general class of black holes\cite{bronnikov}, none of these are obtained as exact solutions of the full $5$-dimensional bulk field equations. The analyses of gravitational collapse on the brane have typically yielded non-static solutions\cite{germani}. Numerical simulations\cite{kudoh,numerical} have been inconclusive in this aspect. Investigations on stellar metrics\cite{visser,wiseman} on the braneworld have been performed in order to obtain further insight into the full $5$-dimensional spacetime geometry. Understanding the bulk properties has been attempted by extending some particular braneworld black hole solutions to the bulk\cite{kanti2}. The problem of finding the bulk metric which would represent a static and spherically symmetric vacuum solution with horizon on the brane remains an open one till date. \section{Braneworld cosmology} The cosmology of the RS-II model entails a modified high energy phase in the early radiation dominated era of the universe during which the right hand side of the Einstein equation contains a term that is quadratic in the brane energy momentum tensor\cite{maartens}. Other modifications include the so-called ``dark-energy'' term which is given by the projection of the bulk Weyl tensor. Transition to the standard radiation dominated era takes place when $t >> t_c \equiv l/2$. 
Such a modified high energy evolution has rich consequences for the physics of the early universe\cite{brax}. In particular, the inflationary scenario is altered, allowing the possibility of steep inflaton potentials to accomplish the desired features. Constraints on the duration of the brane dominated high energy phase are enforced by the necessity of conforming to the standard cosmological observational features such as nucleosynthesis and density perturbations. Let us now review briefly some of the essential features of the RS-II braneworld cosmology. The effective $4$-dimensional Einstein tensor on the brane is given by\cite{maartens} \begin{equation} G_{\mu\nu} = {8\pi\over M_4^2}\tau_{\mu\nu} + \kappa^4\Pi_{\mu\nu} - E_{\mu\nu} \end{equation} where $\tau_{\mu\nu}$ is the brane energy-momentum tensor; $\Pi_{\mu\nu}$ is quadratic in the brane EM tensor; and $E_{\mu\nu}$ is the projection of the 5-dimensional Weyl tensor. The $4$-dimensional Planck's mass $M_4$ is related to the gravitational coupling constant $\kappa$ and the AdS length $l$ by $\frac{8\pi}{M_4^2} = \frac{\kappa^2}{l}$. For the Friedmann-Robertson-Walker metric on the brane, the Friedmann equation is given by \begin{equation} H^2 = {8\pi\over 3M_4^2}\Biggl(\rho + {\rho^2\over 2\lambda} + \rho_{KK}\Biggr) + {\Lambda_4\over 3} -{k\over a^2} \end{equation} with $H$ being the Hubble constant, $\rho$ the energy density, and $k=-1,0,1$ representing open, flat and closed branes, respectively. $\rho_{KK}$ is the effective energy density coming from the bulk Weyl tensor, $\lambda \equiv 3M_5^6/4\pi M_4^2$ is the brane tension, and $\Lambda_4$ the effective $4$-dimensional cosmological constant on the brane. The AdS curvature radius $l$ is given by the bulk cosmological constant $\Lambda_5$ and $5$-dimensional Planck mass $M_5$ as $\Lambda_5 = -(3M_5^3)/(4\pi l^2)$. 
The induced $4$-dimensional cosmological constant $\Lambda_4$ is given by \begin{equation} \Lambda_4= 3\biggl({M_5^6\over M_4^4} - {1\over l^2}\biggr) \label{cosmconst} \end{equation} Setting $\Lambda_4 =0$, one obtains a relation between the brane tension and AdS radius given by \begin{equation} \lambda^{-1/4} = \Biggl({4\pi\over 3}\Biggr)^{1/4}\Biggl({l\over l_4}\Biggr)^{1/2} l_4 \label{tenserad} \end{equation} Nucleosynthesis and CMBR observations constrain the ``dark energy'' term $\rho_{KK}$ to be negligible compared to the radiation density $\rho$\cite{binetruy}. For much of cosmological evolution one can neglect $\rho_{KK}$, as we will do in the following analysis. Assuming a radiation dominated equation of state, the $(k=0)$ solutions for the Friedmann equation are given by \begin{equation} \rho_R = {3M_4^2\over 32\pi t(t+t_c)} \end{equation} for the energy density, and \begin{equation} a=a_0\Biggl[{t(t+t_c)\over t_0(t_0+t_c)}\Biggr]^{1/4} \end{equation} for the scale factor $a$ during the radiation dominated era, and where $t_c \equiv l/2$ effectively demarcates the brane dominated ``high energy'' era from the standard radiation dominated era. For times earlier than $t_c$, i.e., $t \le t_c$ (or $\rho \ge \lambda$), one has the non-standard high energy regime during which the radiation density and the scale factor evolve as \begin{equation} \rho_R = {3M_4^2\over 32\pi t_ct} \label{rhobrane} \end{equation} and \begin{equation} a = a_0\biggl({t\over t_0}\biggr)^{1/4} \label{abrane} \end{equation} respectively. As a consequence, the time-temperature relation also gets modified during the brane dominated high energy era, i.e., $T \propto t^{-1/4}$. 
On the other hand, if the high energy braneworld regime is matter dominated from a time $t_m$, the scale factor grows subsequently like \begin{equation} a= a_m\biggl({t\over t_m}\biggr)^{1/3} \end{equation} But, for times much later than $t_c$, i.e., $t >> t_c$ (or $\rho << \lambda$), one should recover back the standard radiation dominated cosmological evolution given by \begin{equation} \rho_R = {3M_4^2\over 32\pi t^2} \end{equation} and \begin{equation} a = a_0\biggl({t\over (t_0t_c)^{1/2}}\biggr)^{1/2} \end{equation} The observational success of standard big-bang nucleosynthesis constrains that the high energy era be over by the epoch of the synthesis of light elements. However, this requirement is satisfied for $l < 10^{43}l_4$, which is a much weaker bound than that obtained from experiments probing the modifications to Newtonian gravity\cite{long} in the braneworld scenario. Modified expansion in the high energy era has interesting implications for inflation\cite{lidsey}. For inflation driven by a scalar field $\phi$ with potential $V$ on the brane, the condition for accelerated expansion of the scale factor ($\ddot{a} > 0$) is satisfied when the equation of state parameter $w$ is \begin{equation} w < -\frac{1}{3}\biggl(\frac{1+2\rho/\lambda}{1+\rho/\lambda}\biggr) \end{equation} In the standard slow-roll approximation the Hubble rate and the scalar field evolve as \begin{eqnarray} H^2 \approx \frac{\kappa^2}{3}V\biggl(1 + \frac{V}{2\lambda}\biggr) \\ \dot{\phi} \approx \frac{V'}{3H} \end{eqnarray} The braneworld correction term $V/2\lambda$ enhances the Hubble rate compared to standard cosmology. This in turn increases friction in the scalar field equation. Thus slow-roll inflation is favored even for steep potentials\cite{cline}. The added advantage of such a scenario is that the inflaton field can play the role of quintessence\cite{copeland} leading to a late time acceleration of the universe, as well. 
Further modifications to the high energy expansion in the very early stages can be brought about by the inclusion of the Gauss-Bonnet term in the $5$-dimensional action\cite{charmousis}. During the very early era the Gauss-Bonnet term drives the Hubble rate as $H^2 \propto \rho^{2/3}$ (Gauss-Bonnet regime), which subsequently changes to $H^2 \propto \rho^2$ (Randall-Sundrum regime), and finally for $t > t_c$ the $H^2 \propto \rho$ (standard regime) evolution is recovered. Braneworld inflation could be accomplished by the Gauss-Bonnet term for very steep potentials such as the exponential potential\cite{tsujikawa}. \section{Cosmological evolution of black holes} This section will focus on the consequences of a population of primordial black holes on cosmology in the braneworld scenario. The modified features of cosmology during the high energy braneworld era have been highlighted in section 3. The black holes present in the early universe affect the dynamics of the radiation dominated expansion through Hawking emission and accretion of the surrounding radiation\cite{majumdar2,clancy}. These two competing processes lead to a net energy flow for a single black hole, the direction of which determines its longevity. The temperature and the rate of Hawking radiation for braneworld black holes are themselves different from those of standard $4$-dimensional Schwarzschild black holes. The evolution of the individual black holes is impacted by the accretion of radiation from the surroundings, which is more effective in the braneworld scenario compared to the standard cosmology. Since the rate of accretion is governed by the rate of background expansion which is much slower in the high energy regime, black holes that are produced earlier undergo larger growth. The actual rate of accretion and evaporation of course depends upon the particular geometry of the braneworld black hole. 
The black holes of interest for cosmological evolution are produced very early in the universe either due to the collapse of overdense regions resulting from inflation generated density perturbations, or due to the collision of heavy particles in the primordial plasma. Most of such black holes are expected to be formed with a size small enough ($r \le l$) for the induced $4$-dimensional Myers-Perry metric (\ref{smallmetric}) to be a good approximation to their geometry on the brane. Further, as we are interested in the processes of Hawking evaporation and the accretion of radiation, both of which are characterized by the near horizon short distance properties of the metric, it may also be pragmatic to consider the $5$-dimensional form of gravity reflected in the near horizon strong field region by the geometry in Eq.(\ref{smallmetric}). It is worth noting that the collapse of the `tidal charge' in vacuum could also give rise to the same geometry for primordial black holes\cite{dadhich} ($M=0$ in Eq.(\ref{tidalmetric})). For these reasons only this particular form (\ref{smallmetric}) of braneworld black hole geometry has been considered for the analysis of cosmological evolution\cite{guedens,majumdar2,clancy,liddle}. We first consider the evolution of a single primordial black hole which is formed with a sub-horizon mass\cite{guedens} in the high energy radiation dominated era. Since we are considering the $4$-dimensional projection of the $5$-dimensional Schwarzschild metric, i.e., \begin{equation} dS_{4}^2=-\left(1-\frac{r_{BH}^2}{r^2}\right)dt^2+\left(1-\frac{r_{BH}^2}{r^2}\right)^{-1}dr^2\\+r^2\left(d\Omega^2\right) \label{metric1} \end{equation} the horizon radius of such a black hole is proportional to the square root of its mass. 
The mass-radius relationship is given by
The exact efficiency of accretion though depends upon complex physical processes involving the mean free paths of the particles comprising the radiation background and the thermal properties of the radiation in the non-trivial geometry near the event horizon\cite{clancy}. Any peculiar velocity of the black hole with respect to the cosmic frame further impacts the rate of accretion. In the absence of a universally accepted approach of determining the precise accretion rate, it is usually taken to be as equal to the product of the surface area of the black hole, the energy density of the background radiation, and an efficiency factor ranging between $0$ and $1$\cite{clancy}. Taking into account these effects of accretion and evaporation together, the rate of change of mass $\dot{M}$ of a braneworld black hole is given by \begin{equation} \label{bbhrate} \dot{M} = 4\pi r_{BH}^2\biggl( - g_{brane}\sigma T_{BH}^4 + f\rho_R\biggr) \end{equation} where $g_{brane}$ is effective number of particles that can be emitted by the black hole (we assume that the black holes can emit massless particles only and take $g_{brane} = 7.25$\cite{guedens}), $f$ is the accretion efficiency ($0 \le f \le 1$)\cite{clancy}, and $\sigma$ is the Stefan-Boltzmann constant. The black hole also evaporates into the bulk, with a rate proportional to $4\pi r_{BH}^2g_{bulk}T_{BH}^5$. However, this term is subdominant even for very small black holes\cite{guedens}, and has negligible effect on their lifetimes. 
Substituting the expressions for the black hole radius (Eq.(\ref{massradius})), the temperature-radius relation (\ref{tempbbh}), and the energy density of radiation (\ref{rhobrane}), the black hole rate equation (\ref{bbhrate}) in the radiation dominated high energy braneworld era can be written as\cite{majumdar2} \begin{equation} \dot{M} = -{AM_4^2 \over Mt_c} + {BM \over t} \label{bbhrate2} \end{equation} where $A$ and $B$ are dimensionless numbers given by \begin{eqnarray} \label{AandB} A &\simeq & {3\over (16)^3\pi} \\ B &\simeq & {2f\over \pi} \end{eqnarray} The exact solution for the black hole rate equation is given by\cite{majumdar2} \begin{equation} \label{exactsoln} M(t) = \Biggl[\Biggl(M_0^2 - {2AM_4^2\over 2B -1}{t_0\over t_c}\Biggr) \Biggl({t\over t_0}\Biggr)^{2B} + {2AM_4^2\over 2B -1}{t\over t_c}\Biggr]^{1/2} \end{equation} with $M_0$ being the formation mass of the black hole at time $t=t_0$. If the black hole is formed out of the collapse of horizon or sub-horizon sized density perturbations, the formation time and mass are related by\cite{guedens} \begin{equation} \label{initmasstime} {t_0\over t_4} \simeq {1\over 4}\Biggl({M_0\over M_4}\Biggr)^{1/2}\Biggl({l\over l_4}\Biggr)^{1/2} \end{equation} It has been argued\cite{majumdar2} that a black hole so formed continues to grow in size by the accretion of radiation during the high energy radiation dominated era, with its mass increasing as \begin{equation} \label{massgrowth} {M(t) \over M_0} \simeq \Biggl({t\over t_0}\Biggr)^B \end{equation} This result is however sensitive to the accretion efficiency, since a more careful analysis\cite{clancy} shows that for $f < \pi/4$, the growth due to accretion could come to a halt during the high energy regime itself. Within the context of standard cosmology, the evolution of a population of primordial black holes exchanging energy with the surrounding radiation by accretion and evaporation has been studied by several authors\cite{majumdar1,custodio}. 
The analysis of this problem is simplified by assuming that all the black holes are formed with an average initial mass $M_0$ at a time $t_0$ when the fraction of the total energy density in black holes is $\beta_{BH}$, and the number density of black holes is $n_{BH}(t_0)$. With these assumptions, the coupled cosmological equations for the radiation density $\rho_R(t)$, the matter density in the black holes $M(t)n_{BH}(t)$, and the scale factor $a(t)$ are integrated to give the complete cosmological evolution. Note however, that the black holes are produced with an initial mass spectrum in any realistic scenario of black hole formation in the standard cosmology\cite{starobinsky}, and it is expected that the same would be the case in the braneworld scenario as well. The effect of a mass distribution can be incorporated into the cosmological evolution in the standard scenario by introducing an additional differential equation for the distribution function and specifying further initial conditions related to it\cite{custodio}. However, since not much is known about the formation processes of black holes in braneworld cosmology, the study of only a few basic features of their cosmological evolution under simplifying assumptions has been undertaken in the literature till date. The number density of black holes $n_{BH}(t)$ scales as $a(t)^{-3}$, and thus for a radiation dominated evolution on the brane, one gets \begin{equation} (n_{BH}(t)/n_{BH}(t_0)) = (t_0/t)^{3/4} \end{equation} since $a(t) \propto t^{1/4}$. The net energy in black holes grows since accretion dominates over evaporation. 
The condition for the universe to remain radiation dominated (i.e., $\rho_{BH}(t) < \rho_R(t)$) at any instant $t$ can be derived to be\cite{majumdar2} \begin{equation} \label{raddomcond} \beta_{BH} < {(t_0/t)^{B+1/4} \over 1 + (t_0/t)^{B+1/4}} \end{equation} If the value of $\beta_{BH}$ exceeds the above bound, there ensues an era of matter (black hole) domination in the high energy braneworld phase. Such a phase of matter domination should definitely be over by the time of nucleosynthesis for the cosmology to be viable. Let us first describe the situation when the cosmology stays radiation dominated up to the time when brane effects are important, i.e., $t\leq t_c$. From Eq.(\ref{raddomcond}), this requires \begin{equation} \label{raddomcond2} {\beta_{BH} \over 1 -\beta_{BH}} < \Biggl({t_0 \over t_c}\Biggr)^{B + 1/4} \end{equation} Further, the black holes should remain small enough, i.e., $(M/M_4) < (3\pi/4)(t/t_4)$ for the $5$-dimensional evaporation law to be valid\cite{guedens}. These criteria can be used to put an upper bound to the average initial mass\cite{majumdar2} \begin{equation} \label{masslim} {M_0\over M_4} < \Biggl({3\pi \over 4(2\sqrt{2})^B}\Biggr)^{{2\over 2-B}} {t_c \over t_4} \end{equation} The growth of the black holes in the radiation dominated era due to accretion slows down with time, since the surrounding radiation density gets diluted. The rate of evaporation is also insignificant for a wide range of $M_0$ at this stage since the black hole masses could have grown by several orders of magnitude from their initial values. There ensues an era during which the black hole mass stays nearly constant over a period of time, as is the case for standard cosmology\cite{majumdar1}. The accretion rate is smaller for the braneworld case since the surface area is proportional to $M$ instead of $M^2$ for $4$-dimensional black holes. Moreover, the evaporation rate is ($\propto M^{-1}$) instead of $M^{-2}$. 
Hence, the black hole mass will stay for a while
The black hole equation (\ref{bbhrate2}) now becomes \begin{equation} \dot{m}_i = {Bm_i\over \tilde{t}} -{1\over m_i} - g{m_i\dot{m}_j\over \tilde{t}^{1/2}} \label{evolve2} \end{equation} where $\tilde{t}=Am_4^2t/t_c$, and \begin{equation} g = {4A^{1/2}\over 3\pi}\biggl({l_4\over d_0}\biggr)^2\biggl({t_0\over t_4}\biggr)^{1/2}\biggl({t_c\over t_4}\biggr)^{1/2} \label{coupling} \end{equation} can be dubbed as the `coupling' parameter between two black holes. The physical distance between two such neighbours increases initially with the Hubble expansion. Forming out of horizon collapse, the initial mass ratio of two such black holes is on average proportional to the square of the ratio of their formation times (from Eq.(\ref{initmasstime})). By sudying the effect of the interaction term it is possible to show that an initial mass difference between two such black holes can never decrease during the radiation dominated era\cite{majumdar3}. The energy exchange between the black holes and the surrounding radiation always causes mass disequilibration between neighbours. Such mass differences facilitate the formation of binaries during the standard low energy phase via three-body gravitational interactions. A formation mechanism for primordial black hole binaries in standard cosmology has been investigated\cite{nakamura} which shows that if equal mass primordial black holes are inhomogeneously distributed in space, three-body gravitational interactions could lead to the formation of binaries. The scheme of binary formation in the braneworld scenario\cite{majumdar3} could become more effective when combined with the scheme based on spatial inhomogeneities discussed in the context of standard cosmology\cite{nakamura}. Coalescing braneworld black hole binaries may emit gravitational waves amenable for detection by the next generation detectors\cite{inoue}. Let us now describe the case of matter (black hole) domination in the high energy phase. 
The onset of such a matter dominated era is derived to be\cite{majumdar2} \begin{equation} \label{mattdomtime} {t_{heq}\over t_0} = \Biggl({1-\beta_{BH} \over \beta_{BH}}\Biggr)^{{4\over 4B+1}} \equiv \gamma_B \end{equation} The mass of a black hole at $t_{heq}$ is given by $M(t_{heq}/M_0) = \gamma_B^B$. For $t > t_{heq}$ the Hubble expansion is essentially driven by the black holes ($p=0$) which dominate over radiation. Since the number density of black holes scales as matter ($n_{BH}(t) \propto a^{-3}$), for $t<t_c$ one has $H \propto \rho_{BH}$, and thus the scale factor grows as \begin{equation} a(t) \sim t^{1/3} \end{equation} During this era, the radiation density $\rho_R$ is governed by the equation \begin{equation} {d \over dt}\biggl(\rho_R(t)a^4(t)\biggr) = - \dot{M}(t)n_{BH}(t)a(t) \end{equation} where the contribution from accreting black holes is comparable to the normal redshiting term ($\rho_R \sim a^{-4}$) because at this stage the black holes dominate the total energy density. Some further analysis leads to the following expression for the radiation density\cite{majumdar2} \begin{equation} \rho_R(t) \approx \gamma_B^{-1}\rho(t_0)\Biggl({t_{heq}\over t}\Biggr) - {\beta_{BH} B\over B+1/3} \gamma_B^{1/4}\rho(t_0)\Biggl({t_0\over t}\Biggr)^{1-B} \end{equation} The black hole mass grows as \begin{equation} M(t) = M_0\gamma_B^B{\rm exp}\Biggl[3B + {C\over B}\gamma_B^{B+1/4} - 3B\Biggl({t_{heq}\over t}\Biggr)^{1/3} - \gamma_B^{1/4}{C\over B}\Biggl({t\over t_0}\Biggr)^B\Biggr] \end{equation} where $C = \frac{\beta_{BH} B}{B+1/3}$. As in the case of standard cosmology, the accretion regime lasts for a brief duration in the matter dominated phase, beyond which the black hole evaporation starts to play a significant role. The universe gets reheated as $\rho_R(t)$ increases with time. The stage of black hole domination lasts up to a time $t_r$ ($\rho_R(t_r) = n_{BH}(t_r)M(t_r)$). Subsequently, radiation domination takes over once again. 
One can derive\cite{majumdar2} \begin{equation} {t_c \over t_r} \approx {3\over 2\delta_B\gamma_B} + {A\over 4\gamma_B^{2B}}\Biggl({M_4\over M_0}\Biggr)^2 \end{equation} with \begin{equation} \delta_B \equiv \Biggl({B+1/3 \over 1-\beta_{BH}}\Biggr)^{{3\over 3\beta_{BH} +1}} \end{equation} A stringent restriction is imposed by demanding that the standard low energy cosmology (for $t > t_c$) should emerge as radiation dominated. By requiring that the era of black hole domination be over before $t_c$, i.e., $t_r < t_c$, one gets a lower bound on $\beta_{BH}$ from Eq.(35), i.e., \begin{equation} \beta_{BH} \geq \Biggl[{4t_0\over 3t_c}\bigl(B + 1/3\bigr)^{{3\over 3B+1}}\Biggr]^{B+1/4} \end{equation} The evaporation time of black holes in this scenario has also been calculated\cite{majumdar2}. The black hole lifetime is given by \begin{equation} \label{lifetime2} {t_{end}\over t_4} \approx \Biggl({M_0\over M_4}\Biggr)^2{t_c\over t_4}\gamma_B^{2B}\end{equation} The effect of accretion is less significant in prolonging black hole lifetimes in this case, as borne out by the following example. For instance, taking the value of the AdS radius to be $l/l_4 \simeq 10^{20}$, and $\beta_{BH} = 10^{-3}$, black holes with $M_0 \simeq 10^{12}{\rm g}$ evaporate during the present era. This is only to be expected, since early matter domination results in larger Hubble expansion rate which further restricts the availability of radiation for the black holes to accrete. It thus turns out that it is possible to have primordial black holes formed in the high energy era of the braneworld scenario to survive up to several cosmologically interesting eras. Accretion of radiation during the radiation dominated era is primarily responsible for the increased longevity of braneworld black holes, though their $5$-dimensional geometry also contributes to a slower rate of Hawking evaporation. 
It is worth noting here that primordial black holes in certain models could also accrete the energy of a cosmological scalar field\cite{bean}. Such an effect could lead to further growth of these black holes beyond the early radiation dominated era, thus pushing up their lifetimes further. The implications for a population of primordial black holes in cosmology are diversely manifold. At any particular era, the surviving black holes would contribute a portion to the the total energy density as dark matter in the universe. If the black holes are produced with an initial mass spectrum, then one would have evaporating black holes at different eras. Hawking radiation from these evaporating black holes would on one hand produce all kinds of particles including heavier ones which could lead to baryogenesis\cite{majumdar1}, and on the other, could contribute significantly to the background photons, thus diluting the baryon to photon ratio. Observational constraints impacting different cosmological eras could be used to impose restrictions on the initial mass spectrum of braneworld black holes in a manner similar to the primordial black holes in standard cosmology. Clancy et al\cite{liddle} have shown how standard constraints are modified in the case of braneworld cosmology. To simplify the treatment, one can assume that radiation domination persists up to $t_c$, and that the accretion of radiation is possible only up to $t_c$. A black hole mass fraction $\alpha_M$ in terms of the radiation density can be defined (related to the mass fraction $\beta_{BH}$) as \begin{eqnarray} \alpha_{M_0}(t) = \frac{\rho_{BH}(t)}{\rho_T} \nonumber \\ \alpha_{M_0}(t_0) = \frac{\beta_{BH}}{1-\beta_{BH}} \end{eqnarray} Similarly, a `final' mass fraction $\alpha(t_{\mathrm{evap}})$ is defined at the end of the black hole life-cycle, since the black holes radiate most of their energy towards the very end of their lifetimes. 
Observational constraints on $\alpha(t_{\mathrm{evap}})$ are considered at different cosmological epochs, given by \begin{equation} \alpha (t) < L_{4D}(t) \end{equation} for standard $4$-dimensional black holes, or \begin{equation} \alpha (t) < L_{5D}(t) \end{equation} for braneworld black holes. Then these constraints are evolved backwards to constrain initial mass spectrum. Accretion in the high-energy phase leads to $\alpha (t) \propto M_{BH}(t)a(t)$. Therefore the constraints on the initial black hole mass fraction are given by \begin{equation} L_{4D}^0 \equiv \alpha_i < [\frac{a_0}{a(t)}]_{4D} L_{4D}(t) \end{equation} for standard cosmological evolution, and \begin{equation} L_{5D}^0 \equiv \alpha_i < [\frac{a_0}{a(t)}]_{5D} L_{5D}(t) \end{equation} in the braneworld scenario. Any astrophysical or cosmological process to be constrained at certain epoch is dominantly affected by the PBHs with lifetimes of that epoch. The constraints on primordial black holes in standard cosmology thus get modified to\cite{liddle} \begin{equation} \frac{L_{5D}^0}{L_{4D}^0} = \frac{L_{5D}(t_{\mathrm{evap}})}{L_{4D}(t_{\mathrm{evap}})} \biggl(\frac{l}{l_{\mathrm{min}}}\biggr)^{\frac{5-16B}{16-8B}} \end{equation} where $l_{\mathrm{min}} \propto t_{\mathrm{evap}}^{1/3}$. The departure from standard constraints is sensitive to the accretion efficiency, as is expected. A more detailed study on how the initial mass spectrum of the black holes is distorted due to braneworld accretion has been undertaken by Sendouda et al\cite{sendouda1}. The diffuse photon background emitted by the spectrum of black holes has been shown to be modified in accordance with the mass spectrum of the black holes. These results have been compared to the observed diffuse photon background to obtain bounds on the initial black hole mass fraction, the scale of the extra dimension, and the accretion efficiency by these authors. 
The observed number density of massive particles could also be used to obtain bounds on the initial mass fraction of the black holes, as in the case of Schwarzschild primordial black holes in standard cosmology. Further constraints on the scale of the extra dimension have been derived\cite{sendouda2} by considering the recent observation of sub-Gev galactic antiprotons\cite{orito} as originating from braneworld black holes present in our galaxy. A relevant issue is to investigate whether primordial braneworld black holes could contribute to a significant fraction of cold dark matter. If so, the direct searches of cold dark matter compact objects through gravitational lensing might be able to reveal their presence in galactic haloes. In the next section we will report on the specific features of the analysis of gravitational lensing for several braneworld black hole metrics. \section{Gravitational lensing by braneworld black holes} The braneworld scenario implies the modification of Einstein's general relativity at short distances or strong gravitational fields. The bending of light due to the gravitational potential of a massive object is one of the first predictions of the general theory of relativity. Its application in the phenomenon of gravitational lensing\cite{schneider} has potentially diverse possibilities. Gravitational lensing in the weak field limit\cite{bernard} is till date one of the most widely used tool in observational astrophysics and cosmology. On the other hand, strong field gravitational lensing, though limited in observational utility because of presently inadequate instruments, remains our ultimate scope for exploring the physics of strong gravitational fields. The general technique for analysing strong gravitational lensing for spherically symmetric metrics has been developed by Bozza\cite{bozza1,bozza} who has also formulated useful connections between observational quantities like fluxes and resolutions and the metric parameters. 
Strong gravitational lensing is also endowed with richer phenomenological features like relativistic images\cite{virbhadra1,virbhadra2} and retrolensing\cite{retro,eiroa2,bozza2}. It would be thus worthwhile to investigate if the results of strong gravitational lensing could be used for probing the modifications to general relativity made in braneworld geometries. Such studies are also motivated from the possibility of producing braneworld black holes in future accelerators\cite{giddings,dimopoulos}. Analysis of the trajectories of light and massive particles in various braneworld and higher dimensional metrics have been undertaken recently. Kar and Sinha\cite{kar} obtained the bending angle of light for several brane and bulk geometries. Frolov et al\cite{frolov5} found certain non-trivial features about the propagation of light in the Myers-Perry\cite{myers} metric for a $5$-dimensional black hole solution. This solution represents primordial black holes that could be produced with size $r < l$ in the early high energy era of the RS-II braneworld scenario\cite{guedens}. It was shown that such black holes could grow in size due to accretion of radiation, and consequently survive till much later stages in the evolution of the universe\cite{majumdar2}. With a suitable choice of parameters, some of these black holes could also exist in the form of coalescing binaries in galactic haloes at present times\cite{majumdar3}. The weak field limit of gravitational lensing was studied for the Myers-Perry metric and certain notable differences from the standard Schwarzschild lensing were found\cite{majumdar4}. Thereafter, Eiroa\cite{eiroa} analysed the strong field lensing and retrolensing effects for the Myers-Perry black hole. Strong field gravitational lensing in a couple of other braneworld metrics has been discussed by Whisker\cite{whisker} and some lensing observables have been computed using parameters for the galactic centre black hole. 
For a spherically symmetric metric \begin{equation} ds^2 = - A(r)dt^2 +B(r)dx^2 +C(r)\left(d\Omega^2\right) \label{sphersym} \end{equation} where the asymptotic forms of the functions $A(r)$ and $B(r)$ have the standard $1/r$ form, and $C(r) \to r^2$ asymptotically, the general formalism of strong field gravitational lensing has been worked out by Bozza\cite{bozza}. It is required that the equation \begin{equation} \frac{C'(r)}{C(r)}=\frac{A'(r)}{A(r)} \label{photsphere} \end{equation} admits at least one positive solution the largest of which is defined to be the photon sphere $r_{\mathrm ph}$. A photon emanating from a distant source and having an impact parameter $u$ will approach near the black hole at a minimum distance $r_0$ before emerging in a different direction (see Figure~\ref{f1}). The closest approach distance is given in terms of the impact parameter by \begin{equation} u=\sqrt{\frac{C_0}{A_0}} \label{impact} \end{equation} where the functions $C$ and $A$ are evaluated at $r_0$. The deflection angle of the photon in terms of the distance of closest approach is \begin{eqnarray} && \alpha(r_0)=I(r_0)-\pi \label{deflangle1} \\ && I(r_0)=\int\limits_{r_0}^\infty \frac{2\sqrt{B}dr}{\sqrt{C} \sqrt{\frac{C}{C_0}\frac{A_0}{A}-1}} \label{deflint1} \end{eqnarray} The weak field limit is obtained by expanding the integrand in Eq.(\ref{deflint1}) to the first order in the gravitational potential. This limit however is not a good approximation when there is a significant difference between the impact parameter $u$ and the distance of closest approach $r_0$, which occurs when $A(r_0)$ significantly differs from $1$, or $C(r_0)$ from $r_0^2$. By decreasing the impact parameter, and consequently the distance of closest approach, the deflection angle increases beyond $2\pi$ at some stage resulting in one or more photonic loops around the black hole before emergence. 
Further decrease of the impact parameter to a minimum value $u_m$ corresponding to the distance of closest approach $r_0 = r_{\mathrm ph}$ results in the divergence of the deflection angle (integral in Eq.(\ref{deflint1})), which means that the photon is captured by the black hole. Strong field gravitational lensing is useful for studying the deflection of light in a region starting from just beyond the photon sphere up to the distance where the weak field approximation approaches validity. In the general analysis of strong field gravitational lensing it becomes necessary to extract out the divergent part of the deflection angle. In order to do so two new variables $y$ and $z$ are defined as\cite{bozza} \begin{eqnarray} && y=A(r) \\ && z= \frac{y-y_0}{1-y_0} \label{newvar} \end{eqnarray} where $y_0=A_0$. In terms of these variables, the integral (\ref{deflint1}) in the deflection angle is given by \begin{eqnarray} && I(r_0)=\int\limits_0^1 R(z,r_0) f(z,r_0) dz \label{I z}\label{deflint2} \\ && R(z,r_0)=\frac{2\sqrt{B y}}{C A'}\left( 1-y_0 \right) \sqrt{C_0} \label{R} \\ && f(z,r_0)=\frac{1}{\sqrt{y_0- \left[ \left(1-y_0 \right) z+ y_0 \right]\frac{C_0}{C}}} \end{eqnarray} where all functions without the subscript $0$ are evaluated at $r=A^{-1} \left[\left(1-y_0 \right) z+ y_0 \right]$. The function $R(z,r_0)$ is regular for all values of $z$ and $r_0$, while $f(z,r_0)$ diverges for $z \to 0$. \begin{figure}[pb] \centerline{\psfig{file=fig1.eps,width=4.7cm}} \vspace*{8pt} \caption{Gravitationl lensing for point like mass object $M$. A light ray from the source $S$ passes the lens with an impact parameter $u$, and is deflected by an angle $\alpha$. The observer sees an image $I$ of the source at the angular position $\theta$. 
\label{f1}} \end{figure} The deflection angle can be written as a function of $\theta = u/D_{d}$, where $\theta$ is the angular separation of the image from the lens, and $D_{d}$ is the distance between the lense and the observer (See Figure~\ref{f1}). In order to do so, an integral \begin{equation} b_R = \int\limits_0^1 g(z,r_m) dz \label{deflint3} \end{equation} is defined, where \begin{equation} g(z,r_m)= R(z,r_m)f(z,r_m)-R(0,r_m)f_0(z,r_m) \end{equation} The expression for the deflection angle is given as a function of $\theta = u/D_{d}$ as\cite{bozza} \begin{eqnarray} && \alpha(\theta)=-\overline{a} \log \left( \frac{\theta D_{d}}{u_m} -1 \right) +\overline{b} \label{deflangle2}\\% && \overline{a}= \frac{R(0,r_m)}{2\sqrt{\beta_m}} \label{abar}\\% && \overline{b}= -\pi+b_R+\overline{a}\log{\frac{2\beta_m}{y_m}}\label{bbar} \end{eqnarray} where $R(0,r_m)$ and $b_R$ are given be Eqs.(\ref{R}) and (\ref{deflint3}), respectively, and $\beta_m$ is defined as \begin{equation} \beta_m=\frac{ C_m \left( 1- y_m \right)^2 \left(C''_m y_m-C_m A''(r_m) \right)}{2y_m^2 {C'_m}^2} \label{betam} \end{equation} In strong lensing there may exist $n$ relativistic images given by the number of times a light ray loops around the black hole. The positions of these are obtained as solutions of the lense equation given by \begin{equation} {\mathrm tan} \delta = {\mathrm tan} \theta - \frac{D_{ds}}{D_s} [{\mathrm tan} \theta + {\mathrm tan}(\alpha - \theta)] \label{lenseq} \end{equation} for specific positions of the source and the lense respective to the observer, and using the the value of the deflection angle from Eq.(\ref{deflangle2}). The relativistic images formed by light rays winding around the black hole are highly demagnified compared to the weak field images. When the source, the lense and the observer are highly aligned, it is possible to obtain the most prominent of the relativistic images\cite{virbhadra1}. 
Hence, the analysis of strong lensing is usually restricted to the case when both $\delta$ and $\theta$ are small\cite{bozza}, though the general case for arbitrary positions can also be analysed\cite{bozza2}. With the above restriction on the values of $\delta$ and $\theta$, a light ray will reach the observer after winding around the lense $n$ number of times only if the deflection angle $\alpha$ is very close to a multiple of $2\pi$. Substituting $\alpha = 2n\pi + \Delta \alpha_n$ in Eq.(\ref{lenseq}), one gets \begin{equation} \delta = \theta - \frac{D_{ds}}{D_s} \Delta \alpha_n \label{lenseq2} \end{equation} The position of the $n$-th relativistic image $\theta_n$ can hence be obtained as a solution of the lense equation (\ref{lenseq2}) as\cite{bozza} \begin{equation} \theta_n = \frac{u_m}{D_d}(1 + e_n) + \frac{u_m e_n \biggl(\delta - \frac{u_m(1+e_n)}{D_d}\biggr)D_s}{\overline{a}D_{ds}D_d} \label{nposit} \end{equation} where $u_m$ is the minimum impact parameter, and $e_n$ is given by \begin{equation} e_n = {\mathrm e}^{(\overline{b} -2n\pi)/\overline{a}} \end{equation} The magnification $\mu_n$ of the $n$-th relativistic image is given by\cite{bozza} \begin{equation} \mu_n = \frac{1}{(\delta/\theta)\partial \delta \partial \theta} \vert_{\theta_n} \simeq \frac{u_m^2 e_n (1+e_n)D_s}{\overline{a}\delta D_{ds}D_d^2} \label{magnif} \end{equation} The above formula for magnification is valid under the approximation of a point source. However, for an extended source the magnification at the image positon can also be derived by integrating over the luminosity profile of the source\cite{eiroa2}. It is useful to obtain the expressions for the various lensing observables in terms of the metric parameters. For $n \to \infty$ an observable $\theta_{\infty}$ can be defined\cite{bozza} representing the asymptotic position approached by a set of images. 
The minimum impact parameter can then be obtained as \begin{equation} u_m=D_{d} \theta_{\infty} \label{thetainfty} \end{equation} In the simplest situation where only the outermost image $\theta_1$ is resolved as a single image, while all the remaining ones are packed together at $\theta_\infty$, two lensing observables can be defined as\cite{bozza} \begin{eqnarray} {\cal S}=\theta_1-\theta_\infty \end{eqnarray} representing the separation between the first image and the others, and \begin{eqnarray} {\cal R}=\frac{\mu_1}{\sum\limits_{n=2}^\infty \mu_n} \end{eqnarray} corresponding to the ratio between the flux of the first image and the flux coming from all the other images. In terms of the deflection angle parameters $\overline{a}$ and $\overline{b}$, these observables can be written as\cite{bozza} \begin{equation} {\cal S}= \theta_\infty e^{\overline{b}/\overline{a} - 2\pi/\overline{a}} \label{obs-s} \end{equation} \begin{equation} {\cal R}=e^{2\pi/\overline{a}} \label{obs-r} \end{equation} The above equations (\ref{obs-s}) and (\ref{obs-r}) can be inverted to express $\overline{a}$ and $\overline{b}$ in terms of the image separation ${\cal S}$ and the flux ratio ${\cal R}$. Therefore the knowledge of these two observables can be used to reconstruct the deflection angle given by Eq.(\ref{deflangle2}). The aim of strong field gravitational lensing is to detect the relativistic images corresponding to specific lensing candidates and measure their separations and flux ratios. Once this is accomplished, the observed data could be compared with the theoretical coefficients obtained using various metrics. A precise set of observational data for strong gravitational lensing, if obtained, could therefore be able discriminate between different models of gravity. In the braneworld scenario the computation of the above parameters has been performed for two metrics\cite{whisker} taking the black hole at the centre of our galaxy as a potential candidate. 
We will now consider examples of the various lensing quantities defined above for some of the possible braneworld black hole geometries discussed in section 2. It is instructive to compare the braneworld lensing quantities with the standard Schwarzschild ones which for strong gravitational lensing are given as follows\cite{bozza}. Choosing the Schwarzschild radius $r_s = 2M/M_4^2$ as the unit of distance, the photon sphere is given by \begin{equation} r_{\mathrm ph} = \frac{3}{2} \label{phot} \end{equation} for Schwarzschild lensing. The corresponding minimum impact parameter is \begin{equation} u_m = \frac{3\sqrt{3}}{2} \label{minimp} \end{equation} The coefficients $\overline{a}$ and $\overline{b}$ defined in Eqs.(\ref{abar}) and (\ref{bbar}) are given by \begin{eqnarray} \overline{a} &=& 1 \\ \overline{b} &=& -\pi + 2 {\mathrm log}[6(2-\sqrt{3})] + {\mathrm log}[6] \end{eqnarray} The deflection angle (\ref{deflangle2}) is obtained in terms of the parameters $\overline{a}$ and $\overline{b}$ to be\cite{bozza} \begin{equation} \alpha(\theta) = -{\mathrm log}\biggl(\frac{2\theta D_d}{3\sqrt{3}}-1\biggr) + {\mathrm log}[216(7- 4\sqrt{3})] - \pi \end{equation} In the weak field limit, the expression for the bending angle is given by \begin{equation} \alpha_{\mathrm weak} = \frac{4M}{M_4^2 r_0} \end{equation} For the analysis of gravitational lensing by braneworld metrics, let us first consider the Garriga-Tanaka weak field solution\cite{garrtan} given by Eq.({\ref{gartanmetric}) in isotropic coordinates. 
The metric in terms of the standard coordinates can be written as \begin{equation} ds_4^2=-\left(1-\frac{2M}{M_4^2r}+\frac{4Ml^2}{3M_4^2r^3}\right)dt^2+ \left[\frac{1+\frac{M}{M_4^2r}-\frac{Ml^2}{3M_4^2r^3}}{1+\frac{2M}{M_4^2r} +\frac{2Ml^2}{3M_4^2r^3}}\right]^{-2}dr^2 +r^2(d\Omega^2) \label{gartanmetric2} \end{equation} The formal expression for the radius of the photon sphere $r_{\mathrm ph}$ can be obtained as a function of $M$ and $l$ using Eq.(\ref{photsphere}) to be \begin{eqnarray} r_{\mathrm ph} = \frac{r_s}{2} &&+\frac{3^{1/3}r_s^2}{2(3r_s^3-20l^2r_s+2\sqrt{10} \sqrt{10l^4r_s^2-3l^2r_s^4})^{1/3}}\nonumber\\ &&+\frac{(3r_s^3-20l^2r_s+2\sqrt{10}\sqrt{10l^4r_s^2-3l^2r_s^4})^{1/3}}{2 3^{1/3}} \label{garrphot} \end{eqnarray} where $r_s = 2M/M_4^2$. However, it can be seen from Eq.(\ref{garrphot}) that no real solution for $r_{\mathrm ph}$ exists for admissible values of $l$ and $r_s$. This is to be expected since the metric ({\ref{gartanmetric}) represents a weak field solution. The weak field limit of the bending angle was obtained to be\cite{kar} \begin{equation} \alpha_{\mathrm weak} = \frac{4M}{M_4^2 \tilde{r}_0} + \frac{4Ml^2}{M_4^2\tilde{r}_0^3} \end{equation} where $\tilde{r}_0$ in this case is the isotropic coordinate equivalent of the distance of closest approach $r_0$ in standard coordinates. The analysis of strong field gravitational lensing can however be performed in other braneworld geometries. For example, let us consider lensing in the metric with tidal charge (\ref{tidalmetric}) given by\cite{dadhich} \begin{equation} dS_4^2=-\left(1-\frac{2M}{M_4^2r}+\frac{Q}{r^2}\right)dt^2\\+ \left(1-\frac{2M}{M_4^2r}+\frac{Q}{r^2}\right)^{-1}dr^2\\ +r^2(d\Omega^2) \label{tidalmetric-2} \end{equation} which resembles the Reissner-Nordstrom metric, but with $Q < 0$ in the braneworld context. 
Again using units of distance $2M/M_4^2$, it is straightforward to obtain the expressions for the photon sphere and the minimum impact parameter given by\cite{bozza} \begin{eqnarray} r_{ph} &=& \frac{\left(3+\sqrt{9-32Q}\right)}{4} \\ u_{m} &=& \frac{\left(3+\sqrt{9-32Q}\right)^2}{4\sqrt{2}\sqrt{3-8Q+\sqrt{9-32Q}}} \end{eqnarray} The coefficients $\overline{a}$ and $\overline{b}$ in the deflection angle are given by\cite{bozza} \begin{eqnarray} \overline{a} = \frac{r_{\mathrm ph}\sqrt{r_{\mathrm ph} - 2Q}}{\sqrt{(3 - r_{\mathrm ph})r_{\mathrm ph}^2 - 9Q r_{\mathrm ph} + 8Q^2}}\\ \overline{b} = -\pi + 2 {\mathrm log}[6(2-\sqrt{3})] + \frac{8Q \Bigl(\sqrt{3} - 4 + {\mathrm log}[6(2-\sqrt{3})]\Bigr)}{9} \nonumber \\ + \frac{(r_{\mathrm ph} - Q)^2 [(3- r_{\mathrm ph})r_{\mathrm ph}^2 - 9Q r_{\mathrm ph} + 8Q^2] \overline{a}{\mathrm log}[2]}{(r_{\mathrm ph}- 2Q)^3(r_{\mathrm ph}^2 - r_{\mathrm ph} + Q)} \end{eqnarray} In terms of the above coefficients one obtains the complete expression for the deflection angle using Eq.(\ref{deflangle2}). The weak field limit of the bending angle was derived to be\cite{kar} \begin{equation} \alpha_{\mathrm weak} = \biggl(\frac{1}{M_4^2} - \frac{3\pi Q}{16M r_0} \biggr)\frac{4M}{r_0} \end{equation} Note that the bending angle is always positive because of negative tidal charge $Q$ unlike the electric charge of the Reissner-Noredstrom metric. 
We next consider the braneworld solution (\ref{casad1}) in terms of the PPN parameter $\beta$ given by\cite{casadio} \begin{equation} ds_4^2= -(1-\frac{2M}{M_4^2r})dt^2 + \frac{1-\frac{3M}{2M_4^2r}} {(1-\frac{2M}{M_4^2r}) \left(1-\frac{M(4\beta-1)}{2M_4^2r}\right)} + r^2(d\Omega^2) \label{casad1-2} \end{equation} For the above metric the expressions for the radius of the photon sphere $r_{\mathrm ph}$ and the minimum impact parameter are of course similar to those for Schwarzschild lensing given by Eqs.(\ref{phot}) and (\ref{minimp}), as is easy to see using Eqs.(\ref{sphersym}), (\ref{photsphere}) and (\ref{impact}). The expression for the deflection angle can be derived in terms of the coefficients $\overline{a}$ and $\overline{b}$ which for the metric (\ref{casad1-2}) are given by (setting the unit of distance as $2M/M_4^2$) \begin{equation} \overline{a} = \frac{\sqrt{3}}{\sqrt{6-(4\beta-1)}} \label{casadbend1} \end{equation} \begin{equation} \overline{b} = -\pi + \frac{2\sqrt{3}}{\sqrt{6-(4\beta-1)}}{\mathrm log}[6(2-\sqrt{3})] + \frac{\sqrt{3}}{\sqrt{6-(4\beta-1)}}{\mathrm log}[6] \label{casadbend2} \end{equation} Kar and Sinha\cite{kar} obtained the weak field limit of the bending angle for the metric (\ref{casad1-2}) to be \begin{equation} \alpha_{\mathrm weak} = \frac{2M(1+\beta)}{r_0} \label{casadweak} \end{equation} Note that the standard Schwarzschild expressions are recovered in Eqs.(\ref{casadbend1}), (\ref{casadbend2}) and (\ref{casadweak}), as should be, for $\beta =1$. The lensing observables ${\cal S}$ and ${\cal R}$ corresponding to the ratio between the flux of the first image and the flux coming from all the other images as defined in Eqs.(\ref{obs-s}) and (\ref{obs-r}) respectively, can be computed using the expressions for $u_m$, $\overline{a}$, and $\overline{b}$ for particular geometries. One can obtain the magnitudes of ${\cal S}$ and ${\cal R}$ for particular lensing candidates using the known values for their masses and distances. 
For the black hole located at the centre of our galaxy at a distance of $D_d = 8.5$kpc and with mass $M = 2.8 \times 10^6M_{\odot}$, the position of relativistic images in Schwarzschild strong lensing was first computed by Virbhadra and Ellis\cite{virbhadra1}, and the observable parameters ${\cal S}$ and ${\cal R}$ were computed by Bozza\cite{bozza}. Whisker\cite{whisker} has computed using the above black hole as a candidate lense the values for these observables for two possible braneworld gemetries given by the metric (\ref{tidalmetric-2}) with tidal charge\cite{dadhich} and another solution\cite{gregory2}. \begin{table}[ph] \tbl{Estimates for the strong field lensing angle coefficients and observables for the black hole at the center of our galaxy with mass $M=2.8\times10^{6}M_{\odot}$ and $D_d = 8.5$ kpc, for standard Schwarzschild and two different braneworld geometries. ($r_m = 2.5 {\mathrm log}{\cal R}$.)} {\begin{tabular}{|l|l|l|l|l|l|l|l|} \hline {Observables}& {Schwarzschild} & \multicolumn{3}{|l|} {Brane metric} & \multicolumn{2}{|l|} {Brane metric}\\ $\>$ &{metric} &\multicolumn{3}{|l|}{with tidal charge Q} & \multicolumn{2}{|l|}{with PPN parameter $\beta$}\\ \cline{3-7} $\>$ & $\>$ & $Q=-0.1$ & $Q=-0.2$ & $Q=-0.3$ & $\beta=1+ 10^{-4}$ & $\beta=1- 10^{-4}$\\ \hline $\theta_{\infty} (\mu\phantom{x}\textrm{arc sec})$&16.87 & 17.87 & 18.92 & 19.65 & 16.87 & 16.87\\ \hline ${\cal S}(\mu\phantom{x}\textrm{arc sec})$& 0.0211 & 0.0142 & 0.0102 & 0.097 & 0.02115 & 0.01923\\ \hline $r_{m} (\textrm {magnitudes})$& 6.82 & 7.02 & 7.2 & 7.37 & 6.818 & 6.887\\ \hline $u_m/r_s $& 2.6 & 2.75 & 2.9 & 3 & 2.6 & 2.6\\ \hline $\overline{a}$& 1 & 0.9708 & 0.938 & 0.925 & 1.00006 & 0.9999\\ \hline $\overline{b}$& -0.4002 & -0.612 & -0.747 & -0.819 & -0.402 & -0.429\\ \hline \end{tabular} \label{ta1}} \end{table} In Table~\ref{ta1} we display the values of these lensing quantities $\theta_{\infty}, {\cal S}, {\cal R}, u_m/r_s, \overline{a}$, and $\overline{b}$ first for 
Schwarzschild lensing. We compare the values of the lensing observables with those obtained for lensing with two braneworld geometries, i.e., the metric (\ref{tidalmetric-2}) with tidal charge\cite{dadhich}, and the metric (\ref{casad1-2}) with the PPN parameter $\beta$\cite{casadio}. We choose three negative values of the tidal charge $Q$. The two values of the parameter $\beta$ on either side of the Schwarzschild value ($\beta = 1$) are chosen to maintain observational compatibility with the solar system tests of the Nortvedt effect\cite{will}. The measurement of the observable $\theta_{\infty}$ involves a microsecond resolution which should in principle be attainable by the very long baseline interferometry projects such as MAXIM\cite{maxim}. Though the actual identification of faint relativistic images would be extremely difficult in practice due to the inherent disturbances\cite{virbhadra1}, an accurate measurement of $\theta_{\infty}$ would be able to distinguish the Schwarzschild geometry from the braneworld RN-type one. However, to unambiguously determine the exact nature of the black hole through the lensing angle coefficients $\overline{a}$ and $\overline{b}$, one has to measure the observables ${\cal S}$ and ${\cal R}$. Since this involves the resolution of two faint images separated by $\sim 0.02 \mu$ arc sec, such an observation would need a leap of technological development over the present astronomical facilities\cite{whisker,bozza}. \begin{figure}[pb] \centerline{\psfig{file=fig2.eps,width=5.7cm}} \vspace*{8pt} \caption{The photon sphere $r_{\mathrm ph}$ (in units of the Schwarzschild radius $r_s$) is plotted versus mass for the Schwarzschild black hole~(I), and the Myers-Perry braneworld black holes with $l= 10^{30}l_4$~(II), and $l= 10^{20}l_4$~(III). 
\label{f2}} \end{figure} Let us finally return to the Myers-Perry metric (\ref{smallmetric}) which is obtained from the $5$-dimensional Schwarzschild metric\cite{myers} and could be relevant for the geometry near the horizon of a small $r \le l$ braneworld black hole. The cosmological evolution of such black holes formed in the early universe having the metric \begin{equation} dS_{4}^2=-\left(1-\frac{r_{BH}^2}{r^2}\right)dt^2+\left(1-\frac{r_{BH}^2}{r^2}\right)^{-1}dr^2\\+r^2\left(d\Omega^2\right) \label{metric11} \end{equation} has been described in details in section 4. It is possible for such black holes to survive through the intermediate eras of the universe\cite{majumdar2,clancy}. It is feasible that such black holes with masses in the sub-lunar range exist as dark matter in galactic haloes\cite{inoue,majumdar3}. The photon sphere for a Myers-Perry braneworld black hole in units of $2M/M_4^2$ is given by \begin{equation} r_{\mathrm ph} = \frac{2}{\sqrt{3\pi}}\sqrt{\frac{l}{l_4}}\sqrt{\frac{M_4}{M}} \end{equation} and is plotted as a function of black hole mass in Figure~\ref{f2}. The minimum impact parameter is \begin{equation} u_m = \sqrt{2}r_{\mathrm ph} \end{equation} The coefficients $\overline{a}$ and $\overline{b}$ given by\cite{eiroa} \begin{eqnarray} \overline{a} = \frac{1}{\sqrt{2}} \\ \overline{b} = -\pi + \sqrt{2} {\mathrm log}(4\sqrt{2}) \end{eqnarray} can be then be used to construct the strong field deflection angle. Eiroa\cite{eiroa} has calculated the positions and magnifications of the relativistic images and compared them with those of Schwarzschild black holes. Gravitational lensing in the weak field limit by the black hole (\ref{metric11}) has been worked out\cite{majumdar4}. When the impact parameter $u$ exceeds a few times the horizon radius given by Eq.(\ref{massradius}), but is still lesser than $l$ ($u \le l$), the application of weak field lensing could be relevant. 
The weak field limit of the deflection angle was calculated to be\cite{majumdar4} \begin{equation} \alpha_{\mathrm weak} = \frac{2Mll_4}{M_4r_0^2} \end{equation} In order to satisfy the requirement that $u \le l$, and also obtain non-negligible magnification at the image location, the mass of the black hole should be such that\cite{majumdar4} \begin{equation} \frac{M}{M_4} \le \frac{l}{l_4} \label{masslim12} \end{equation} Using the maximum allowed value for $l$ by present experiments\cite{long}, one then obtains that $M \le 10^{-8}M_{\odot}$. So weak field gravitational lensing for such a black hole could be applicable only for masses in the sub-lunar range. As discussed above the braneworld scenario is conducive to the existence of primordial black holes in the sublunar mass range\cite{majumdar2,clancy}. If such black holes exist in our galactic halo, then the magnification of their weak field images turns out to be diminished compared to the standard Schwarzschild black holes of similar mass\cite{majumdar4}. \begin{table}[ph] \tbl{Estimates for the strong field lensing angle coefficients and observables for a black hole in the galactic halo with mass $M=10^{25}$ gm and $D_d = 10^{22}$ cm, for the standard Schwarzschild and the Myers-Perry geometry with two different values of $l$. 
($r_m = 2.5 {\mathrm log}{\cal R}$.)} {\begin{tabular}{|l|l|l|l|l|l|l|l|} \hline {Observables}& \multicolumn{2}{|l|} {Myers-Perry metric} & {Schwarzschild}\\ $\>$ &\multicolumn{2}{|l|} {with extra dimention $l$} &{metric}\\ \cline{2-3} $\>$ & $l=10^{30}l_4$ & $l=10^{20}l_4$ & $\>$\\ \hline $\theta_{\infty}(\mu\phantom{x}\textrm{arc sec})$&$0.039\times 10^{-12}$ &$0.0123\times10^{-16}$&$0.03\times 10^{-12}$\\ \hline ${\cal S}(\mu\phantom{x}\textrm{arc sec})$&$0.2042\times 10^{-17}$&$0.0644\times 10^{-21}$&$.0375\times 10^{-15}$\\ \hline $r_{m} (\textrm {magnitudes})$&9.64&9.64&6.82\\ \hline $u_m/r_s$&3.37&$1.065\times 10^{-4}$&2.6\\ \hline $\overline{a}$&0.707&0.707&1\\ \hline $\overline{b}$&-0.689&-0.689&-0.4002\\ \hline \end{tabular} \label{ta2}} \end{table} Before concluding this section we present a comparitive study of lensing in the Myers-Perry geometry (\ref{metric11}) with that in the standard Schwarzschild geometry. The respective photon spheres are plotted as a function of mass in Figure~\ref{f2} choosing two values of $l$ for the braneworld case. The mass range chosen is the one which could be relevant for Myers-Perry braneworld black holes (\ref{metric11}) that have been conjectured to exist in the form of binaries in the galactic halo\cite{majumdar3,inoue}. Choosing a specific value of mass in the above range, the strong field lensing quantities have been evaluated separately for the above two metrics. The strong lensing angle coefficients $\overline{a}$ and $\overline{b}$, and the observables $\theta_{\infty}$, ${\cal R}$ and ${\cal S}$ are displayed in Table~\ref{ta2}. It is interesting to note that a larger value for the scale of the extra dimension $l$ takes the values of the observables $\theta_{\infty}$ and ${\cal S}$ for the braneworld case closer to those of Schwarzschild lensing. 
This happens because the size of the braneworld black hole which is much smaller compared to the Schwarzschild black hole of same mass increases with $l$ for fixed mass~(\ref{massradius}). Of course, the values of these lensing quantities are far beyond the possiblity of verification by present observational capabilities. But the comparison of these numbers provides an in principle method of discriminating between different gravity models. \section{Summary and Conclusions} Braneworld black holes are the potential testing arenas of a rich theoretical structure associated with modified braneworld gravity and extra dimensions. If the fundamental scale of gravity is much lower than the $4$-dimensional Planck scale, then it is feasible for higher dimensional black holes to be formed in low energy processes. A lot of the present interest in the prospect of obtaining observable signatures of braneworld gravity is via the properties of the evaporation products of mini black holes produced either in particle collisions inside accelerators\cite{giddings,dimopoulos}, or in high energy cosmic ray showers\cite{feng}. Another issue of interest is regarding primordial black holes which may be formed through the collapse of overdense regions in the braneworld high energy phase of the early universe. Such black holes could have diverse cosmological implications. Further, even larger black holes could form by gravitational collapse of matter on the brane. Braneworld black holes have entirely different metrics compared to $4$-dimensional black holes, and carry the signature of the extra dimension in their geometries. The focus of this review has been to discuss the cosmological consequences of primordial braneworld black holes, and also to analyse the features associated with gravitational lensing in several braneworld black hole metrics. The physics of gravitational collapse on the brane is not yet understood in totality\cite{germani}. 
A serious conceptual difficulty arises due to the fact that the gravitational field equations are rendered incomplete on the brane by the effect of the bulk gravitational modes. No complete solution to the bulk gravitational field equations representing a spherically symmetric vacuum black hole metric on the brane has been found till date. Projections of the $5$-dimensional Weyl tensor have been used in the $4$-dimensional brane field equations in various configurations to obtain possible braneworld black hole metrics. In section~2 we have discussed some possible candidate geometries for braneworld black holes. These metrics are in general rather different from the standard Schwarzschild metric, and incorporate modifications of the standard $1/r$ gravitational potential in braneworld gravity\cite{garrtan}. The braneworld black hole solutions contain interesting features related to the existence of horizons, and the modified black hole entropy and temperature. The Reissner-Nordstrom type solution\cite{dadhich} contains a negative tidal charge which can be viewed as the reflection of the black hole mass by the bulk with negative cosmological constant. Other solutions discussed include the one that incorporates the contribution of the projected Weyl tensor in terms of the post-Newtonian parameters\cite{casadio}. The $5$-dimensional character of braneworld gravity at small scales motivates the consideration of the induced Myers-Perry metric\cite{myers} as a braneworld black hole candidate for black holes with radius $r < l$. Physically this corresponds to the fact that a small black hole is unable to distinguish between the bulk dimension and our $3$ brane dimensions. The modified properties\cite{argyres} of such black holes within the braneworld context have been discussed\cite{emparan}, and the different subtleties regarding their Hawking evaporation on the brane and into the bulk have been analysed earlier\cite{kanti}. 
Special features of the interaction of $5$-dimensional black holes with the brane have been revealed in the literature\cite{frolov1,frolov3,frolov2,frolov4,frolov5}. In this review we have highlighted the relevance of using the Myers-Perry geometry for the analysis of the processes of Hawking evaporation and the accretion of radiation in the early cosmological evolution of primordial black holes in the braneworld scenario. In section~3 we have provided a short summary of the main features of the cosmology in the RS-II model\cite{randall2}. Essentially the cosmology in the early braneworld era (high energy brane regime) is altered by the presence of a term in the Friedmann equation on the brane that is quadratic in the energy momentum tensor. This modifies the Hubble expansion at times earlier than the time $t_c \equiv l/2$. Among other consequences, such a scenario allows for the possibility of inflation with steep potentials that could be excluded in the standard scenario. Various features of the cosmology of braneworld models have been analysed in details in some recent reviews\cite{maartens,brax}. In the context of the present review, the most significant implication of the modified high energy behaviour is that the expansion of the Hubble volume makes it feasible for accretion by the primordial black holes of the surrounding radiation to take place. This feature is primarily responsible for the mass growth and prolonged survival\cite{majumdar2,clancy} of the braneworld black holes. The standard cosmological expansion of the universe is recovered at later times, in order for the observationally established processes such as nucleosynthesis to work out. The description of the braneworld cosmological evolution with primordial black holes has been provided in section~4. 
The modified mass-radius relationship for the induced $4$-dimensional Myers-Perry black holes leads to slower evaporation and longer lifetimes compared to standard Schwarzschild black holes\cite{guedens}. The detailed properties of evaporation of such black holes are described in a recent review\cite{kanti}. The accretion of radiation in the high energy phase could be effective because the growth of black hole mass is smaller than the growth of the mass in the Hubble volume in braneworld evolution\cite{majumdar2,clancy}. Thus, a large fraction of primordial black holes may survive up to much later eras. The decaying black holes affect several cosmological processes at different eras, and hence the observational abundance of different species, for example, the background high energy photons\cite{sendouda1}, could be used to put constraints on the initial mass spectrum of the black holes. These constraints have to be evaluated considering the altered cosmological evolution in the braneworld scenario, and therefore could be significantly modified compared to similar constraints in standard cosmology\cite{liddle}. The exchange of energy of the black holes with the surrounding radiation in the high energy era leads to mass disequilibration of neighbouring black holes\cite{majumdar3}. As a consequence, binaries could be formed later through $3$-body gravitational interactions. Such binaries have masses in the sub-lunar range, and gravitational waves emitted during their coalescence come in the detectable range\cite{inoue}. Braneworld black holes existing at present times offer another scope of detection, viz. through the gravitational lensing of light sources by them. In section~5 we have first briefly reviewed the framework of gravitational lensing using which both weak field and strong field lensing can be handled in a unified manner\cite{bozza}. 
The expressions for the lensing quantities in different braneworld metrics have been presented analysing the crucial differences from standard Schwarzschild lensing. Myers-Perry black holes existing in the galactic halo in certain mass ranges would be difficult to detect via weak field lensing due to reduced magnification compared to Schwarzschild black holes\cite{majumdar4}. Strong field gravitational lensing through its prominent features such as the production of relativistic images and retrolensing offers the ultimate scope of discriminating between different gravity models\cite{eiroa}. Using the black hole at the centre of our galaxy as a candidate lens, it is possible to compute the theoretical values of several lensing observables in different geometries\cite{whisker}. We have presented the computed lensing observables for a few braneworld metrics. A comparison with the lensing observables of the Schwarzschild metric shows that it might be possible to distinguish the braneworld black hole with tidal charge\cite{dadhich} from the Schwarzschild black hole by the possible measurement of one of these observables in the near future. The physics of extra dimensions is no longer a field of mere theoretical constructs. There is currently a lot of ongoing activity on finding observational signatures of braneworld black holes through different physical mechanisms. In addition to the motion of light rays, the properties of massive orbiting particles in these geometries are also expected to exhibit interesting features. In particular, it has been shown that no stable circular orbits exist in the equatorial plane of the Myers-Perry metric\cite{frolov5}. The position of the innermost stable orbit shifts for braneworld black holes as is the case with the photon sphere, and this could lead to observable modifications of the properties of the accretion disks around black holes. Gravity wave spectroscopy offers another possibility of observing braneworld gravity. 
It has been pointed out using the black string between two branes as a model of a braneworld black hole, that the massive bulk gravity modes could lead to detectable spectroscopic signatures that are absent in the normal $4$-dimensional gravitational waves\cite{seahra}. Furthermore, the possibility of even distinguishing between different braneworld models such as the ADD model\cite{arkani} and the RS model\cite{randall2} via the production of their respective black holes in accelerators has been argued\cite{stojkovic2}. Much excitement exists indeed in the prospects of detecting signatures of extra dimensions. The range of cosmological and astrophysical implications of extra dimensions, braneworld gravity, and black holes has only started to be investigated.
{ "redpajama_set_name": "RedPajamaArXiv" }
5,114
Walter Hayle Walshe (1812–1892) was an Irish physician, a pioneer in the study of cancer with his discovery that malignant cells can be recognised under a microscope. Life The son of William Walshe, a barrister, he was born in Dublin on 19 March 1812. He studied at Trinity College, Dublin, entering in 1827, but did not take a degree. In 1830 he went to live in Paris, and there initially studied oriental languages, but in 1832 began medicine. He became acquainted in 1834 with the anatomist Pierre Charles Alexandre Louis. Oliver Wendell Holmes, Sr. and François Louis Isidore Valleix, the French physician, were his fellow-students, and continued his friends throughout life. Walshe went to Edinburgh in 1835, there graduated M.D. in 1836, and in 1838 began practice in London. He was elected as professor of morbid anatomy at University College, London, in 1841, lecturing on morbid anatomy till 1846, when he was elected Holme professor of clinical medicine and physician to University College Hospital. In 1848 he was appointed professor of the principles and practice of medicine, a post which he held till 1862. In his lectures he used numerical statements of fact and case analysis; Sir William Jenner praised his clarity. His pupils maintained that he was the first accurately to describe the anatomy of movable kidney and epidural haematoma, and to teach that patients with aortic regurgitation are likely to die suddenly. In 1852 Walshe was elected a fellow of the College of Physicians of London. He first lived in Upper Charlotte Street, then in Queen Anne Street, and had for at time a considerable practice as a physician. Sir Andrew Clark commented that he had little ability in the treatment of disease. Walshe died in London on 14 December 1892. In 1868 he married Caroline Ellen Baker, and had one son. Works In 1843 Walshe published The Physical Diagnosis of Diseases of the Lungs, later superseded by the Auscultation and Percussion of Samuel Gee, one of his pupils. 
He translated P. C. A. Louis's Recherches sur la Phthisie into English in 1844. In 1846 he published a large volume On the Nature and Treatment of Cancer, a collection of the then existing knowledge of neoplasms and hypotheses as to their origin. In 1851 he published A Practical Treatise on Diseases of the Lungs and Heart, of which several editions appeared, and part of which was enlarged into A Practical Treatise on the Diseases of the Heart and Great Vessels. A complete list of his medical books is to be found in vol. xvi. of the Index Catalogue of the Library of the Surgeon-general's Office, U. S. Army. Walshe wrote in 1839 and 1840 numerous pathological articles in William Birmingham Costello's Cyclopædia of Practical Surgery. He made contributions to medical journals and transactions, and in 1885 wrote the Colloquial Linguistic Faculty and its Physiological Groundwork, of which a second edition appeared in 1886. He published in 1881 a short treatise called Dramatic Singing Physiologically Estimated, in which he attempted to provide numerically scaled quantifications and categorical qualifications of the operatic or traditional classical voice, based on the qualities of a singer's voice itself, how it used in vocalization, and the dramatic expression employed by the singer and in relation to the two other categories. Notes Attribution 1812 births 1892 deaths Academics of University College London Alumni of Trinity College Dublin Cancer researchers Fellows of the Royal College of Physicians Irish anatomists Medical doctors from Dublin (city)
{ "redpajama_set_name": "RedPajamaWikipedia" }
1,527
/**
 * Author: CZ cz.theng@gmail.com
 */

// Package object implements object-level operations of the Aliyun OSS
// REST API; this file covers the "Append Object" operation.
package object

import (
	"github.com/cz-it/aliyun-oss-golang-sdk/ossapi"
	"path"
	"strconv"
)

/*
//redefine on put_object
type ObjectInfo struct {
	CacheControl       string
	ContentDisposition string
	ContentEncoding    string
	Expires            string
	Encryption         string
	ACL                string
	ObjName            string
	BucketName         string
	Location           string
	Body               []byte
	Type               string
}
*/

// AppendObjInfo carries the request data for an Append call: the embedded
// Info supplies the shared object metadata (Cache-Control, ACL, body,
// content type, ...) and Position is the byte offset sent to the server in
// the "position" query parameter of the request.
type AppendObjInfo struct {
	Info
	// Position is the offset at which this append starts.
	// NOTE(review): per the OSS AppendObject API this presumably must equal
	// the object's current length (0 for the first append) — confirm.
	Position uint64
}

// AppendObjRspInfo is the response info of an Append call.
type AppendObjRspInfo struct {
	// Possition holds the X-Oss-Next-Append-Position response header value,
	// i.e. the position to use for the next Append call.
	// NOTE(review): the field name is a typo for "Position"; it is kept
	// unchanged here because renaming would break the exported API.
	Possition uint64
	// crc64 holds the X-Oss-Hash-Crc64ecma response header value.
	// NOTE(review): unexported, so callers outside this package cannot
	// read the checksum.
	crc64 uint64
}

// Append creates/extends an appendable object by POSTing objInfo.Body to
// "/<objName>?append&position=<objInfo.Position>" on the bucket's endpoint.
// @param objName : name of the object
// @param bucketName : name of the bucket
// @param location : location (region) of the bucket, used to build the host
// @param objInfo : request body, metadata headers, and append position
// @return rstInfo : next append position and CRC64 reported by the server
// @return ossapiError : nil on success
func Append(objName, bucketName, location string, objInfo *AppendObjInfo) (rstInfo *AppendObjRspInfo, ossapiError *ossapi.Error) {
	// Guard against a nil request descriptor; everything below reads objInfo.
	if objInfo == nil {
		ossapiError = ossapi.ArgError
		return
	}
	// Canonical resource path used for request signing.
	resource := path.Join("/", bucketName, objName)
	// Virtual-hosted-style endpoint: <bucket>.<location>.aliyuncs.com.
	host := bucketName + "." + location + ".aliyuncs.com"
	header := make(map[string]string)
	// This check is always true after the nil guard above; kept as written.
	if objInfo != nil {
		header["Cache-Control"] = objInfo.CacheControl
		header["Content-Disposition"] = objInfo.ContentDisposition
		header["Content-Encoding"] = objInfo.ContentEncoding
		header["Expires"] = objInfo.Expires
		header["x-oss-server-side-encryption"] = objInfo.Encryption
		header["x-oss-object-acl"] = objInfo.ACL
	}
	// SubRes repeats the sub-resource string of the Path; presumably it is
	// consumed by the signing logic inside ossapi — confirm there.
	req := &ossapi.Request{
		Host:      host,
		Path:      "/" + objName + "?append&position=" + strconv.FormatUint(objInfo.Position, 10),
		Method:    "POST",
		Resource:  resource,
		Body:      objInfo.Body,
		CntType:   objInfo.Type,
		SubRes:    []string{"append&position=" + strconv.FormatUint(objInfo.Position, 10)},
		ExtHeader: header}
	// NOTE(review): these two x-oss-* values were already placed in ExtHeader
	// above; the effect of setting them twice depends on ossapi — confirm.
	req.AddXOSS("x-oss-object-acl", objInfo.ACL)
	req.AddXOSS("x-oss-server-side-encryption", objInfo.Encryption)
	rsp, err := req.Send()
	if err != nil {
		// Only a non-ossapi error aborts here; an *ossapi.Error falls
		// through to the result check below.
		if _, ok := err.(*ossapi.Error); !ok {
			ossapi.Logger.Error("GetService's Send Error:%s", err.Error())
			ossapiError = ossapi.OSSAPIError
			return
		}
	}
	// NOTE(review): if Send returned an *ossapi.Error together with a nil
	// rsp, this dereference would panic; likewise err.(*ossapi.Error)
	// panics if Result != ErrSUCC while err is nil. This assumes ossapi
	// guarantees a non-nil rsp / paired err in those cases — confirm.
	if rsp.Result != ossapi.ErrSUCC {
		ossapiError = err.(*ossapi.Error)
		return
	}
	// Parse the next append position and CRC64 from the response headers;
	// conversion errors are deliberately ignored (values default to 0).
	pos, _ := strconv.Atoi(rsp.HTTPRsp.Header["X-Oss-Next-Append-Position"][0])
	crc, _ := strconv.Atoi(rsp.HTTPRsp.Header["X-Oss-Hash-Crc64ecma"][0])
	rstInfo = &AppendObjRspInfo{
		Possition: uint64(pos),
		crc64:     uint64(crc)}
	return
}
{ "redpajama_set_name": "RedPajamaGithub" }
7,145
\section{Introduction} Function approximation with deep neural networks has been extremely successful in the last decade for a range of complex tasks. However, this performance comes at a significant computational cost and excessive memory requirements due to their good optimization and generalization performance in the highly overparamaterized region \cite{zhang2021understanding, neyshabur2018role, arora2019fine, zhang2019fast}. It is an attractive proposition to prune such large networks with negligible loss in performance for real-time applications specially on edge devices with resource constraints. Pruning of networks \cite{lecun1990optimal, hassibi1993optimal, dong2017learning, han2015learning} has demonstrated reduction in inference-time resource requirements with minimal performance loss. The standard approach is to prune network after training and then perform a costly retraining step, thus requiring an extended training regime to generate sparse networks. Moreover, it is also difficult to train sparse networks from scratch which maintains similar performance to their dense counterparts \cite{han2015deep, li2016pruning}. Although, pruning before training is difficult, there are significant benefits in time and resource efficiency if we can prune networks before training. In recent times, the Lottery Ticket Hypothesis \cite{frankle2018lottery} was proposed that details the presence of sub-architectures within a larger network, which when trained are capable of reaching the baseline accuracy of the dense networks or even surpass them in some cases. The sparsity in these sub-architectures can be exploited with suitable hardwares for computational efficiency gains, such as in \cite{dey2019pre}, where authors managed to demonstrate a $5\text{x}$ efficiency gain while training networks with pre-specified sparsity. There are a range of techniques in literature that provide methods to prune Deep Neural Networks at various stages of their training and testing. 
The most common of these techniques is to prune the network after training using some sort of predefined criterion that captures the significance of the parameters of the network to the objective function. A range of classical works on pruning used the second derivative information of the loss function \cite{lecun1990optimal, hassibi1993optimal}. Perhaps the most intuitive of these approaches is magnitude pruning, where following training a subset of the parameters below some threshold is pruned and the rest of the parameters are retrained \cite{han2015deep, han2015learning} and regularization based methods \cite{yang2019deephoyer, ma2019transformed, louizos2017learning, yun2019trimming} which induces sparsity in the network during the optimization process. Other more elaborate techniques to find the lottery tickets include solving a separate optimization problem to find out the subset of weights of the lottery-ticket the sub-architecture \cite{zhang2018systematic, gillis2019grouped, li2019compressing}. However, pruning of randomly initialized network before training still seemed like a difficult task, as the connections of a randomly initialized network exhibits little information about their significance to the training process. This, however, was changed with the proposal of \emph{Single-shot Network Pruning} (SNIP) \cite{lee2018snip}, which managed to prune weights before training with great success by finding out sparse trainable sub-architectures. With SNIP it has been demonstrated that it is indeed possible to prune neural networks in one-shot at initialization. A recent work \cite{wang2020picking} challenges SNIP's pruning criterion of \textit{connection sensitivity} and argues that this is sub-optimal as the gradient of each weight is susceptible to change after pruning due to complex interactions among weights. Therefore, with this technique there is a possibility of pruning weights that are vital for the flow of information through the network. 
Instead, the authors of \cite{wang2020picking} propose an alternative method, \emph{Gradient Signal Preservation} (GraSP), that preserves the gradient flow of the network. These techniques where the network weights are pruned before training can be termed as \textit{pruning at initialization}. There are a few recent works \cite{DST-RL, Pops} that leverage different pruning techniques in Deep-RL algorithms but they prune the neural-network in-between Online-RL training. Since the RL agent gets updated in every iteration and collects data through environment interactions, there are significant shifts in the data-distribution. Thus, it makes it harder for the pruning techniques to find the proper sub-networks that can perform the same. To the best of our knowledge, current state-of-the-art pruning in Online-RL methods can sparsify the networks up to $50\%$ without sacrificing performance \cite{DST-RL}. But in this work, we show we can do better. Similar to supervised training, Offline-RL is trained with a fixed dataset. Therefore, pruning techniques that are suitable for a fixed dataset can be used in offline-RL as well. In this work, we explore single-shot pruning techniques in Offline-RL algorithms. This allows us to prune the networks even before we start training an RL agent. Up until very recently batch-dataset RL in the setting of continuous control was presumed to be a hard problem. This is due to not having access to environment interactions and RL agents needing to learn from a fixed dataset. But we can instead leverage this fixed-dataset nature of offline-RL to apply one-shot pruning techniques that are not suitable for online-RL algorithms. Through this work we want to excite the community more about offline-RL research and pruning techniques in RL algorithms. 
Our contributions in this work are as follows: \raggedbottom \begin{itemize} \item In this work, we show experimental results of pruning methods in Offline-RL algorithms where we use the following one-shot pruning methods: \emph{SNIP} \cite{lee2018snip} and \emph{GraSP} \cite{wang2020picking}. We explain how these single-shot pruning methods can be integrated with Offline-RL algorithms. \item We demonstrate it is possible to prune $95 \%$ of the network parameters without losing performance in continuous control tasks. \item We also show that it is possible to reduce the memory required to store these pruned networks by $4\text{x}$ without any elaborate compression mechanism. \end{itemize} \section{Preliminaries} We consider learning in a Markov decision process (MDP) described by the tuple ($S, A, P, R$). The MDP tuple consists of states $s \in S$, actions $a\in A$, transition dynamics $P(s'|s,a)$, and reward function $r=R(s,a)$. We use $s_t$, $a_t$ and $r_t=R(s_t,a_t)$ to denote the state, action and reward at timestep t, respectively. A trajectory is made up of sequence of states, action and rewards $\tau=(s_0, a_0, r_0, s_1, a_1, r_1, ..., s_T,a_T,r_T)$. For continuous control task we consider an infinite horizon, where $T=\infty$ and the goal in reinforcement learning is to learn a policy which maximizes the discounted expected return $\mathbb{E}[\sum_{t=t'}^T \gamma^t r_t]$ in an MDP. In offline reinforcement learning, instead of obtaining data through environment interactions, we only have access to some fixed limited dataset consisting of trajectory rollouts of arbitrary policies. This setting is harder for agent as it can not further explore the environment and collect additional feedback. Thus can fail due to overestimation of values induced by the distributional shift between the dataset and the learned policy. Offline algorithms \cite{BCQ, TD3_BC, CQL, fisher_RBC} overcome the problem through either constraining policy or the value function estimation. 
\section{Methods of pruning at initialization} In this section, we discuss two methods of pruning at initialization namely, SNIP and GraSP and briefly discuss their criterions of pruning. A more elaborate discussion is available at \cite{lee2018snip, wang2020picking}. \subsection{Single-shot Network Pruning at initialization} The first work to tackle pruning at initialization was SNIP \cite{lee2018snip} which exploits the idea of \textit{connection sensitivity} to prune insignificant weights. They formalize this idea in terms of removing a single weight $\theta_q$ and the effect it has on the loss as: \begin{equation} S(\theta_q) = \lim_{ \epsilon \to 0} \left| \frac{\mathcal{L}(\theta_0) -\mathcal{L} (\theta_{0} + \epsilon \delta_q)}{\epsilon} \right| = \left| \theta_q \frac{\partial \mathcal{L}}{\partial \theta_q} \right| \end{equation} where $\theta_q$ corresponds to the $q^{th}$ element of $\theta_0$, and $\delta_q$ is a one-hot vector whose $q_{th}$ element equals to $\theta_q$. The goal of SNIP is to essentially preserve the loss of the randomly initialized network before training. Although the idea to preserve the loss value was behind some classic works in pruning \cite{lecun1990optimal, hassibi1993optimal}, its importance is less obvious for pruning at initialization before the training begins. The authors of GraSP \cite{wang2020picking} instead argue that it is more important to preserve the training dynamics during pruning before training rather than the loss itself, because with the first technique there is a chance to make some layers too sparse that creates a bottleneck in the neural network for signal propagation. Therefore, they argue that a pruning technique i.e. Gradient Signal Preservation, that takes into account how the presence of a connection affects the training of the whole network would be preferable. 
\subsection{Gradient Signal Preservation} The idea of utilizing \emph{Gradient Signal Preservation} (GraSP) to improve upon the work of SNIP was presented in the work \cite{wang2020picking} with the algorithm the authors termed as GraSP. Pruning a network results in fewer parameters and reduced connectivity which might lead to a decrease in the flow of gradients through the network thus slowing down the optimization process. More formally, a larger norm of the gradient points to each gradient update contributing towards a greater loss reduction to the first order, as indicated by the directional derivative: \begin{align} \label{eq:2} \Delta \mathcal{L} (\theta) = \lim_{ \epsilon \to 0} \frac{\mathcal{L} (\theta + \epsilon \nabla \mathcal{L} (\theta)) - \mathcal{L} (\theta)}{\epsilon} = \nabla \mathcal{L} (\theta)^T \nabla \mathcal{L} (\theta) \end{align} The goal of GraSP is to preserve (even increase if possible) the gradient flow after pruning the network. Similar to the classic work \cite{lecun1990optimal} the authors cast the pruning operation as adding a perturbation $\delta$ to the initial weights. A Taylor approximation is then used to characterize the effect of removing one weight to the gradient flow through the network. \begin{align} \label{eq:3} \mathbf{S}(\delta) =& \Delta \mathcal{L} (\theta_0 + \delta) - \Delta \mathcal{L} (\theta_0) \nonumber \\ =& 2 \delta^T \nabla^2 \mathcal{L} (\theta_0) \nabla \mathcal{L} (\theta_0 ) + \mathcal{O}( \Vert \delta \Vert_2^2) \nonumber \\ =& 2 \delta^T \mathbf{Hg} + \mathcal{O}( \Vert \delta \Vert_2^2) \end{align} where $ \mathbf{S}(\delta)$ is an approximate measure of the change to equation \ref{eq:2}. The dependencies among the parameters of the network is captured by the Hessian matrix, which acts as a predictor of the effects of removing multiple weights. GraSP essentially uses equation \ref{eq:3} to calculate the score of each weight corresponding to its effect on the reduction of gradient flow after pruning. 
More precisely, a negative $S(\delta)$ will correspond to a reduction of gradient flow if the associated weight is pruned, while a positive value will result in an increase of gradient flow if said weights are pruned. Therefore the larger the scores associated with the weights, the lower their importance and those weights are removed first. Therefore the vectorized scores are calculated as: \begin{align} \mathbf{S} (- \mathbf{\theta}) = -\mathbf{\theta} \odot \mathbf{Hg} \end{align} GraSP then removes the \textit{top k} fraction of the weights for a given pruning ration of \textit{k} to generate a pruning mask by computing the scores associated with each weight. Thus, GraSP takes the gradient flow into account while pruning the network. \begin{algorithm}[h!] \caption{Single-shot Pruned Offline-RL Training} \label{algorithm_1} \begin{algorithmic} \STATE \textbf{Initialize Networks}: critic $Q_{\theta_1}$, $Q_{\theta_2}$, Actor $\pi_\phi$, VAE $V_{\omega}=\{V_{\omega_E}, V_{\omega_D}\}$ \\ \textbf{Choose single shot pruning technique}: SNIP or GraSP \\ \textbf{Find the Pruning Weight Maps}: \STATE $M_{\theta_1}$, $M_{\phi}$, $M_{\omega_E}$, $M_{\omega_D}$ = \text{single-shot pruning} $\Big( \mathcal{L}(\theta_1) , \mathcal{L}(\phi), \mathcal{L}(\omega_E), \mathcal{L}(\omega_D)\Big)$ \\ \textbf{Prune the networks:} \\ $\theta_1 \leftarrow \theta_1 \odot M_{\theta_1} $, \\ $\theta_2 \leftarrow \text{copy}(\theta_1) $, \\ $\phi \leftarrow \phi \odot M_{\phi} $, \\ $\omega_E \leftarrow \omega_E \odot M_{\omega_E} $, \\ $\omega_D \leftarrow \omega_D \odot M_{\omega_D}$ \\ \FOR{$t=1$ {\bfseries to} $T$} \STATE Train Offline-RL algorithm \ENDFOR \end{algorithmic} \end{algorithm} \section{Experiments} We perform our experiments on OpenAI Gym MuJoCo continuous control tasks \cite{mujoco, OpenAi_gym} on two different Offline-RL algorithms: \emph{Batch-Constrained deep Q-learning} (BCQ) \cite{BCQ} and \emph{Behavior Cloning} (BC) (implemented in \cite{Bear}). 
Without changing anything within the RL objective, we integrate the following two pruning approaches: \emph{SNIP} \cite{lee2018snip} and \emph{GraSP} \cite{wang2020picking}. These one-shot pruning methods find the important neural-network weights before initializing the RL training loop and set the rest of the weights to zero, which remain zero throughout the RL training. For example, to train a $95\%$ sparse network, we will set the $95\%$ weights to zero using one of these pruning techniques and will train the RL agent with the remaining $5\%$ of the weights which the pruning method finds to be more relevant to the RL learning objective. We run our experiment varying different sparsity levels to understand to what extent we can reduce the model without sacrificing the performance. Since the one-shot pruning methods are independent of the Offline-RL objective, we expect these pruning methods to perform the same for other Offline-RL algorithms as well. BCQ and BC share the same architecture, where they use separate Actor, Critic and a VAE neural network. Before starting the network training, we sample a single batch of training samples (100 random $(s,a,s',r)$ for SNIP and 200 random $(s,a,s',r)$ samples for GraSP) to generate a pruning mask. We then prune the neural network with these masks and train the remaining weights. The complete process is detailed in Algorithm-\ref{algorithm_1}. Actor, Critic and VAE network have different objective functions. We individually compute these maximization/minimization objectives to find out the weights that are most relevant in optimizing these objectives. We vary the sparsity of these networks from $10\%$ to $95\%$ and compare the performance of the Offline-RL algorithms. We perform our experiments on \emph{Half-Cheetah-v2}, \emph{Hopper-v2}, \emph{Walker2d-v2} environments and train the offline-RL algorithms with \emph{D4RL} \cite{D4RL} expert dataset. 
We plot the mean performance for seeds $0-4$ over 1 million gradient updates with $100\%$ confidence interval. In figures \ref{SNIP vary sparsity} and \ref{GRASP vary sparsity} we observe that, for all the experiments, except for BCQ in Hopper task, the performance is consistent even with $95\%$ sparse networks. This means with a fraction of the weights we still attain the performance of a large neural network. The pruning techniques can find the "sub-networks" that give similar performance using only $5\%$ parameters of the larger network. It is important to note that \emph{Dynamic Sparse TD3} (DS$-$TD3) \cite{DST-RL}, a very recent research work that uses sparse training with dynamic sparsity \cite{scalable_NN} in RL, can attain the performance up to $50\%$ sparse network in online training. On the other hand our proposed approach can leverage the fixed batch dataset offline training method and find a $95\%$ pruned sub-architecture. Our proposed method not only shrinks the size of the network weights, with proper hardware and software optimization this will allow faster training and inference \cite{nvidia, dey2019pre}. This reduces the computation cost and allows RL algorithms to be used in low-resource, large-data-driven real-time applications. In our initial experiments we tried varying $(i)$ batch size and $(ii)$ number of pruning iterations, but that does not provide any improvement over a single batch pruning loop. Since both methods perform similar to the non-pruned network, we do not conduct further experiments to avoid unnecessary compute. \subsection{Performance of Offline RL algorithm with pruned network} \begin{figure}[hbt!] 
\centering \includegraphics[width=0.32\linewidth]{figures/snip/BC_halfcheetah-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/snip/BC_hopper-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/snip/BC_walker2d-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/snip/BCQ_halfcheetah-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/snip/BCQ_hopper-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/snip/BCQ_walker2d-expert-v2_Normalized_Score.pdf} \caption{Performance plot of Offline-RL algorithms (BCQ, BC) varying sparsity using SNIP} \label{SNIP vary sparsity} \end{figure} \begin{figure}[hbt!] \centering \includegraphics[width=0.32\linewidth]{figures/grasp/BC_halfcheetah-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/grasp/BC_hopper-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/grasp/BC_walker2d-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/grasp/BCQ_halfcheetah-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/grasp/BCQ_hopper-expert-v2_Normalized_Score.pdf} \ \includegraphics[width=0.32\linewidth]{figures/grasp/BCQ_walker2d-expert-v2_Normalized_Score.pdf} \caption{Performance plot of Offline-RL algorithms (BCQ, BC) varying sparsity using GraSP} \label{GRASP vary sparsity} \end{figure} \subsection{Visualization of the Network After Pruning} We compare the layer wise sparsity of the pruned network to that of the regular dense network and observe the effect of layer-wise pruning. In figure \ref{Layerwise_sparsity}, we compare the layer-wise remaining weights using different techniques after pruning them with $95\%$ sparsity. We plot the mean number of layer-wise weights and it's standard-deviation for seeds 0-4. 
For both SNIP and GraSP we find similar pattern of pruning, where it does not prune all the layers uniformly. Both methods preserve more weights for the last layer to preserve gradient flow. But since GraSP's objective focuses on preserving the gradient flow \cite{wang2020picking} they preserve weights at the last layer than SNIP. \begin{figure}[hbt!] \centering \subfloat[halfcheetah-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/Actor_Net_Sparsity_halfcheetah-expert-v2_keepRatio_0.05.pdf}} \ \subfloat[halfcheetah-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/Critic_Net_Sparsity_halfcheetah-expert-v2_keepRatio_0.05.pdf}} \ \subfloat[halfcheetah-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/VAE_Net_Sparsity_halfcheetah-expert-v2_keepRatio_0.05.pdf}} \ \vspace{-0.45cm} \subfloat[hopper-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/Actor_Net_Sparsity_hopper-expert-v2_keepRatio_0.05.pdf}} \ \subfloat[hopper-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/Critic_Net_Sparsity_hopper-expert-v2_keepRatio_0.05.pdf}} \ \subfloat[hopper-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/VAE_Net_Sparsity_hopper-expert-v2_keepRatio_0.05.pdf}} \ \vspace{-0.45cm} \subfloat[walker2d-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/Actor_Net_Sparsity_walker2d-expert-v2_keepRatio_0.05.pdf}} \ \subfloat[walker2d-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/Critic_Net_Sparsity_walker2d-expert-v2_keepRatio_0.05.pdf}} \ \subfloat[walker2d-expert-v2]{\includegraphics[width=0.32\linewidth]{figures/network_sparsity/VAE_Net_Sparsity_walker2d-expert-v2_keepRatio_0.05.pdf}} \caption{Visualization of the remaining weights per-layer of the neural networks} \label{Layerwise_sparsity} \end{figure} \newpage \subsection{Network Weights Reduction} We use 
"\emph{torch$.$to$\_$sparse()}" function from PyTorch \cite{pytorch_lib} library to get the sparse matrix, which stores the weights and corresponding index vectors. Since sparse indexing (green line in figure \ref{Layerwise sparsity}) requires additional index vectors, it takes more memory to save regular dense network weights (blue line in figure \ref{Layerwise sparsity}). For $95\%$ sparsity, we are able to reduce the memory size to \emph{$4 \text{x}$} compared to the regular dense networks. With more sophisticated compression mechanisms to save sparse matrices, it will be possible to achieve further reduction in memory requirements. In Table \ref{network-weight-table} we compare the memory (in Megabytes) it takes to save these networks. \begin{figure}[hbt!] \centering \includegraphics[width=0.32\linewidth]{figures/network_weights/Network_Weights_Actor_halfcheetah-expert-v2.pdf} \ \includegraphics[width=0.32\linewidth]{figures/network_weights/Network_Weights_Critic_halfcheetah-expert-v2.pdf} \ \includegraphics[width=0.32\linewidth]{figures/network_weights/Network_Weights_Vae_halfcheetah-expert-v2.pdf} \caption{Comparison of memory requirement of sparse and dense models} \label{Layerwise sparsity} \end{figure} \begin{table} \caption{Memory Size of the Network Weights in Megabytes (mb)} \label{network-weight-table} \centering \begin{tabular}{llll} \toprule \cmidrule(r){1-2} Method & Actor & Critic & VAE \\ \midrule Dense Model & 0.5287 & 1.04492 & 4.7621 \\ Dense Model Sparse Indexing & 2.6318 & 5.2035 & 23.7739 \\ GraSP (95\% sparse) & \textbf{0.14099}& \textbf{0.2768} & \textbf{1.2122} \\ SNIP (95\% sparse) & \textbf{0.14297} & \textbf{0.2824} & \textbf{1.2303} \\ \bottomrule \end{tabular} \end{table} \section{Future Work} We use D4RL \cite{D4RL} dataset for this experiment where expert data were collected from the same data distribution. In real-world application that will not be the case. 
Moreover, one-shot techniques do not guarantee performance under changes in the underlying distribution. \section{Conclusion} Network latency is one of the more crucial aspects of deploying a deep RL agent into real-world applications where it needs to process large datasets in real-time (i.e. self-driving car, deploying bot in games, financial data analysis etc.). This also hinders applying RL in low resource devices (i.e. embedded system, edge devices etc.). As a step towards this direction we conduct experiments on pruning techniques in offline RL algorithms. In this paper we show how a simple single-shot pruning plug-in prior to training can reduce the network parameters by $95\%$ while maintaining performance. This sparse model saves $4 \text{x}$ in memory without using any compression mechanism and with proper hardware integration \cite{nvidia, dey2019pre} it promises faster training and faster inference. \section{Acknowledgement} Riyasat Ohib and Sergey Plis were in part supported by R01 DA040487 and RF1 MH121885 from NIH.
{ "redpajama_set_name": "RedPajamaArXiv" }
5,960
Jim specializes in retirement and estate planning and believes in building long-term, personal relationships with First Gunn Financial Group's clients. A former professor of accounting at the University of Arkansas and Oklahoma State University, he left academia in 1970 to become an investment advisor representative, focusing on life insurance and estate planning. In 1995, Jim became a dedicated investment advisor representative and joined LPL Financial. Jim earned a Bachelor of Science degree in Accounting, a Master of Business Administration and a Ph.D. in Business Administration, all from the University of Arkansas. After receiving his doctorate, he added CPA to his list of qualifications. He is a lifetime qualifier of the Million Dollar Round Table, an international organization of life insurance and financial services professionals with exceptional industry knowledge, excellent client service, and a strict code of ethics. Jim doesn't recommend an investment strategy that he hasn't vetted himself. Jim lives in Stillwater with his wife and is the proud father of two daughters and the grandfather of five. Aside from travel, skiing and fishing, Jim is, above all, a fan of Oklahoma State University football and basketball. You might also see him in the bleachers at NCAA Final Four basketball tournaments. An active member of the community, Jim has served as president of the local Exchange Club; and as a corporate board member of a local bank, home health services provider, and a vending and food service company. He also has over 50 years of membership and service in Presbyterian Church.
{ "redpajama_set_name": "RedPajamaC4" }
3,518
\section{INTRODUCTION} Wire scanners serve as an essential part of accelerator diagnostic systems and are used mostly for beam transverse profile measurements (for a review see~\cite{Wittenburg:2006zz}). Depending on scanning wire trajectory the profilers could be classified as rotational~\cite{Fischer:1988ft} or linear~\cite{Loos:2010zzc}. When its necessary to measure vertical and horizontal beam profiles at the same longitudinal position one has to use two independent scanners. Alternatively two profiles could be sampled by using a single driver mounted at 45deg with two wires stretched horizontally and vertically over a fork attached to this linear driver. However, wire vibration in the scanning direction is a known problem for the 45deg scanners~\cite{Frisch:2008zz, Iida:1998ua}. \begin{figure}[htb] \centering \includegraphics*[width=85mm]{fig1} \caption{Linear-rotary motor from LinMot company. } \label{lrmot} \end{figure} Different types of driver motors have been employed in order to move and control scanning wires which are normally mounted on cards or forks connected to the motors. Stepper or servo rotating motors are among the most popular drivers and linear motors are at developing stage. Here we explore commercially available translational-rotational motor units to propose a wire scanner solution which will perform beam scans in mutually perpendicular directions using a single linear-rotary motor and a simple wire hosting construction attached to it. The construction is a key-like wire holder which makes twisting (helical) motion during a 2-D scan. Next will follow a more detailed description of the translational part with linear motors. In conclusion we will estimate technical feasibility of the proposed twisting scanner. \section{LINEAR-ROTARY MOTORS} A linear-rotary motor produced by company LinMot~\cite{linmot} is shown in Fig.~\ref{lrmot}. The motor consists of a linear and a rotary part merged together. 
Translational and rotational motions are decoupled and organized independently. However, linear and rotary motion synchronization is foreseen by motor controller logic. The motors are provided in different configurations with variable sizes and strengths reaching up to $1~kN$ linear force and $7.5~Nm$ rotating torque. \begin{table}[hbt] \centering \setlength\tabcolsep{2pt} \caption{LinMot PR01-52x60-R/37x120F-HP-100 Linear-rotary Motor Parameters} \begin{tabular}{ll} \toprule \textbf{Parameter} & \textbf{Value} \\ \hline \multicolumn{2}{c}{Linear Motion} \\ Extended Stroke ES mm (in) & 100 (3.94)\\ Standard Stroke SS mm (in) &100 (3.94)\\ Peak Force E12x0 - UC N (lbf)& 255 (57.3)\\ Cont. Force N (lbf) &51 (11.5)\\ Cont. Force Fan cooling N (lbf)& 92 (20.7)\\ Force Constant N/A (lbf/A) &17 (3.8)\\ Max. Current @ 72VDC A& 15\\ Max. Velocity m/s (in/s) &3.9 (154)\\ Position Repeatability mm (in) & $\pm0.05$ ($\pm0.0020$)\\ Linearity \% & $\pm0.10$\\ \multicolumn{2}{c}{Rotary Motion} \\ Peak Torque Nm (lbfin)& 2 (17.7)\\ Constant Torque (Halt) Nm (lbfin) &0.5 (4.4)\\ Max. Number of revolutions Rpm& 1500\\ Torque\,Constant\,Nm/Arms\,(lbfin/Arms)& 0.46 (4.07)\\ Max. Current @ 72VDC Arms& 6.2\\ Repeatability $\deg$ &$\pm0.05$\\ \end{tabular} \label{linmot} \end{table} Motor controllers use advanced and flexible software/firmware which should help to perform slow or fast scans with minimal programming efforts. An operational voltage of 72VDC and maximal current of 15A complies to general Electro-Magnetic Interference (EMI) requirements in accelerator environments. Described features make the linear-rotary motor as an attractive tool for driving the proposed twisting wire scanner. A closer look to specifications of a linear-rotary motor LinMot \hbox{PR01-52x60-R/37x120F-HP-100} is presented in Table~\ref{linmot} as an example. Listed values for the Repeatability are quoted for built-in, internal position and angle sensors. 
One can improve these parameters considerably by using external, finer sensors which is foreseen by controller software. In following we demonstrate that for linear motors. \section{KEY-BIT SCANNER} In order to apply 2-D helical motion of a linear-rotary motor for scanning a beam, one needs to invent a suitable construction with stretched wires and a holding frame which stays out of (does not cross the) beam during the scan. For that we propose a key-bit like assembly which fulfills above requirements. The construction is schematically presented in Fig.~\ref{fork}. \begin{figure}[htb] \centering \includegraphics*[width=85mm]{fig2} \caption{A key-bit holder scheme with horizontal and vertical scanning wires. A small ellipse on the right depicts a beam running normal to the page.} \label{fork} \end{figure} As it's indicated by arrows, for this arrangement translational motion will first scan the beam in horizontal direction and next, when the beam will be inside the key-bit, a proper rotation will perform vertical scan. It is necessary to limit the rotation angle in order to escape crossing of the wire holder with the beam. For that there is sufficient space between the holder frame and the beam, remaining after the rotational scan is over. Denoting vertical key-bit and wire size by $L$ and $l_w$ respectively, the beam to holder distance could be expressed as $$ L\left( \arccos{\frac{x}{L}} - \arccos{\frac{x}{l_w}} \right) $$ where $x$ is distance between the beam and rotational axis. Applying this formula for fast ($>1~m/sec$) rotational scans with some realistic accelerator parameters we obtain sufficient space to accelerate the wire while for deceleration the space is limited and one needs to use mechanical dumps to stop the scanner. 
\section{3-D KEY-BIT HOLDER FOR 2-D FAST SCANS} An improved, slightly more complicated design for fast scans could be achieved \begin{figure}[htb] \centering \includegraphics*[width=75mm]{fig3} \caption{Scanning scheme of a three dimensional key-bit wire holder. The vertical green line depicts the beam to be scanned.} \label{fork3d} \end{figure} by tilting the second quadrant of the key-bit wire holder out of the construction's plane by some angle. A 90 deg tilted key-bit holder is sketched in Fig.~\ref{fork3d}. The scanning sequence is exactly the same as for the flat key-bit scanner, with the advantage of more room after the second scan is over. This should give sufficient time to decelerate and stop the frame by the motor alone, without mechanical dampers. In addition, the tilted key-bit's moment of inertia is considerably smaller than in the flat case. This will allow easier and improved handling of rotations with more ergonomic acceleration and deceleration of the key-bit structure. \section{LINEAR MOTOR PERFORMANCE AS A SCANNER DRIVER} We are developing wire scanners with linear motor drivers for the European XFEL accelerator. \begin{figure}[htb] \centering \includegraphics*[width=95mm]{fig4} \caption{Designed horizontal and vertical wire scanners mounted on vacuum chamber. } \label{xfel3d} \end{figure} Here we present some of the results obtained during a recent laboratory experiment with test scanners. The planned test setup is displayed in Fig.~\ref{xfel3d} while the experimental realization is presented in Fig.~\ref{test1}. For horizontal and vertical scans two identical and independent profilers are mounted to a special vacuum chamber dedicated to beam transverse diagnostics. \begin{figure}[htb] \centering \includegraphics*[width=70mm]{fig5} \caption{XFEL wire scanners' test setup } \label{test1} \end{figure} Position feedback for the linear servo-motor is provided by an external Heidenhain optical system which is accurate to $1\mu m$.
With the setup we have tested triggered fast scans and mechanical as well linear motors performance during/after tens of thousand scanning strokes. Important specifications of the XFEL wire scanners are shown in Table~\ref{xfel-tab}. \begin{table}[hbt] \centering \caption{European XFEL Wire Scanner Specifications} \smallskip \begin{tabular}{ll} \toprule \textbf{Parameter} & \textbf{Value} \\ \hline Stroke & 53mm \\ Measurement duration &5 sec / 4 scanners \\ Scanning modes& Fast (1m/s), Slow\\ Motor to beam sync & $< 1 \mu sec$ (RMS)\\ Position accuracy in a cycle &2 $\mu m$ (RMS)\\ Width accuracy per cycle & 2 \% (RMS) \\ Wire positioning error & 1 $\mu m$\\ Number of wires per fork & 3 + 2 ( 3x$90^o$, $\pm 60^o$)\\ Wire material & Tungsten \\ Fork gap & 15mm\\ Wire-wire distance & 5mm ( $90^o$ ) \\ \end{tabular} \label{xfel-tab} \end{table} Tests have marked most of the listed specifications as achieved. During the test mechanical design and construction precision has been justified while linear motors have demonstrated reliable performance. \begin{figure}[htb] \centering \includegraphics*[width=\columnwidth]{fig6} \caption{Linear motor parameters recorded during a stroke: position(mm, red), velocity(m/s, blue), current (A, green), demand and actual velocity difference (m/s, brown) } \label{motpar} \end{figure} To verify motor's dynamic behavior we have recorded essential parameters during nominal strokes. An example is shown on Fig.~\ref{motpar} where together with position and velocity also the motor's current and velocity deviation are displayed for a fast ($1m/s$) scanning stroke. An important issue for the XFEL wire profilers and fast scans in general is mechanical jitter magnitude for triggered scans. We have investigated this by recording time intervals between the trigger and fine position system reference mark traversing time. 
\begin{figure}[htb] \centering \includegraphics*[width=\columnwidth]{fig7} \caption{Motor triggering mechanical jitter distribution for the forward (upper plot) and backward (lower plot) strokes. Superimposed are shown fitted Gaussian functions with displayed RMS (one sigma) values. } \label{trigg} \end{figure} Measurement results for many forward and backward strokes are summarized in Fig.~\ref{trigg}. Distributions show time jitter below $1 \mu sec$ which, in our case of $1 m/sec$ velocity, is equivalent to a sub-micrometer mechanical jitter. This could also be quoted as the repeatability of the tested linear motor with fine position feedback and triggering systems. \section{DISCUSSION} In the last section we have demonstrated an outstanding performance of contemporary linear motors as wire scanner drivers. We have proposed to use a linear-rotary motor with an attached key-bit wire card as a 2-D twisting wire scanner. Estimated planes of possible oscillations of the key-bit wires differ from the critical planes in 45~deg forks, which should cure associated vibrational problems reported at LCLS and other centers. This will become possible mainly due to the different alignment of the scanning wires relative to the driver unit. In addition, the vibrations are normally damped along the motion direction. An apparent difficulty for twisting wire scanner development is the linear-rotary motion transfer into the vacuum chamber where the key-bit card should operate. For that one should combine linear bellows with either wobble~\cite{Bosser:1984us} or torsional~\cite{tbellows} bellows. \newpage \acknowledgements{ We are thankful to S.~Vilcins-Czvitkovits, J.~Kruse, A.~Ziegler and M.~Drewitsch for participation in preparation and running of the XFEL wire scanners' first prototyping test.}
{ "redpajama_set_name": "RedPajamaArXiv" }
4,828
\section{Introduction} \label{sec_introduction} Due to their prominent applications, the underwater acoustic networks (UANs) have gained significant research interest~\cite{dar01}. However, the data rate in UANs is limited due to significant delay and restricted bandwidth over long range communications. Therefore, if the source-to-destination direct link is unable to meet a data rate demand, then a relay can be deployed between them to decrease the hop length and yield an energy efficient design~\cite{stoj02}. This letter investigates the joint power allocation (PA) and relay placement (RP) in a dual-hop UAN where the direct link is either absent~\cite{wan13}, or its effect can be neglected while minimizing the outage probability for a desired data rate.\color{black} In the recent works~\cite{liu04} and~\cite{kam09}, an energy efficient UAN operation was investigated by optimizing the location of the relays along with other key parameters. Whereas, optimal PA was studied in~\cite{babu10}. Although multiple relays were used in these works, the underlying optimization studies were performed considering assumptions like perfect channel state information (CSI) availability and adopting a simpler Rayleigh fading model. In contrast, the joint optimization in this letter has been carried out under a realistic dual-hop communication environment~\cite{nour11}, where only the statistics of fading channels are required and a more generic Rician distribution is adopted for the frequency-selective fading channel. Lately, in~\cite{nour11,cao12,doos07} it is shown that the throughput in cooperative UANs can be significantly improved by optimizing PA and RP. However, the existing works did not consider joint optimization, and also only numerical solutions were proposed for the individual PA and RP problems. \textit{So, to the best of our knowledge, the joint global optimization of PA and RP in UANs has not been investigated}.
Further, we would like to mention that the joint optimization in cooperative UANs is very different from, and more challenging than, that in conventional terrestrial networks due to the frequency-selective behavior of underwater channels in terms of fading, path loss and noise, which are all strongly influenced by the operating frequencies. The key contributions of this letter are threefold. First, we prove the generalized convexity of the proposed outage minimization problem in DF relay-assisted UANs. Using it we obtain the jointly global optimal PA and RP solutions. Secondly, to gain analytical insights, a novel very low-complexity near-optimal approximation algorithm is presented. Lastly, via numerical investigation, the analytical discourse is first validated and then used for obtaining insights on the optimizations along with the quantification of achievable performance gains. \section{System Model Description}\label{section2} We consider a dual-hop, half-duplex DF relay assisted UAN. Here a source $\mathcal{S}$ communicates with a destination $\mathcal{D}$, positioned a distance $D$ apart, via a cooperative relay $\mathcal{R}$. Each of these nodes is equipped with a single antenna, and the $\mathcal{S}$-to-$\mathcal{D}$ direct link is not available due to large path loss and fading effects. As $\mathcal{R}$ communicates in half-duplex mode, the data transfer from $\mathcal{S}$ to $\mathcal{D}$ takes place in two slots: first from $\mathcal{S}$ to $\mathcal{R}$ and then from $\mathcal{R}$ to $\mathcal{D}$. For efficient energy utilization, a power budget $P_B$ is taken for the transmit powers of $\mathcal{S}$ and $\mathcal{R}$. We assume that each of the $\mathcal{S} \mathcal{R}$, $\mathcal{R} \mathcal{D}$, and $\mathcal{S} \mathcal{D}$ links follows independent Rician fading.
Adopting the channel model in~\cite{cao12,stoj02}, the frequency $f$ dependent received signal-to-noise ratio (SNR) at node $j$, placed $d_{ij}$ distance apart from node $i$, is given by: \begin{equation} \gamma_{ij}(f)=S_{i}(f)G_{ij}(f)\left[a(f)\right]^{-d_{ij}}d_{ij}^{-\alpha}\left[N(f)\right]^{-1}. \end{equation} Here $G_{ij}(f)$ is the channel gain for frequency-selective Rician fading over $ij$ link, $S_i(f)$ is power spectral density (PSD) of transmitted signal from node $i$, $\alpha$ is spreading factor, $a(f)$ is absorption coefficient in dB/km for $f$ in kHz~\cite[eq. (3)]{stoj02}, and $N(f)$ is PSD of noise as defined by~\cite[eq. (7)]{stoj02}. \color{black} The complementary cumulative distribution function (CDF) of $\gamma_{ij}$ for Rice factor $K \!\leq\!39$dB is approximated as~\cite[eq. (10)]{mish17}: \begin{equation}\label{ccdf} \begin{aligned} \textbf{Pr}[\gamma_{ij}(f)>x]=e^{-\mathcal{A}\Big(\frac{2(1+K)\beta x}{\overline{\gamma}_{ij}(f)}\Big)^\mathcal{B}}, \end{aligned} \end{equation} where $\mathcal{A}=e^{\phi(\sqrt{2K})}$ and $\mathcal{B}=\frac{\varphi(\sqrt{2K})}{2}$. The polynomial expressions for $\phi(v)$ and $\varphi(v)$ as a function of $v$ were defined in~\cite[eqs. (8a), (8b)]{mish17}. The expectation of SNR $\gamma_{ij}(f)$ is given by $\overline{\gamma}_{ij}(f)=\frac{\beta c_{ij}(f)S_{i}(f)}{N(f)a(f)^{d_{ij}}d_{ij}^{\alpha}}$, where $\beta=\frac{\mathcal{A}^{-\frac{1}{\mathcal{B}}}}{\mathcal{B}}\Gamma(\frac{1}{\mathcal{B}})$ and $c_{ij}(f)$ is the expectation of the channel gain $G_{ij}(f)$. Using this channel distribution information (CDI), we aim to minimize the outage probability for $\mathcal{S} \mathcal{D}$ underwater communication. \section{Joint Optimization Framework}\label{optimization} Here we first obtain the outage probability expression and then present the proposed joint global optimization framework. 
\subsection{Outage Minimization Problem} Outage probability is defined as the probability of received signal strength falling below an outage data rate threshold $r$. \noindent Outage probability $p_{out}$ in DF relay without direct link is~\cite{mish17}: \begin{eqnarray}\label{out_exp} p_{out}=\textbf{Pr}\Big(\hspace{-1mm} \textstyle\int\limits_{0}^{B_W}\hspace{-1mm}\textstyle\frac{1}{2}\log_2(1+\min\{\gamma_{\mathcal{S}\mathcal{R}}(f),\gamma_{\mathcal{R}\mathcal{D}}(f)\})\text{d}f\leq r\hspace{-1mm}\Big) \hspace{-3mm} \end{eqnarray} Our goal of minimizing $p_{out}$ by jointly optimizing PA and RP for a given transmit power budget can be formulated as below. \begin{eqnarray}\label{eq9} \begin{aligned} \text{(P0):}&\hspace{-0mm} \underset{S_\mathcal{S}(f),S_\mathcal{R}(f),d_{\mathcal{S}\mathcal{R}}}{\text{minimize}} \quad p_{out}, \quad \text{subject to} \hspace{2mm}C1\hspace{0mm}: d_{\mathcal{S}\mathcal{R}} \geq \delta,\\ & \hspace{-6mm} C2\!: d_{\mathcal{S}\mathcal{R}}\leq D-\delta, \hspace{1mm}C3\!: \textstyle\int_{0}^{B_W}\hspace{-0.5mm}(S_\mathcal{S}(f)+S_\mathcal{R}(f))\text{d}f\leq P_B,\hspace{-6mm} \end{aligned} \end{eqnarray} where $C1$ and $C2$ are the boundary conditions on $d_{\mathcal{S}\mathcal{R}}$ with $\delta$ being the minimum separation between two nodes~\cite{mish17}. $C3$ is the total transmit power budget in which $S_\mathcal{S}(f)$ and $S_\mathcal{R}(f)$ at frequency $f$ respectively represent the power spectral density (PSD) of transmit powers for $\mathcal{S}$ and $\mathcal{R}$. From the convexity of $C1,C2,C3$ along with the pseudoconvexity of $p_{out}$ in $S_\mathcal{S}(f)$, $S_\mathcal{R}(f)$, and $d_{\mathcal{S}\mathcal{R}}$ as proved in Appendix~\ref{AppA}, (P0) is a generalized-convex problem possessing the unique global optimality property~\cite[Theorem 4.3.8]{Baz06}. However, as it is difficult to solve $\text{(P0)}$ in current form, we next present an equivalent formulation to obtain the jointly optimal design. 
\subsection{Equivalent Formulation for obtaining Joint Solution}\label{sec:Eqv} As direct solution of (P0) is intractable~\cite{babu10,nour11}, we discretize the continuous frequency domain problem (P0). For this transformation we choose the large enough number $n$ of frequency sub-bands or ensure that the bandwidth of each sub-band $\Delta f=\frac{B_W}{n}$ is sufficiently small such that the difference between outage probabilities, $p_{out}$ defined in \eqref{out_exp} and $\widehat{p_{out}}$ defined in \eqref{eq:pouH} for the discrete domain, have the corresponding root mean square error less than $0.08$ for it being a good fit~\cite{Hoop08}. So, instead of minimizing $p_{out}$, we minimize \begin{eqnarray}\label{eq:pouH} \widehat{p_{out}}\triangleq\textstyle\textbf{Pr}\left( \sum_{q=1}^{n}\frac{\Delta f}{2}\log_2(1+\min\{\gamma_{\mathcal{S}\mathcal{R}_q},\gamma_{\mathcal{R}\mathcal{D}_q}\})\leq r\right)\hspace{-1mm}, \hspace{-3mm} \end{eqnarray} where $q^{th}$ sub-band of the $\mathcal{S}\mathcal{R}$ link is coupled with $q^{th}$ sub-band of the $\mathcal{R}\mathcal{D}$ link, the end-to-end received SNR at node $j$ is: $\gamma_{{ij}_q}=P_{{i}_q}G_{{ij_q}}a_q^{-d_{ij}}d_{ij}^{-\alpha}[N_q\Delta f]^{-1}$, where $P_{{i}_q}=S_{{i}_q}\Delta f$ and $S_{{i}_q}=S_{i}(f_q)\text{U}(f-f_q)$ are the PA and PSD respectively at transmitting node $i\in\{\mathcal{S},\mathcal{R}\}$ with unit step function $\text{U}(f)=1$ for $f\in [-\frac{\Delta f}{2},\frac{\Delta f}{2}]$ and $0$ otherwise. Further, $a_q=a(f_q)\text{U}(f-f_q)$ is the absorption coefficient, $N_q=N(f_q)\text{U}(f-f_q)$ is the additive noise, $G_{{ij}_q}=G_{ij}(f_q)\text{U}(f-f_q)$ and $c_{{ij}_q}=\mathbb{E}[G_{{ij}_q}]=c_{ij}(f_q)\text{U}(f-f_q)$ respectively are the channel gain and its expectation value in $q^{th}$ sub-band of ${ij}\in\{\mathcal{S}\mathcal{R},\mathcal{R}\mathcal{D}\}$ link. The different frequency-dependent parameters (cf. 
Section~\ref{section2}) remain constant within a sub-band and they are expressed by their respective center frequencies $\{f_q\}_{q=1}^n$. The twofold benefit of this discretization is transforming: (i) a frequency-selective fading channel into a non-frequency-selective one, and (ii) non-additive noise into an additive noise~\cite{babu10}. Since for a sufficiently large value of $n$, $\widehat{p_{out}}$ closely matches $p_{out}$ (as also shown later via Fig.~\ref{fig:validation}(a)), using Appendix~\ref{AppA} we can claim that $\widehat{p_{out}}$ is also jointly-pseudoconvex in $\{P_{\mathcal{S}_q},P_{\mathcal{R}_q}\}_{q=1}^n$, and $d_{\mathcal{S}\mathcal{R}}$. Further, as the CDF is a monotonically \noindent decreasing function of the expectation of the underlying random variable~\cite[Theorem 1]{mish18} in \eqref{eq:pouH}, the minimization of $\widehat{p_{out}}$ is equivalent to the maximization of the expectation value $\frac{\Delta f}{2}\mathbb{E}[\log_2\prod_{q=1}^{n}(1+\min\{\gamma_{\mathcal{S}\mathcal{R}_q},\gamma_{\mathcal{R}\mathcal{D}_q}\})]$. Further, we observe that since the logarithmic transformation is monotonically increasing, the expectation $\mathbb{E}[\prod_{q=1}^{n}(1+\min\{\gamma_{\mathcal{S}\mathcal{R}_q},\gamma_{\mathcal{R}\mathcal{D}_q}\})]$ is also a jointly pseudoconcave function.
Lastly, assuming SNRs in different sub-bands to be independently and identically distributed, the products in this expectation can be moved outside the operator $\mathbb{E}\left[\cdot\right]$ and (P0) can be equivalently formulated as \begin{equation} \begin{aligned} \text{(P1):}&\hspace{-0mm} \underset{\{P_{\mathcal{S}_q},P_{\mathcal{R}_q}\}_{q=1}^n,d_{\mathcal{S}\mathcal{R}}}{\text{maximize}} &&\hspace{-2mm} \textstyle{\prod\limits_{q=1}^{n}(1+\mathbb{E}[\min\{\gamma_{\mathcal{S}\mathcal{R}_q},\gamma_{\mathcal{R}\mathcal{D}_q}\}])} \\ &\hspace{-2mm} \text{subject to} &&\hspace{-12mm} C1, C2, \widehat{C3}\hspace{-0.5mm}: \textstyle\sum_{q=1}^{n}(P_{\mathcal{S}_q}+P_{\mathcal{R}_q})\leq P_B, \end{aligned}\hspace{-2mm} \end{equation} where $\widehat{C3}$ gives the transmit power budget and using the definition~\eqref{meanSNR} in Appendix~\ref{AppA}, $\mathbb{E}[\min\{\gamma_{\mathcal{S}\mathcal{R}_q},\gamma_{\mathcal{R}\mathcal{D}_q}\}]=\overline{\gamma}_q\!=\!\textstyle\frac{\beta}{N_q}\Big[\Big(\frac{a_q^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha}{c_{\mathcal{S}\mathcal{R}_q}P_{\mathcal{S}_q}}\Big)^\mathcal{B}+\Big(\frac{a_q^{D-\delta-d_{\mathcal{S}\mathcal{R}}}(D-\delta-d_{\mathcal{S}\mathcal{R}})^\alpha}{c_{\mathcal{R}\mathcal{D}_q}P_{\mathcal{R}_q}}\Big)^\mathcal{B}\Big]^{\frac{-1}{\mathcal{B}}}$. With the pseudoconcavity of objective function and convexity of $C1$, $C2,\widehat{C3}$, the Karush-Kuhn-Tucker (KKT) point of (P1) yields its global optimal solution. Further, the Lagrangian function of (P1) by associating the Lagrange multiplier $\lambda$ with $\widehat{C3}$ and considering $C1$ and $C2$ implicit, can be defined by: \begin{equation}\label{LagFun1} \textstyle\mathcal{L}_1=\mathrm \prod_{q=1}^{n}\left(1+\mathbb{E}[{\min\{\gamma_{\mathcal{S}\mathcal{R}_q},\gamma_{\mathcal{R}\mathcal{D}_q}\}}]\right)-\lambda\mathcal{J}, \end{equation} where $\mathcal{J}\triangleq\big(\sum_{q=1}^{n}(P_{\mathcal{S}_q}+P_{\mathcal{R}_q})-P_B\big)$. 
On simplifying the KKT conditions $\big[\frac{\partial \mathcal{L}_1}{\partial P_{\mathcal{S}_q}}=0$, $\frac{\partial \mathcal{L}_1}{\partial P_{\mathcal{R}_q}}=0$, $\lambda\mathcal{J}=0$,\,$C1$, $C2$, $\widehat{C3}$, and $\lambda \geq 0\big]$, we get a system of $(2n+2)$ equations represented by~\eqref{OptimizationA},~\eqref{OptimizationB},~\eqref{OptimizationC} and $\mathcal{J}$, to be solved $\{P_{\mathcal{S}_q},P_{\mathcal{R}_q}\}_{q=1}^n,d_{\mathcal{S}\mathcal{R}}$ and $\lambda$. Variables $Q_q,T_q , V_q,\forall q\le n,$ in \eqref{eq:KKTJ} are defined below. \begin{subequations}\label{three_var} \begin{eqnarray}\label{Qq_var} \hspace{-3mm}&Q_q\!=\!\frac{\beta}{\lambda N_q \Delta f}\bigg[\!\!\left(\!\!\frac{c_{\mathcal{S}\mathcal{R}_q}}{a_q^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^{\alpha}}\!\right)^{\!\!\frac{\mathcal{B}}{\mathcal{B}+1}}\hspace{-1.5mm}+\hspace{-1mm}\left(\!\!\frac{c_{\mathcal{R}\mathcal{D}_q} a_q^{\hspace{-1mm}-(D-\delta-d_{\mathcal{S}\mathcal{R}})}}{ (D-\delta-d_{\mathcal{S}\mathcal{R}})^{\alpha}}\!\right)^{\!\!\frac{\mathcal{B}}{\mathcal{B}+1}}\bigg]^{\!\!\frac{\mathcal{B}+1}{\mathcal{B}}}\!\!\!\!, \end{eqnarray} \begin{align}\label{Tq_var}\textstyle T_q\hspace{-0.5mm}=\hspace{-0.5mm} (\beta c_{\mathcal{S}\mathcal{R}_q}\hspace{-0.5mm}[\lambda N_q \Delta f a_q^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha]^{\hspace{-0.5mm}-1}\hspace{-0.5mm})^{\frac{\mathcal{B}}{\mathcal{B}+1}}\hspace{-1mm}-\hspace{-1mm}1, \end{align} \begin{align}\label{Vq_var} V_q=&\, (c_{\mathcal{S}\mathcal{R}_q}P_{{\mathcal{S}_q}} a_q^{D-\delta-d_{\mathcal{S}\mathcal{R}}} (D-\delta-d_{\mathcal{S}\mathcal{R}})^\alpha)^\mathcal{B}\nonumber\\ &\,+(c_{\mathcal{R}\mathcal{D}_q}P_{\mathcal{R}_q}a_q^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha)^\mathcal{B}. 
\end{align} \end{subequations} \color{black} As it is cumbersome to solve system of $(2n+2)$ equations for large value of $n$ to ensure the equivalence of problems $\text{(P0)}$ and $\text{(P1)}$, we next propose a novel low-complexity approximation. \begin{figure*}[!t] \begin{subequations}\label{eq:KKTJ} \begin{equation}\label{OptimizationA} \begin{aligned} P_{\mathcal{S}_q}=P_{\mathcal{R}_q}c_{\mathcal{R}\mathcal{D}_q}\left[c_{\mathcal{S}\mathcal{R}_q}a_q^{D-\delta-2d_{\mathcal{S}\mathcal{R}}}\left(\left(D-\delta\right)d_{\mathcal{S}\mathcal{R}}^{-1}-1\right)^\alpha\right]^{-1}\left[\left(Q_q\beta c_{\mathcal{S}\mathcal{R}_q}\left[N_q \lambda \Delta f a_q^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha\right]^{-1}\right)^\frac{\mathcal{B}}{\mathcal{B}+1}-1\right]^\frac{1}{\mathcal{B}} \end{aligned} \end{equation} \begin{eqnarray}\label{OptimizationB} P_{\mathcal{R}_q}=P_{\mathcal{S}_q}c_{\mathcal{S}\mathcal{R}_q}a_q^{D-\delta-2d_{\mathcal{S}\mathcal{R}}}\left(\left(D-\delta\right)d_{\mathcal{S}\mathcal{R}}^{-1}-1\right)^\alpha c_{\mathcal{R}\mathcal{D}_q}^{-1}\left[\left(Q_q\beta c_{\mathcal{R}\mathcal{D}}\left[N_q \lambda \Delta f a_q^{D-d_{\mathcal{S}\mathcal{R}}-\delta}\left(D-d_{\mathcal{S}\mathcal{R}}-\delta\right)^\alpha\right]^{-1}\right)^\frac{\mathcal{B}}{\mathcal{B}+1}-1\right]^\frac{1}{\mathcal{B}} \end{eqnarray} \begin{eqnarray}\label{OptimizationC} \sum_{q=1}^{n}\beta c_{\mathcal{S}\mathcal{R}_q}c_{\mathcal{R}\mathcal{D}_q}P_{\mathcal{S}_q}P_{\mathcal{R}_q}\left[N_q \Delta f\right]^{-1} V_q^{\frac{-\mathcal{B}}{\mathcal{B}+1}}\hspace{-1mm}\left(c_{\mathcal{R}\mathcal{D}_q}P_{\mathcal{R}_q}a_q^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha\right)^\mathcal{B}\hspace{-1mm}\left[T_q\left(\ln a_q\hspace{-0.8mm} +\hspace{-0.8mm}\alpha \left(D\hspace{-0.5mm}-\hspace{-0.5mm}\delta\hspace{-0.5mm}-\hspace{-0.5mm}d_{\mathcal{S}\mathcal{R}}\right)^{-1}\right)\hspace{-0.8mm}-\hspace{-0.8mm}\left(\ln a_q\hspace{-0.5mm} 
+\hspace{-0.5mm}\alpha d_{\mathcal{S}\mathcal{R}}^{-1}\hspace{-0.5mm}\right)\right]\hspace{-0.5mm}=\hspace{-0.5mm}0\!\!\! \end{eqnarray} \end{subequations} \hrulefill \end{figure*} \section{Low Complexity Approximation Algorithm}\label{section4} This proposed algorithm decoupling the joint optimization into individual PA and RP problems, can be summarized into three main steps as discussed in following three subsections. \subsection{Optimal PA (OPA) within a sub-band for a given RP}\label{sec:PA-a} For a given RP, we first distribute the power budget $P_{t_q}$ for sub-band $q$ between $P_{{\mathcal{S}_q}}$ and $P_{{\mathcal{R}_q}}$ to maximize $\overline{\gamma}_q$. As with $P_{\mathcal{R}_q}=P_{t_q}-P_{\mathcal{S}_q}$, $\overline{\gamma}_q$ is concave in $P_{\mathcal{S}_q}$, optimal values $P_{\mathcal{S}_q}^*$ and $P_{\mathcal{R}_q}^*=\mathcal{Z}_qP_{\mathcal{S}_q}^*$ are obtained on solving $\textstyle\frac{\partial\overline{\gamma}_q}{\partial P_{\mathcal{S}_q}}=0$, where \begin{equation}\label{Zq_var} \!\textstyle\mathcal{Z}_q \triangleq (\hspace{-0.5mm}c_{\mathcal{S}\mathcal{R}_q}\hspace{-0.1mm}a_q^{D\hspace{-0.1mm}-\hspace{-0.1mm}\delta\hspace{-0.1mm}-\hspace{-0.1mm}d_{\mathcal{S}\mathcal{R}}}(D\hspace{-0.5mm}-\hspace{-0.5mm}\delta\hspace{-0.5mm}-\hspace{-0.5mm}d_{\mathcal{S}\mathcal{R}}\hspace{-0.5mm})^\alpha[c_{\mathcal{R}\mathcal{D}_q}\hspace{-0.5mm}a_q^{\hspace{-0mm}d_{\mathcal{S}\mathcal{R}}}{d_{\mathcal{S}\mathcal{R}}^\alpha}]^{-1}\hspace{-0.5mm})^\frac{\mathcal{B}}{\mathcal{B}+1}\hspace{-1mm} \end{equation} \noindent\color{black} Here, note that $P_{\mathcal{S}_q}\gtrless P_{\mathcal{R}_q}$ as determined by $\mathcal{Z}_q\lessgtr1$ depends on the relative received SNRs over $\mathcal{S}\mathcal{R}$ and $\mathcal{R}\mathcal{D}$ links. 
\begin{figure*}[!t] \centering \subfigure[]{\includegraphics[width=2.3in]{Fig1.eps}} \subfigure[]{\includegraphics[width=2.3in]{Fig2.eps}} \subfigure[]{\includegraphics[width=2.3in]{Fig3.eps}} \caption{\small Validation of analysis and insights on OPA and ORP with varying system parameters. (a) Variation of expected data rate with $P_B$ in continuous and discrete domains. (b) Variation of $\widehat{p_{out}}$ with $d_{\mathcal{S}\mathcal{R}}$ with FPA. (c) Variation of OPA across sub-bands with $c_{\mathcal{S}\mathcal{R}}(f):c_{\mathcal{R}\mathcal{D}}(f)$.} \label{fig:validation} \end{figure*} \subsection{OPA to each sub-band for a given $\{P_{\mathcal{R}_q}\}_{q=1}^n$ and $d_{\mathcal{S}\mathcal{R}}$} Using this derived relationship $P_{\mathcal{R}_q}=\mathcal{Z}_qP_{\mathcal{S}_q}$, we can eliminate $\{P_{\mathcal{R}_q}\}_{q=1}^n$ in~\eqref{LagFun1} and hence obtain an updated Lagrangian $\mathcal{L}_2$ which is a function of only $n+2$ variables: \begin{equation}\label{LangFun2} \hspace{-0.5mm}\textstyle\mathcal{L}_2\hspace{-0.5mm}=\hspace{-0.5mm}\prod_{q=1}^{n}\hspace{-0.5mm}\left(1\hspace{-0.5mm}+\hspace{-0.5mm}P_{\mathcal{S}_q}[\mathcal{K}_q]^{-1}\right)\hspace{-0.5mm}-\hspace{-0.5mm}\lambda\hspace{-0.25mm}\big(\hspace{-0.5mm}\textstyle\sum_{q=1}^{n}P_{\mathcal{S}_q}(1\hspace{-0.5mm}+\hspace{-0.5mm}\mathcal{Z}_q)\hspace{-0.5mm}-\hspace{-0.5mm}P_B\big), \end{equation} where $\mathcal{K}_q=N_q \Delta f a_q^{d_{\mathcal{S}\mathcal{R}}}{d_{\mathcal{S}\mathcal{R}}^\alpha}\Big(1+\mathcal{Z}_q^\frac{1}{\mathcal{B}}\Big)^\frac{1}{\mathcal{B}}[\beta c_{\mathcal{S}\mathcal{R}_q}]^{-1}$. 
Now to obtain the optimal $\{P_{\mathcal{S}_q}\}_{q=1}^n$ and $\lambda$ for given $d_{\mathcal{S}\mathcal{R}}$ and $P_{\mathcal{R}_q}=\mathcal{Z}_qP_{\mathcal{S}_q}\;\forall q$, the corresponding KKT conditions are: \begin{subequations} \begin{eqnarray}\label{condA} \begin{aligned} \textstyle\frac{\partial \mathcal{L}_2}{\partial P_{\mathcal{S}_q}}=\frac{1}{\mathcal{K}_q}\textstyle\prod_{j=1,j\neq q}^{n}\left(1+\frac{P_{\mathcal{S}_j}}{\mathcal{K}_j}\right)-\lambda (1+\mathcal{Z}_q)=0, \end{aligned} \end{eqnarray} \begin{eqnarray}\label{condB} \begin{aligned} \textstyle \lambda\left(\sum_{q=1}^{n}(1+\mathcal{Z}_q)P_{\mathcal{S}_q}-P_B\right)=0. \end{aligned} \end{eqnarray} \end{subequations} As for $\lambda^*=0$,~\eqref{condA} cannot be satisfied, we note that $\lambda^*>0$. On solving~\eqref{condA} and~\eqref{condB}, $\{P_{\mathcal{S}_q}^*\}_{q=1}^n$ and $\lambda^*$ are obtained as: \begin{subequations} \begin{eqnarray}\label{eq:OPA-S} \begin{aligned} \textstyle P_{\mathcal{S}_q}^*\triangleq \frac{P_B+\textstyle\sum_{j=1}^{n}(1+\mathcal{Z}_j)\mathcal{K}_j-(1+\mathcal{Z}_q)\mathcal{K}_q}{n(1+\mathcal{Z}_q)},\quad \end{aligned} \end{eqnarray} \begin{eqnarray}\label{eq:opt-lam} \begin{aligned} \textstyle \lambda^*\triangleq(1+\mathcal{Z}_1)(\mathcal{K}_1+P_{\mathcal{S}_1})^{n-1}\textstyle\prod_{j=1}^{n}\frac{1}{\mathcal{K}_j(1+\mathcal{Z}_j)}. \end{aligned} \end{eqnarray} \end{subequations} \color{black} Further, as for practical system parameter values in UANs, $P_B\gg \textstyle\sum_{j=1}^{n}(1+\mathcal{Z}_j)\mathcal{K}_j-n(1+\mathcal{Z}_q)\mathcal{K}_q$, we note that $ P_{\mathcal{S}_q}^* \approx P_B[n(1+\mathcal{Z}_q)]^{-1}$. 
\textit{Hence, this approximation along with~\eqref{eq:OPA-S} and $P_{\mathcal{R}_q}=\mathcal{Z}_qP_{\mathcal{S}_q}$ provide novel insights on OPA across different sub-bands as a function of $f_q$ and RP $d_{\mathcal{S}\mathcal{R}}$.} \subsection{Optimal Positioning of Relay for the Obtained OPA} Using~\eqref{eq:OPA-S} and~\eqref{eq:opt-lam} in~\eqref{LangFun2}, $\mathcal{L}_2$ having $n+2$ variables gets reduced to a single variable Lagrangian $\mathcal{L}_3$ after writing $\{P_{\mathcal{S}_q}\}_{q=1}^n$ and $\lambda$ as functions of RP $d_{\mathcal{S}\mathcal{R}}$. Thus, we get optimal RP $d_{\mathcal{S}\mathcal{R}}^*$ by solving $\frac{\partial \mathcal{L}_3}{\partial d_{\mathcal{S}\mathcal{R}}}=0$, and then the OPA $P_{\mathcal{S}_q}^*$ by substituting $d_{\mathcal{S}\mathcal{R}}^*$ in~\eqref{eq:OPA-S} and $P_{\mathcal{R}_q}^*$ by $P_{\mathcal{R}_q}^*=\mathcal{Z}_qP_{\mathcal{S}_q}^*$. Here, it is worth noting that, \textit{regardless of value of $n\gg1$}, \textit{we just need to solve one single variable equation $\frac{\partial \mathcal{L}_3}{\partial d_{\mathcal{S}\mathcal{R}}}=0$ to obtain the tight approximation to the joint global-optimal solution as obtained by solving the system of $(2n+2)$ equations.} This in turn yields huge reduction in computational time complexity. \section{Numerical Results}\label{sec_results} The default experimental parameters are as follows. Operating frequency range is between $5$ to $15$ kHz~\cite{stoj02}, $c_{\mathcal{S}\mathcal{R}}(f)=c_{\mathcal{R}\mathcal{D}}(f)$ which is assumed to be constant over entire operating bandwidth~\cite{babu10}, $D=10$ km, $d_{\mathcal{S}\mathcal{R}}=5$ km, $n=260$, $r=1$ kbps, $K=3.01$ dB, $\alpha=1.5$, and $P_B=100$ dB re $\mu$ Pascal. First we validate the analysis by plotting the mean value of data rate in both continuous and discrete frequency domains (with $n=260$) in Fig.~\ref{fig:validation}(a). 
A percentage error of $\le0.02\%$ between the analytical and simulation results in each case validates that with $n\ge260$, $\widehat{p_{out}}$ closely matches $p_{out}$. Further, via Fig.~\ref{fig:validation}(b), the minimum $\widehat{p_{out}}$ obtained using the low complexity approximation algorithm (cf. Section~\ref{section4}) differs by less than $0.032\%$ from the global minimum value as returned by solving the $(2n+2)$ equations for obtaining the solution of (P1). Next we get insights on OPA and optimal RP (ORP). In Fig.~\ref{fig:validation}(b), the performance of different fixed PA (FPA) schemes is compared against OPA for varying RPs. If the total PA $nP_{\mathcal{S}_q}$ at $\mathcal{S}$ in FPA increases, the minimum $\widehat{p_{out}}$ is obtained when $\mathcal{R}$ is located near $\mathcal{D}$. The uniform PA (UPA), having $P_{\mathcal{S}_q}=P_{\mathcal{R}_q}={P_B}/(2n)$ $\forall$ $q$, achieves nearly the same global minimum value of $\widehat{p_{out}}$ approximately at the same point $d_{\mathcal{S}\mathcal{R}}=0.5D$. This is because, on using $c_{\mathcal{S}\mathcal{R}}(f)=c_{\mathcal{R}\mathcal{D}}(f)$ and $d_{\mathcal{S}\mathcal{R}}=0.5D$ in~\eqref{eq:OPA-S}, $\mathcal{Z}_q=1$ $\forall$ $q$, and as a result the OPA is independent of the center frequencies. \textit{Thus, for symmetric channels, i.e., $c_{\mathcal{S}\mathcal{R}}(f)=c_{\mathcal{R}\mathcal{D}}(f)$, OPA on sub-bands is uniform regardless of the values of $\{f_q\}_{q=1}^n$, as also evident from Fig.~\ref{fig:validation}(c)}. However, in practice for asymmetric $\mathcal{S}\mathcal{R}$ and $\mathcal{R}\mathcal{D}$ links, we need to obtain the OPA using the proposed algorithm. The variation of OPA across the sub-bands for different channel gains of the $\mathcal{S}\mathcal{R}$ and $\mathcal{R}\mathcal{D}$ links is shown in Fig.~\ref{fig:validation}(c).
When $c_{\mathcal{S}\mathcal{R}}(f):c_{\mathcal{R}\mathcal{D}}(f)=2:1$, $\mathcal{S}$ requires lower PA and optimal RP is nearer to $\mathcal{D}$, because channel gain of $\mathcal{S}\mathcal{R}$ link is higher. But for $c_{\mathcal{S}\mathcal{R}}(f):c_{\mathcal{R}\mathcal{D}}(f)=4:1$ and $6:1$, initially the OPA is lower at $\mathcal{S}$ followed by an inversion taking place due to $Z_q<1$ at $q\geq 138$ and $\geq 188$, respectively, because the relative attenuation $\frac{a_q^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha}{a_q^{D-\delta-d_{\mathcal{S}\mathcal{R}}}{(D-\delta-d_{\mathcal{S}\mathcal{R}})}^\alpha}$ dominates over the relative expected gain of $\frac{c_{\mathcal{S}\mathcal{R}}(f)}{c_{\mathcal{R}\mathcal{D}}(f)}$ of $\mathcal{S}\mathcal{R}$ to $\mathcal{R}\mathcal{D}$ link (cf. Section~\ref{sec:PA-a}). \textit{Therefore, OPA along a sub-band over $\mathcal{S}\mathcal{R}$ and $\mathcal{R}\mathcal{D}$ link depends on dominance of relative gain of fading channels over relative channel attenuation, and vice versa.} Finally, we compare the outage performance of the three optimization schemes, (i) ORP with UPA, (ii) OPA with $d_{\mathcal{S}\mathcal{R}}=0.5D$, and (iii) joint PA and RP, against a fixed benchmark scheme with UPA and $d_{\mathcal{S}\mathcal{R}}=0.5D$ (cf. Fig.~\ref{fig:compare}). The average percentage improvement provided by ORP, OPA, and joint optimization schemes are $15.5\%$, $1.2\%$, and $23.85\%$ respectively for $c_{\mathcal{S}\mathcal{R}}(f):c_{\mathcal{R}\mathcal{D}}(f)=4:1$, and $0.31\%$, $0.19\%$, and $0.31\%$ for $c_{\mathcal{S}\mathcal{R}}(f):c_{\mathcal{R}\mathcal{D}}(f)=1:1$. Also, the same is true for reverse ratio, i.e., $c_{\mathcal{S}\mathcal{R}}(f):c_{\mathcal{R}\mathcal{D}}(f)=1:2 $ and $1:4$. 
\textit{Thus, higher the asymmetry in channel gains of $\mathcal{S}\mathcal{R}$ and $\mathcal{R}\mathcal{D}$ links, higher is the percentage improvement in performance and the ORP is a better semi-adaptive scheme than OPA.} \begin{figure} \centering \includegraphics[width=3.5in]{Fig4.eps} \caption{\small Percentage improvement achieved by different proposed optimization schemes over FPA for different $[c_{\mathcal{S}\mathcal{R}}(f):c_{\mathcal{R}\mathcal{D}}(f), r]$.} \label{fig:compare} \end{figure} \section{Concluding Remarks} We jointly optimized PA and RP to minimize outage probability. After proving the global optimality of the problem, we also propose an efficient, tight approximation algorithm which substantially reduces the complexity in calculation. In general, the numerically validated proposed analysis and joint optimization have been shown to provide more than $10\%$ outage improvement over the fixed benchmark scheme. Though this performance enhancement depends on the $\mathcal{S}\mathcal{R}$ and $\mathcal{R}\mathcal{D}$ channel gains, the cost incurred in practically realizing them is negligible due to the proposed low complexity design. \appendices \setcounter{equation}{0} \setcounter{figure}{0} \renewcommand{\theequation}{A.\arabic{equation}} \renewcommand{\thefigure}{A.\arabic{figure}} \section{} \subsection{Proof of Pseudoconvexity of $p_{out}$ in $S_\mathcal{S}$, $S_\mathcal{R}$, $d_{\mathcal{S}\mathcal{R}}$}\label{AppA} From~\eqref{out_exp}, we notice that the outage probability $p_{out}$ can be observed as the CDF of the random rate $\mathfrak{R}\triangleq\int_{0}^{B_W}\frac{1}{2}\log_2(1+\min\{\gamma_{\mathcal{S}\mathcal{R}}(f),\gamma_{\mathcal{R}\mathcal{D}}(f)\})\text{d}f$. 
It is clear that $\mathfrak{R}$ depends on the end-to-end SNR $\gamma=\min\{\gamma_{\mathcal{S}\mathcal{R}}(f),\gamma_{\mathcal{R}\mathcal{D}}(f)\}$, whose expectation as obtained using the relationship $\textbf{Pr}[\gamma>x]=\textbf{Pr}[\gamma_{\mathcal{S}\mathcal{R}}>x]\textbf{Pr}[\gamma_{\mathcal{R}\mathcal{D}}>x]$ in~\eqref{ccdf}, is given by $\overline{\gamma}= \frac{\overline{\gamma}_{\mathcal{S}\mathcal{R}}\hspace{1mm}\overline{\gamma}_{\mathcal{R}\mathcal{D}}}{[\overline{\gamma}_{\mathcal{S}\mathcal{R}}^\mathcal{B}+\overline{\gamma}_{\mathcal{R}\mathcal{D}}^\mathcal{B}]^\frac{1}{\mathcal{B}}}$. After using the definitions for $\overline{\gamma}_{ij}$ (as given in Section~\ref{section2}), we obtain: \begin{equation}\label{meanSNR} \overline{\gamma}\!=\!\textstyle\frac{\beta}{N(f)}\Big[\Big(\frac{a(f)^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha}{c_{\mathcal{S}\mathcal{R}}S_{S}(f)}\Big)^\mathcal{B}+\Big(\frac{a(f)^{D-\delta-d_{\mathcal{S}\mathcal{R}}}(D-\delta-d_{\mathcal{S}\mathcal{R}})^\alpha}{c_{\mathcal{R}\mathcal{D}}S_{R}(f)}\Big)^\mathcal{B}\Big]^{\frac{-1}{\mathcal{B}}} \end{equation} As the distribution of $\mathfrak{R}$ depends on SNR $\gamma$, using the joint pseudoconcavity of $\overline{\gamma}$ as proved in Appendix~\ref{AppB}, it can be shown that the expectation $\overline{\mathfrak{R}}$ of $\mathfrak{R}$ is also jointly pseudoconcave in $S_\mathcal{S}(f)$, $S_\mathcal{R}(f)$, and $d_{\mathcal{S}\mathcal{R}}$. The latter holds because the affine and logarithmic transformation along with integration preserve the pseudoconcavity of the positive pseudoconcave function $\overline{\gamma}$~\cite{Baz06},~\cite[App. C]{mish17}. Finally, using the property that the CDF is a monotonically decreasing function of the expectation of the underlying random variable~\cite[Theorem 1]{mish18}, we observe that $p_{out}$, which holds a similar CDF and expectation relationship with $\overline{\mathfrak{R}}$, is jointly pseudoconvex~\cite{Baz06}. 
\subsection{Proof of Pseudoconcavity of $\overline{\gamma}$ in $S_\mathcal{S}$, $S_\mathcal{R}$, and $d_{\mathcal{S}\mathcal{R}}$}\label{AppB} The bordered Hessian matrix $B_H(\overline{\gamma})$ for $\overline{\gamma}$ is given by: \begin{eqnarray}\label{eq:ApB1} B_H(\overline{\gamma})=\left[\hspace{-0.5mm} \begin{array}{cccc} 0 & \frac{\partial \overline{\gamma}}{\partial {S_\mathcal{S}}} & \frac{\partial \overline{\gamma}}{\partial {S_\mathcal{R}}} & \frac{\partial \overline{\gamma}}{\partial d_{\mathcal{S}\mathcal{R}}} \\ \frac{\partial \overline{\gamma}}{\partial {S_\mathcal{S}}} & \frac{\partial^2 \overline{\gamma}}{\partial {S_\mathcal{S}}^2} & \frac{\partial^2 \overline{\gamma}}{\partial {S_\mathcal{S}} \partial {S_\mathcal{R}}} & \frac{\partial^2 \overline{\gamma}}{\partial {S_\mathcal{S}} \partial {d_{\mathcal{S}\mathcal{R}}}}\\ \frac{\partial \overline{\gamma}}{\partial {S_\mathcal{R}}} & \frac{\partial^2 \overline{\gamma}}{\partial {S_\mathcal{R}} \partial {S_\mathcal{S}}} & \frac{\partial^2 \overline{\gamma}}{\partial {S_\mathcal{R}}^2} & \frac{\partial^2 \overline{\gamma}}{\partial {S_\mathcal{R}} \partial d_{\mathcal{S}\mathcal{R}}} \\ \frac{\partial \overline{\gamma}}{\partial d_{\mathcal{S}\mathcal{R}}} & \frac{\partial^2 \overline{\gamma}}{\partial d_{\mathcal{S}\mathcal{R}} \partial {S_\mathcal{S}} } & \frac{\partial^2 \overline{\gamma}}{\partial d_{\mathcal{S}\mathcal{R}} \partial {S_\mathcal{R}} } & \frac{\partial^2 \overline{\gamma}}{\partial d_{\mathcal{S}\mathcal{R}}^2} \end{array} \right] \end{eqnarray} From \eqref{eq:ApB1}, the joint pseudoconcavity of $\overline{\gamma}$ in $S_\mathcal{S}(f)$, $S_\mathcal{R}(f)$, and $d_{\mathcal{S}\mathcal{R}}$ is proved next by showing that the determinant of $3\times3$ leading principal submatrix of $B_H(\overline{\gamma})$, denoted by $\mathfrak{L}$, is positive, and the determinant of $B_H(\overline{\gamma})$ is negative~\cite{Baz06}. 
\begin{subequations} \begin{align}\label{L_var} \begin{aligned} \hspace{-3mm}|\mathfrak{L}|=(1+\mathcal{B})Y_1^\mathcal{B}Y_2^\mathcal{B}(Y_1^\mathcal{B}+Y_2^\mathcal{B})^{-3-\frac{3}{\mathcal{B}}}(S_\mathcal{S}S_\mathcal{R})^{-2}>0,\!\! \end{aligned} \end{align} \begin{align}\label{Bh_var} \hspace{-1mm}|B_H(\overline{\gamma})&|\hspace{-1mm}=\hspace{-1mm}-\{Y_1^\mathcal{B}Y_2^\mathcal{B}(\hspace{-0.5mm}Y_1^\mathcal{B}\hspace{-1mm}+\hspace{-1mm}Y_2^\mathcal{B}\hspace{-0.5mm})^{\hspace{-0.5mm}-2\hspace{-0mm}-\hspace{-0mm}\frac{3}{\mathcal{B}}}(d_{\mathcal{S}\mathcal{R}}(\hspace{-0.5mm} D\hspace{-1mm}-\hspace{-1mm}\delta\hspace{-1mm}-\hspace{-1mm}d_{\mathcal{S}\mathcal{R}}\hspace{-0.5mm})\nonumber\\ &\;\times\hspace{-0.5mm} S_\mathcal{S}S_\mathcal{R})^{\hspace{-0.5mm}-2}\hspace{-0.5mm}\}\{\hspace{-0.5mm}\alpha(\alpha\hspace{-1mm}-\hspace{-1mm}1)(1\hspace{-1mm}+\hspace{-1mm}\mathcal{B}\hspace{-0mm})((\hspace{-0.5mm}D\hspace{-1mm}-\hspace{-1mm}\delta\hspace{-0.5mm})Y_1\hspace{-1mm}-\hspace{-1mm}d_{\mathcal{S}\mathcal{R}}(Y_1\hspace{-1mm}+\hspace{-1mm}Y_2))^2\nonumber\\ &\;+\hspace{-0.5mm}\alpha(\mathcal{B}(\alpha\hspace{-1mm}-\hspace{-1mm}1)\hspace{-1mm}-\hspace{-1mm}1)(D\hspace{-1mm}-\hspace{-1mm}\delta)^2Y_1Y_2\}\hspace{-1mm}+\hspace{-1mm}2\alpha d_{\mathcal{S}\mathcal{R}}(D\hspace{-1mm}-\hspace{-1mm}\delta\hspace{-1mm}-\hspace{-1mm}d_{\mathcal{S}\mathcal{R}})\nonumber\\ &\;\times \ln a\{(1+\mathcal{B})d_{\mathcal{S}\mathcal{R}}Y_2^2+(1+\mathcal{B})(D-\delta-d_{\mathcal{S}\mathcal{R}})Y_1^2\nonumber\\ &\;+\hspace{-0.5mm}(\mathcal{B}\hspace{-1mm}-\hspace{-1mm}1)(D\hspace{-1mm}-\hspace{-1mm}\delta)Y_1Y_2\} \hspace{-0.5mm}+\hspace{-0.5mm} d^2(D\hspace{-1mm}-\hspace{-1mm}\delta\hspace{-1mm}-\hspace{-1mm}d_{\mathcal{S}\mathcal{R}})^2(\ln a)^2\hspace{-0.5mm}\{(Y_1\hspace{-1mm}\nonumber\\ &\;-\hspace{-1mm}Y_2)^2+\hspace{-1mm}\mathcal{B}(Y_1\hspace{-1mm}+\hspace{-1mm}Y_2)^2\}\hspace{-1mm}<\hspace{-1mm}0, 
\hspace{1mm}\forall\{(\alpha\hspace{-1mm}>\hspace{-1mm}1)\hspace{-0.5mm}\wedge\hspace{-0.5mm} (\mathcal{B}\hspace{-1mm} > \hspace{-1mm}1)\}\!\!\!\!\!\!\!\! \end{align} \end{subequations} Here $Y_1\triangleq\frac{a^{d_{\mathcal{S}\mathcal{R}}}d_{\mathcal{S}\mathcal{R}}^\alpha}{c_{\mathcal{S}\mathcal{R}}S_\mathcal{S}}$ and $Y_2\triangleq\frac{a^{D-\delta-d_{\mathcal{S}\mathcal{R}}}(D-\delta-d_{\mathcal{S}\mathcal{R}})^\alpha}{c_{\mathcal{R}\mathcal{D}}S_\mathcal{R}}$. So,~\eqref{L_var} and~\eqref{Bh_var} along with the implicit negativity of $2\times2$ leading principal submatrix of $B_H(\overline{\gamma})$ complete the proof. \color{black} \bibliographystyle{IEEEtran}
{ "redpajama_set_name": "RedPajamaArXiv" }
1,835
noun Machinery, Automotive. See under rack1 (def. 5a). Words nearby rack and pinion racing form, racing skate, racism, racist, rack, rack and pinion, rack and ruin, go to, rackboard, rack car, racket, racketeer Other definitions for rack and pinion (2 of 2) rack-and-pinion [ rak-uhn-pin-yuhn ] / ˈræk ənˈpɪn yən / of or relating to a mechanism in which a rack engages a pinion: rack-and-pinion steering. Origin of rack-and-pinion First recorded in 1900–05 How to use rack and pinion in a sentence As an example of good science-and-society policymaking, the history of fluoride may be more of a cautionary tale. Anti-Fluoriders Are The OG Anti-Vaxxers|Michael Schulson|July 27, 2016|DAILY BEAST Yes, Byrd—dead four-and-a-half years now—was a Kleagle in the Ku Klux Klan. Steve Scalise and the Right's Ridiculous Racial Blame Game|Michael Tomasky|January 2, 2015|DAILY BEAST Bake on the center rack of the oven for 40 to 50 minutes or until set. Make 'The Chew's' Carla Hall's Pumpkin Pecan Pie|Carla Hall|December 26, 2014|DAILY BEAST Later that night, that same black-and-red banner would be seen again—in the column of marchers chanting for dead cops. The Monsters Who Screamed for Dead Cops|Jacob Siegel|December 23, 2014|DAILY BEAST She also practises etching, pen-and-ink drawing, as well as crayon and water-color sketching. Women in the fine arts, from the Seventh Century B.C. to the Twentieth Century A.D.|Clara Erskine Clement No law of that country must exceed in words the number of letters in their alphabet, which consists only in two-and-twenty. Gulliver's Travels|Jonathan Swift Mr. Spurrell came down to see a horse, and we shall be very glad to have the benefit of his opinion by-and-by. Punch, or the London Charivari, Volume 107, November 3, 1894|Various They were eaten too quickly, in long gulps of four-and-twenty hours at a time. 
Kipling Stories and Poems Every Child Should Know, Book II|Rudyard Kipling The whole thing begins to have a jigsaw look, like a child's toy rack with wooden soldiers on it, expanding and contracting. British Dictionary definitions for rack and pinion a device for converting rotary into linear motion and vice versa, in which a gearwheel (the pinion) engages with a flat toothed bar (the rack) (of a type of steering gear in motor vehicles) having a track rod with a rack along part of its length that engages with a pinion attached to the steering column
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
567
WOZ prototyping requires developers to create a rudimentary model of the completed product, which is called a prototype. The prototype may be quite simple, using every-day objects to represent parts of the finished product or it may be a working model, capable of performing some – but not all – of the tasks the completed product will perform. Once the prototype has been created, developers use role playing to test how end users will interact with the product. The WOZ methodology requires three things: a script that provides directions for what is to take place, a person to play the role of the end user and a human "wizard" that will perform tasks that simulate the behavior of the completed product. The person playing the end user may -- or may not -- know that they are playing a role or that the wizard's tasks are being performed manually by a human being instead of by a machine or computer program. Wizard of Oz prototyping is often used in agile software development and lean programming to improve how business rules are implemented in software. After each iteration, anecdotal feedback and data is gathered and analyzed to help improve the next round of development. The test-and-learn cycle is repeated until development is concluded. The term Wizard of Oz prototyping is credited to usability expert Dr. Jeff Kelley, who was inspired by the scene in the movie The Wizard of Oz in which Toto the dog pulls back a curtain to reveal that the wizard is actually a man who is flipping switches and pulling levers. Where does Wizard of Oz prototyping come in handy in your organization?
{ "redpajama_set_name": "RedPajamaC4" }
7,203
Latinus of Burgundy (420–500) was a 5th-century Duke of Burgundy. All that is known of the life of Latinus (Dux Latinus Gontbado) is contained in the following incident: "A certain Domitian mounted his donkey and went to Torcieu, a village a league away. His food being exhausted, he returned to ask a noble and powerful man called Latinus, who lived in a place called Catonica. This Latinus gave his name to the village where he lived, and to a fountain that was near. (This village is now called Lagnieu.) The wife of Latinus was called Syagria. Domitian presented himself to him with his donkey, and asked him to buy food for him and his companions who were building an oratory. Latinus said to him: "How do you want me to give you corn? You look more like a histrion than a servant of God." "Latinus, who was Arian, asked him his profession of faith; Domitian recited it to him: Latinus told him that his profession of faith was false, and he refused to load wheat on his donkey. There were nearby two ancient Roman temples devoted to Jupiter and Saturn; the men who cultivated the fields came secretly to adore these gods. Domitian replied: 'If what I say is true, let these temples crumble.'" "Then there arose a violent storm, accompanied by thunder and hail; the temples collapsed. Latinus took refuge in his palace, built with enormous stones and adorned with marble. The storm over, he sent to know what had become of Domitian, whom he thought killed. He was found safe and sound. Latinus repented, and apologized to Domitian for having called him mad." Latinus and his family converted to Catholicism, and he gave a vineyard to Domitian. References 420 births 500 deaths Dukes of Burgundy
{ "redpajama_set_name": "RedPajamaWikipedia" }
9,610
{"url":"https:\/\/viralfsharp.com\/category\/graphs\/","text":"### Archive\n\nArchive for the \u2018Graphs\u2019 Category\n\n## HashSet, Graph, Cognac\n\nThe post on applying GPU to finding Eulerian path mentioned a stumbling block: partitioning a very specific kind of graph.\n\nIn general, partitioning is a hard problem, NP-hard actually. The graphs we are dealing with are very specific, though, and may be partitioned in $O(|E|)$ time (E \u2013 the set of graph edges). Our graphs are of the following form: If we are \u201clucky\u201d it consists of just one or very few (directed) loops, if not \u2013 we have lots of such disconnected loops. And it is this unlucky case that kept the bottle of good cognac sealed all these months (only a descent solution to the partitioning problem would break it out).\n\nTo partition such a graph, i.e., color every loop in its distinct color, all we need to do is walk the graph from any vertex and once the loop is detected, we pick a vertex at random from the set of those we haven\u2019t visited yet and start walking again. We repeat until everything is visited:\n\nlet partitionLinear (end' : int [])=\nlet allVertices = HashSet<int>(end')\nlet colors = Array.create end'.Length -1\nlet mutable color = 0\n\nwhile allVertices.Count > 0 do\nlet mutable v = allVertices.First()\nwhile colors.[v] < 0 do\nallVertices.Remove v |> ignore\ncolors.[v] <- color\nv <- end'.[v]\ncolor <- color + 1\ncolors, color\n\n\nThis is great, except it doesn\u2019t work. The problem is revealed when a graph is very large and very fragmented. This is when the code at line 7 fails us. The problem is, we expect it to work in O(1): how hard can it be to retrieve the \u201cfirst\u201d element?! Well, since we are dealing with a data structure that does not have an intrinsic notion of order, it may be quite hard. 
In fact, the complexity here is $O(|E|)$ (actually $O(|V|$, but for our graphs $|V| = |E|$), thus the complexity of the code above is $O(|E|^2)$.\n\nThe following code actually works:\n\nlet partitionLinear (end' : int [])=\nlet colors = Array.create end'.Length -1\nlet mutable color = 0\nlet mutable num = end'.Length\nlet mutable curIdx = 0\nlet mutable nextIdx = num - 1\n\nwhile num > 0 && curIdx >= 0 do\nlet mutable v = curIdx\nwhile colors.[v] < 0 do\ncolors.[v] <- color\nv <- end'.[v]\ncolor <- color + 1\nnum <- num - 1\nwhile nextIdx >= 0 && colors.[nextIdx] >= 0 do\nnextIdx <- nextIdx - 1\ncurIdx <- nextIdx\ncolors, color\n\n\nIn the worst case it is still $O(|E|^2)$, however, this worst case is unlikely and we expect the performance close to $O(|E|)$ in general.\nThis won\u2019t cure the performance problems of the GPU algorithm, which still relies on the number of graph partitions to be somewhat reasonable, but it will enable it to run in some respectable time. Perhaps time to break out the cognac after all!\n\nCategories: F#, C#, bioinformatics, Graphs\n\n## Walking the Euler Path: PIN Cracking and DNA\u00a0Sequencing\n\nContinuing on to some cool applications of Eulerian paths.\n\nThe goal of this little graph experiment remains exploration of accelerating Eulerian path finding on the GPU. This is the final introductory post.\n\n### Eulerian Path\n\nHierholzer algorithm works great. It\u2019s linear in the number of edges, so as fast as we can possibly have. The idea is simple: pick a vertex, walk the graph, removing used edges from consideration and adding visited vertices to a stack, once we circle back to a vertex without edges \u2013 pop it from the stack and pre-pend it to the path. 
Once the stack is empty and all edges have been traversed \u2013 we have the path\/cycle.\n\nmember this.FindEulerCycle (?start) =\nlet mutable curVertex = defaultArg start 0\n\nlet stack = Stack<int>()\nlet connections = Dictionary<int, int []>()\nlet start = curVertex\nlet mutable cycle = []\nlet mutable first = true\n\nwhile stack.Count > 0 || first do\nfirst <- false\nlet connected = connections.[curVertex]\nif connected.Length = 0 then\ncycle <- curVertex :: cycle\ncurVertex <- stack.Pop()\nelse\nstack.Push curVertex\nconnections.[curVertex] <- connected.[1..]\ncurVertex <- connected.[0]\nif not (connections.ContainsKey curVertex) then\n\nlet path = start::cycle\nif path.Length <> this.NumEdges + 1 then []\nelse\nstart::cycle |> List.map (fun i -> verticesOrdinalToNames.[i])\n\n\nHere we don\u2019t check for pre-conditions on whether the graph has an Eulerian path\/cycle, since this check is expensive enough that failure of the algorithm can serve as such a check.\n\nGetting connected vertices (outgoing edges) is as fast as getting a sub-range. We only do it once for every vertex, then these are stored in a dictionary and mutated as we remove \u201cused\u201d edges, so the graph itself remains immutable. In our representation, getting outgoing edges is easy:\n\nlet getVertexConnections ordinal =\nlet start = rowIndex.[ordinal]\nlet end' = rowIndex.[ordinal + 1] - 1\ncolIndex.[start..end']\n\n\n### De Bruijn Sequence\n\nOn a seemingly unrelated, but actually intimately related topic. Given an alphabet of m characters, create a cyclical sequence which:\n\n1. Contains all sub-sequences of length n, and\n2. Does not have any repeating sub-sequences of length n\n\nThe sequence is cyclical in a sense that we recover all its subsequences of length n by sliding a cyclical window over the sequence. So, for example, for the binary alphabet and n=3:\n\nWe can traverse the graph in order of the marked edges and record each edge label, thus getting the sequence: 01011100. 
This is a cyclical sequence, we just broke it in an arbitrary way. Sliding the n=3 length window we\u2019ll get all the 3-digit binary numbers.\n\nWe get the sequence by first constructing the De Bruijn Graph from our sequence of numbers. The graph is constructed by taking all the sequences of length n \u2013 1 and connecting them \u201cprefix-to-suffix\u201d, where for each sequence of length n, prefix (suffix) is the subsequence of the first (last) n \u2013 1 characters of this sequence. So, for instance, in the above example, vertex \u201900\u2019 is a prefix of \u2018001\u2019, while \u201901\u2019 is its suffix. So while \u201900\u2019 and \u201901\u2019 are both vertices, they are linked with the edge that is labelled by the character necessary to create the entire number of length n (001) by moving from prefix to suffix: 00 -> 01, label: 1.\n\nThe resulting graph has a Eulerian cycle as it is easy enough to see by induction. We recover the sequence by traversing the cycle, and since we traverse all the edges only once, we\u2019ll get exactly what we are looking for.\n\nlet prefix (s:string) = s.[..s.Length - 2]\nlet suffix (s:string) = s.[1..]\nlet prefSuf s = prefix s, suffix s \/\/ shorthand\n\nlet numToBinary len n =\nlet rec numToBinaryRec n len acc =\nif len = 0 then acc\nelse\nnumToBinaryRec (n >>> 1) (len - 1) (String.Format(\"{0}{1}\", n &&& 0x1, acc))\nnumToBinaryRec n len \"\"\n\nlet binaryDebruijnSeq n =\nif n <= 0 then failwith \"n should be positive\"\nlet finish = pown 2 n\nlet gr =\n[0..finish-1]\n|> List.map (numToBinary n >> prefSuf)\n|> List.groupBy fst\n|> List.map (fun (v, prefSuf) -> v + \" -> \" + (prefSuf |> List.map snd |> List.reduce (fun st e -> st + \",\" + e )))\n|> DirectedGraph<string>.FromStrings\n\nlet debruinSeq = gr.FindEulerPath()\nlet debruinNum =\ndebruinSeq\n|> List.windowed 2\n|> List.mapi (fun i [p; s] -> \"\\\"\" + (i + 1).ToString() + \":\" + s.[s.Length - 1].ToString() + \"\\\"\")\n\ngr.Visualize(euler = true, 
eulerLabels = debruinNum)\n\n\n\nHere the function binaryDeruijnSeq computes a prefix and a suffix of all n-digit binary numbers, then groups prefixes together and builds a collection of graph strings in my notation: $p\\ ->\\ s_1,\\ s_2...s_n$, connecting a prefix to all its suffixes. After that, the collection is converted into an instance of a DirectedGraph class, the Eulerian cycle is found and visualized in such a way, that starting from the green vertex, moving along the edges that mark the Eulerian cycle, we recover the De Bruijn sequnce by recording the edge labels.\n\n### PIN Cracking\n\nIf we have a device protected by a 4-digit pin, such that punching in a few numbers in a sequence will unlock the device as long as there is a correct subsequence punched, we can use the De Bruijn approach above to generate a 10,000 long sequence that will necessarily yield the correct PIN in only 10,000 punches, as opposed to 40,000. See this article that describes it in some detail.\n\n### DNA Sequencing\n\nMy favorite application, of course, is to DNA sequencing. DNA is sequenced from a bunch of reads. The reads are not very long \u2013 maybe around 300 nucleotides, maybe less. They are not always perfect either: some nucleotide or a few may not be produced correctly by the sequencer. Still, if we can gather enough of them together, align and error-correct, we could then build a De Bruijn graph much the same way as described above thus linking the reads together in a DNA sequence. 
This is of course a gross oversimplification, but it is the reason why I love Eulerian cycles and the source of my interest in speeding up algorithms of finding them.\n\nIn the future posts \u2013 more forays into the GPU-land in an attempt to speed up something already pretty fast and what came out of it.\n\nCategories: bioinformatics, F#, Graphs\n\n## Visualizing Graphs\n\nSeptember 18, 2016 1 comment\n\n#### Previously\n\nWalking the Eule Path: Intro\n\n### Generating and Visualizing Graphs\n\nI can hardly overemphasize the importance of visusalizations. Many a bug had been immediately spotted just by looking at a visual of a complex data structure. I therefore decided to add visuals to the project as soon as the DirectedGraph class was born.\n\n#### Code & Prerequisits\n\nCode is on GitHub.\n\n1. GraphViz: install and add the bin directory to the PATH\n2. EmguCV v3.1: install and add the bin directory to the PATH\n\n#### DrawGraph\n\nThis is a small auxiliary component I wrote to make all future visualizations possible. And here is a sidebar. I didn\u2019t want to write this component. I am not a fan of re-writing something that was written a hundred times before me, so the first thing I did was look for something similar I could use. Sure enough, I found a few things. How can I put it? Software engineering is great, but boy, do we tend to overengineer things! I know, I\u2019m guilty of the same thing myself. All I wanted from the library was an ability to receive a text file written in GraphViz DSL, and get on the output a .png containing the picture of the graph. Just a very simple GraphViz driver, nothing more.\n\nOne library had me instantiate 3 (three!) classes, another developed a whole API of its own to build the GraphViz file\u2026 I ended up writing my own component, it has precisely 47 lines of code. the last 4 lines are aliasing a single function that does exactly what I wanted. 
It creates the png file and then immediately invokes the EmguCV image viewer to show it. After we\u2019re done, it cleans up after itself, deleting the temporary png file. Here it is.\n\n#### Taking it for a Ride\n\nJust to see this work\u2026\nAnother digression. Love the new feature that generates all the \u201c#r\u201d instructions for F# scripts and sticks them into one file! Yes, this one! Right-click on \u201cReferences\u201d in an F# project:\n\n.\n\nAnd the generated scripts auto-update as you recompile with new references! A+ for the feature, thank you so much.\n\nComes with a small gotcha, though: sometimes it doesn\u2019t get the order of references quite right and then errors complaining of references not being loaded appear in the interactive. I spent quite a few painful hours wondering how is it that this reference was not loaded, when here it is! Then I realized: it was being loaded AFTER it was required by references coming after it).\n\n#load \"load-project-release.fsx\"\nopen DrawGraph\n\ncreateGraph \"digraph{a->b; b->c; 2->1; d->b; b->b; a->d}\" \"dot.exe\" None\n\n\n\nCool. Now I can take this and use my own function to generate a graph from a string adjacency list, visualize it, and even view some of its properties. Sort of make the graph \u201cpalpable\u201d:\n\nlet sparse = [\"a -> b, c, d\"; \"b -> a, c\"; \"d -> e, f\"; \"e -> f\"; \"1 -> 2, 3\"; \"3 -> 4, 5\"; \"x -> y, z\"; \"2 -> 5\"]\nlet grs = StrGraph.FromStrings sparse\n\ngrs.Visualize(clusters = true)\n\n\nStrGraph.FromStrings does exactly what it says: it generates a graph from a sequence of strings, formatted like the sparse list above.\nMy Visualize function is a kitchen sink for all kinds of visuals, driven by its parameters. 
In the above example, it invokes graph partitioning to clearly mark connected components.\n\nIt is important to note, that this functionality was added to the visualizer not because I wanted to see connected components more clearly, but as a quick way to ensure that my partitioning implementation was indeed working correctly.\n\n#### Generating Data and Looking at It\n\nNow we have a class that builds graphs and even lets us look at them, so where do we get these graphs? The easiest thing (seemed at the time) was to create them.\n\nEnter FsCheck. It\u2019s not the easiest library to use, there is a learning curve and getting used to things takes time, but it\u2019s very helpful. Their documentation is quite good too. The idea is to write a generator for your type and then use that generator to create as many samples as you like:\n\n#load \"load-project-release.fsx\"\n\nopen Graphs\nopen FsCheck\nopen System\nopen DataGen\n\nlet grGen = graphGen 3 50\nlet gr = grGen.Sample(15, 5).[2]\n\ngr.Visualize(into=3, out= 3)\n\n\nThis produces something like:\n\nMy function graphGen len num generates a graph of text vertices where len is the length of a vertex name and num is the number of vertices. It returns an FsCheck generator that can then be sampled to get actual graphs. 
This was a one-off kind of experiment, so it\u2019s in a completely separate module:\n\n\n\/\/DataGen.fs\n\nmodule DataGen\nopen FsCheck\nopen System\nopen Graphs\n\nlet nucl = Gen.choose(int 'A', int 'Z') |> Gen.map char\n\nlet genVertex len = Gen.arrayOfLength len nucl |> Gen.map (fun c -> String(c))\nlet vertices len number = Gen.arrayOfLength number (genVertex len) |> Gen.map Array.distinct\n\nlet graphGen len number =\nlet verts = vertices len number\nlet rnd = Random(int DateTime.UtcNow.Ticks)\nlet pickFrom = verts |> Gen.map (fun lst -> lst.[rnd.Next(lst.Length)])\nlet pickTo = Gen.sized (fun n -> Gen.listOfLength (if n = 0 then 1 else n) pickFrom)\n\nGen.sized\n<|\n(fun n ->\nGen.map2\n(fun from to' ->\nfrom, (to' |> Seq.reduce (fun acc v -> acc + \", \" + v))) pickFrom pickTo\n|>\nGen.arrayOfLength (if n = 0 then 1 else n)\n|> Gen.map (Array.distinctBy fst)\n|> Gen.map (fun arr -> arr |> Array.map (fun (a, b) -> a + \" -> \" + b))\n)\n|> Gen.map StrGraph.FromStrings\n\n\n\nThis whole module cascades different FsCheck generators to create a random graph.\nThe simplest of them nucl, generates a random character. (Its name comes from the fact that originally I wanted to limit the alphabet to just four nucleotide characters A, C, G, T). Then this generator is used by genVertex to generate a random string vertex, and finally vertices creates an array of distinct random vertices.\n\ngraphGen creates a sequence of strings that FromStrings (above) understands. 
It first creates a string of \u201cinbound\u201d vertices and then adds an outbound vertex to each such string.\n\nSampling is a little tricky, for instance, the first parameter to the Sample function, which, per documentation, controls sample size, in this case is responsible for complexity and connectivity of the resulting graphs.\n\n#### On to Euler\u2026\n\nThe script above also specifies a couple of optional parameters to the visualizer: into will mark any vertex that has into or more inbound connections in green. And out will do the same for outbound connections and yellow. If the same vertex possesses both properties, it turns blue.\n\nInspired by all this success, I now want to write a function that would generate Eulerian graphs. The famous theorem states that being Eulerian (having an Euler cycle) for a directed graph is equivalent to being strongly connected and having in-degree of each vertex equal to its out-degree. Thus, the above properties of the visualizer are quite helpful in confirming that the brand new generator I have written for Eulerain graphs (GenerateEulerGraph) is at the very least on track:\n\n\nlet gre = StrGraph.GenerateEulerGraph(10, 5)\ngre.Visualize(into=3, out=3)\n\n\n\nVery encouraging! Whatever has at least 3 edges out, has at least 3 edges in. Not a definitive test, but the necessary condition of having only blue and transparent vertices in the case of an Eulerian graph is satisfied.\n\nIn the next post \u2013 more about Eulerian graphs, de Brujin sequences, building (and visualizing!) de Bruijn graphs, used for DNA sequence assembly.\n\nCategories: CUDA, data visualization, F#, Graphs\n\n## Walking the Euler Path:\u00a0Intro\n\n### Source Code\n\nI\u2019m thinking about a few posts in these series going very fast through the project. 
The source is on my GitHub, check out the tags since the master branch is still work in progress.\n\n### Experimenting with Graph Algorithms with F# and GPU\n\nGraphs play their role in bioinformatics which is my favorite area of computer science and software engineering lately. This relationship was the biggest motivator behind this project.\n\nI have been experimenting with a few graph algorithms trying to parallelize them. This is interesting because these algorithms usually resist parallelization since they are fast in their serial version running in O(|E|) or O(|E| + |V|) time (E \u2013 the set of edges, V \u2013 the set of vertices of the graph). And of course I use any excuse to further explore the F# language.\n\n### Representation\n\nThe object of this mini-study is a directed unweighted graph. The choice to represent it is simple: adjacency list or incidence matrix. Since I had CUDA in mind from the start, the latter was chosen, and since I had large graphs in mind, hundreds of millions, possibly billions of edges (limited only by the .NET object size: is it still a problem? I haven\u2019t checked, and by the size of my GPU memory), sparse matrix data structure was picked.\n\n#### Sparse Matrix Implementation\n\nI first wrote a very bare-bones sparse matrix class, just to get my feet wet. Of all possible representations for a sparse matrix, I chose CSR (or CSC which is the transposition of CSR), the idea is intuitive and works great for a directed graph incidence matrix.\n\nBriefly (taking CSR \u2013 Compressed Sparse Row as an example), we represent our matrix in 3 arrays: V, C, R. V \u2013 the array of non-zero values, written left-to-right, top-to-bottom. C \u2013 the array of column indices of the values in V. And C \u2013 the \u201cboundary\u201d, or \u201crow index\u201d array, built as follows: We start by recording the number of non-zero values per row in each element of R, starting with R[1]. R[0] = 0. 
Then we apply the scan operation (like the F# Seq.scan) to the row array, to produce the final result. The resulting array contains m + 1 (m \u2013 number of rows in the matrix) elements, its last entry equals the total number of non-zero values in the matrix). This array is used as a \u201cslicer\u201d or \u201cindexer\u201d into the column\/value arrays: non-zero columns of row $i$ will be located in arrays V and C at the indices starting from R[i] and ending at R[i + 1] \u2013 1. This is all pretty intuitive.\n\n#### Overcoming F# Strong Typing\n\nF# is a combination of strong typing and dynamic generic resolution, which makes it a challenge when you need to write a template for which it is natural to be resolved at compile time. Then sweet memories of C++ or Python invade\u2026 There exists a way to overcome all that and it is not pretty. To implement it I needed the old F# PowerPack with INumeric included. Then I just coded the pattern explained in the blog post:\n\n\/\/ SparseMatrix.fs\n\n\/\/\/ <summary>\n\/\/\/ Sparse matrix implementation with CSR and CSC storage\n\/\/\/ <\/summary>\n[<StructuredFormatDisplay(\"{PrintMatrix}\")>]\ntype SparseMatrix<'a> (ops : INumeric<'a>, row : 'a seq, rowIndex : int seq, colIndex : int seq, rowSize, isCSR : bool) =\n\n....\n\nstatic member CreateMatrix (row : 'a []) (isCSR : bool) =\nlet ops = GlobalAssociations.GetNumericAssociation<'a>()\nlet colIdx, vals =\nArray.zip [|0..row.Length - 1|] row\n|> Array.filter (fun (i, v) -> ops.Compare(v, ops.Zero) <> 0)\n|> Array.unzip\n\nSparseMatrix(ops, vals, [0; vals.Length], colIdx, row.Length, isCSR)\n\n\nThe idea is to use the GlobalAssociations to smooth-talk the compiler into letting you do what you want. 
The pattern is to not directly use the constructor to create your object, but a static method instead, by means of which this \u201ccompiler-whispering\u201d is hidden from the user.\n\nMy sparse matrix is built dynamically: it is first created with a single row through a call to CreateMatrix and then rows can be appended to it by calling AddValues row. The idea is to allow creation and storage of huge matrices dynamically. These matrices may be stored in large files for which representation in dense format in memory may not be feasible.\n\n#### Representing the graph\n\nSo, at which point does it make sense to use a sparse matrix instead of a dense one in CSR\/CSC? It\u2019s easy to figure out:\n\nIf we have a matrix $|M| = m \\cdot n$, then the answer is given by the equation: $m \\cdot n > 2 \\cdot e + m + 1$, here $e$ is the number of non-zero elements in the matrix.\n\nFor a graph $G=(V, E)$ the set V takes a place of rows, and E \u2013 that of columns. The above inequality becomes: $v \\cdot e > e + v + 1 \\ (v = |V|,\\ e = |E|)$, so our sparse structure becomes very economical for large, not to mention \u201creally huge\u201d graphs. 
(We don\u2019t have the values array anymore, since all our values are just 0s and 1s).\n\nAnd so the graph is born:\n\n\n[<StructuredFormatDisplay(\"{AsEnumerable}\")>]\ntype DirectedGraph<'a when 'a:comparison> (rowIndex : int seq, colIndex : int seq, verticesNameToOrdinal : IDictionary<'a, int>) as this =\nlet rowIndex = rowIndex.ToArray()\nlet colIndex = colIndex.ToArray()\nlet nEdges = colIndex.Length\nlet verticesNameToOrdinal = verticesNameToOrdinal\nlet nVertices = verticesNameToOrdinal.Count\n\n\/\/ vertices connected to the ordinal vertex\nlet getVertexConnections ordinal =\nlet start = rowIndex.[ordinal]\nlet end' = rowIndex.[ordinal + 1] - 1\ncolIndex.[start..end']\n\n\nThis is not very useful, however, since it assumes that we already have rowIndex for the CSR type \u201cR\u201d and colIndex for the \u201cC\u201d arrays. It's like saying: \"You want a graph? So, create a graph!\". I would like to have a whole bunch of graph generators, and I do. I placed them all into the file Generators.fs.\n\nThis is a good case for using type augmentations. When we need to implement something that \u201clooks good\u201d on the object, but doesn\u2019t really belong to it.\nIn the next post I\u2019ll talk about visualizing things, and vsiualization methods really have nothing to do with the graph itself. 
Nevertheless, it is natural to write:\n\nmyGraph.Visualize(euler=true)\n\n\nVisualize(myGraph, euler=true)\n\n\nSo we use type augmentations, for instance, going back to the generators:\n\n\n\/\/Generators.fs\n\ntype Graphs.DirectedGraph<'a when 'a:comparison> with\n\/\/\/ <summary>\n\/\/\/ Create the graph from a file\n\/\/\/ <\/summary>\n\/\/\/ <param name=\"fileName\"><\/param>\nstatic member FromFile (fileName : string) =\n\nif String.IsNullOrWhiteSpace fileName || not (File.Exists fileName) then failwith \"Invalid file\"","date":"2018-02-21 14:54:09","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 14, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.3889710605144501, \"perplexity\": 2477.3597244574285}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-09\/segments\/1518891813626.7\/warc\/CC-MAIN-20180221143216-20180221163216-00343.warc.gz\"}"}
null
null
Kyrkskolan i Huddinge var Huddinge sockens första fasta folkskola som invigdes i Fullersta den 1 juli 1847 i en ombyggd fattigstuga. Därmed hade Huddinge inrättat en modern folkskoleundervisning fem år efter att den svenska folkskolestadgan trädde i kraft 1842. År 1914 invigdes den kyrkskolan vid nuvarande Kommunalvägen i Huddinge kommun vars byggnad fortfarande finns kvar. På 1950-talets mitt var skolan rivningshotad men fick vara kvar och utgör idag en värdefull kulturmiljö i Huddinge kommun. Här bedrev kommunen skolverksamhet en bit in på 2000-talet. Numera har Huddinge församling bland annat sin kör- och konfirmandundervisning här. Första Kyrkskolan När skolundervisningen började i Huddinge socken var det en kyrklig angelägenhet. I december 1845 bestämde skolstyrelsen inom kyrkorådet att en fast skola för 100 barn skulle inrättas och som skolbyggnad skulle man använda fattighuset strax söder om Huddinge kyrka. Efter en till- och påbyggnad fanns i bottenvåningen "bostad för sockens barnmorska, herberge för långwäga Scholbarn, bagarstufwa och boningsrum för 12 fattige". I övre våningen inrättades "Scholsal 15 alnar lång, 11 alnar bred med boställe för scholläraren af två rum och kök". Den 28 juni 1847 samlades ett stort antal barn och föräldrar vid den nyrenoverade skolbyggnaden intill kyrkan för inskrivning och den 1 juli 1847 invigdes skolan, bara fem år efter att den svenska folkskolestadgan hade trätt i kraft. På det äldsta fotografiet av en Huddingeskola som togs 1862 eller 1863 av hovfotografen Johannes Jaeger syns den till skola ombyggda fattigstugan intill Huddinge kyrka (som då fortfarande var utan torn). Därefter genomgick skolbyggnaden ytterligare ombyggnader och förändringar. Snart behövdes fler skolsalar för Huddinges växande barnaskara. Därför hyrde man under åren 1878 till 1886 ett rum för småskoleverksamhet i den närbelägna stugan Nyboda (se Nyboda hembygds- och skolmuseum). 
Andra Kyrkskolan I mars 1884 beslutade en extra kyrkostämma att förbättra Huddinges skolsituation. Beslutet innebar att församlingen skulle bekosta och låta bygga fem nya skolor, bland annat vid Huddinge kyrka och mellan Vårby gård och Juringe gård (se Vårby skola). Det skulle dock dröja 30 år tills den nya Kyrkskolan invigdes år 1914. Skolhuset uppfördes vid nuvarande Kommunalvägen 34. På bottenvåningen fanns två lärosalar samt en gymnastiksal som inrymdes i den låga byggnadskroppen. På övervåningen inrättades ytterligare två salar. Planlösningen var nästan identisk med Segeltorpsskolans första skolhus som uppfördes fyra år senare, med undantag för gymnastiksalen som Segeltorpsskolan först fick 1927. Med sitt brutna sadeltak och de svängda port- och fönsteröppningar till veranda och vindfång liknade Kyrkskolan och Segeltorpsskolan varann även exteriört. Vid invigningen fällde prosten de legendariska orden: "Huddinges behov av skolbyggnader är nu för all framtid tillgodosett". Men redan efter några år blev Kyrkskolan för liten, trots klasser med större antal barn och undervisning i förmiddags- och eftermiddagsskift. För att lätta på trycket öppnade 1923 Centralskolan (namnet ändrades på 1940-talet till Tomtbergaskolan) med sex skolsalar och specialsalar. Hösten 1931 fanns fyra klasser i Kyrkskolan och 21 i Centralskolan. Då hade skolväsendet i Huddinge övergått från en kyrklig, välvillig institution till kommunalt huvudmannaskap. 1941 kom Kyrkskolan i kommunens ägo. I mitten på 1950-talet hade kyrkskolan fortfarande fyra klassrum, men byggnaden var nergången och rivningshotad. I en fastställd stadsplan från år 1955 hette det bland annat: "…detta vackra parklandskap som inom sig innesluter kyrkan och prästgården har i sitt nuvarande skick […] två skönhetsfläckar, nämligen Telegrafverkets hus och Gamla folkskolan vid Kommunalvägen. 
Ur helhetsbildens synpunkt vore det önskvärt att båda dessa byggnader i framtiden kunde rivas…" Kyrkskolan levde dock vidare och hörde 1963 tillsammans med Hörningsnäs och Stenängens skolor till "Södra rektorsområdet". Byggnaden upprustades i början av 1980-talet och blev då en värdefull kulturmiljö i Huddinge kommun. 1991 såldes den då kommunalägda Kyrkskolan till Svenska kyrkan i Huddinge som lät genomföra en invändig ombyggnad och modernisering. Under 1990-talets andra hälft fick Huddinges kommunala musikskola tillgång till kyrkskolans lokaler. Numera bedriver Huddinge församling bland annat sin kör- och konfirmandundervisning här. Första Kyrkskolan "Klockargården" rivs Huddinges första kyrkskola, den ombyggda fattigstugan från 1847, kallades senare Klockargården och fungerade under några år som kommunalhus innan nuvarande Huddinge kommunalhus stod klar 1948. Byggnaden revs slutligen i början av 1960-talet och på platsen (kvarteret Klockargården) uppfördes nuvarande anläggning "Klockargården" för kyrkans församling, ritad av arkitekt Sture Frölén. Idag har Huddinge församling sin verksamhet i Huddinge kyrka, Klockargården, Kyrkskolan och Tomtberga kapell. Bilder Referenser Noter Tryckta källor Externa länkar Byggnader i Huddinge kommun Grundskolor i Sverige Sjödalen-Fullersta (kommundel)
{ "redpajama_set_name": "RedPajamaWikipedia" }
532
{"url":"http:\/\/accesspediatrics.mhmedical.com\/content.aspx?bookid=457&sectionid=40092809","text":"Chapter 1\n\n1. First-trimester screening. Maternal serum can be analyzed for certain biochemical markers that, in combination with ultrasound measurement of the fetal nuchal translucency (NT), can be used to calculate a risk assessment for trisomies 18 and 21. In the first trimester, these serum markers are the free \u03b2-human chorionic gonadotropin (hCG) and pregnancy-associated plasma protein A (PAPP-A). Free \u03b2-hCG is elevated and PAPP-A is decreased in a pregnancy affected by Down syndrome. First-trimester screening is an effective screening tool, with a detection rate of 82\u201387% for trisomy 21 with a 5% false-positive screen rate. First-trimester screening is performed between 10 4\/7 and 13 6\/7 weeks' gestation and requires confirmation of a chromosomal abnormality by an invasive genetic test. The results are available to the patient early enough that chorionic villus sampling (CVS) is an option for diagnostic testing.\n\n2. Second-trimester screening. For patients who present after 13 6\/7 weeks or choose not to undergo first-trimester screening, the quadruple screen test (Quad screen) is an option. The Quad screen yields a risk assessment for trisomies 18 and 21; unlike first-trimester screening, though, it also provides a risk assessment for open neural tube defects. It involves analyzing levels of maternal serum alpha fetoprotein (MSAFP), total hCG, unconjugated estriol, and inhibin A between 15 and 21 weeks' gestation. In a pregnancy affected by Down syndrome, both MSAFP and unconjugated estriol are low; hCG and inhibin A are elevated. The Quad screen has a detection rate of 81% for Down syndrome at a 5% false-positive screen rate. 
Like first-trimester screening, the Quad screen requires an invasive test to confirm the diagnosis of a chromosomal abnormality (ie, amniocentesis).\n\nFor those patients who chose to undergo first-trimester screening and\/or a CVS, neural tube defect screening in the form of a second-trimester MSAFP level should be offered. This maternal serum analyte is elevated in the presence of an open neural tube defect. Evidence exists that focused ultrasound during the second trimester is an effective tool for detecting an open neural tube defect.\n\n3. Sequential, contingent sequential, and integrated screening. These options involve a combination of first- and second-trimester screening. Specifically with the integrated screen, an ultrasound measurement of the fetal NT is performed between 10 4\/7 and 13 6\/7 weeks. In addition, maternal serum levels of PAPP-A are obtained in the first trimester and the Quad screen obtained in the second trimester. The results are reported when all the tests are complete. In sequential screening, NT, PAPP-A, and free \u03b2-hCG are measured in the first trimester followed by a Quad screen in the second trimester. The results are reported to the patient after completion of the first-trimester portion of the test and then again after the second-trimester portion. Although this test has a high sensitivity, it has a high false-positive rate because it involves two independent tests. The contingent sequential screen does the first-trimester portion of the sequential testing and follows with the Quad screen when an elevated risk is noted. The contingent sequential screening has an ...\n\nSign in to your MyAccess profile while you are actively authenticated on this site via your institution (you will be able to verify this by looking at the top right corner of the screen - if you see your institution's name, you are authenticated). 
Once logged in to your MyAccess profile, you will be able to access your institution's subscription for 90 days from any location. You must be logged in while authenticated at least once every 90 days to maintain this remote access.\n\nOk\n\nSubscription Options\n\nAccessPediatrics Full Site: One-Year Subscription\n\nConnect to the full suite of AccessPediatrics content and resources including 20+ textbooks such as Rudolph\u2019s Pediatrics and The Pediatric Practice series, high-quality procedural videos, images, and animations, interactive board review, an integrated pediatric drug database, and more.","date":"2017-02-21 12:10:55","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.25392112135887146, \"perplexity\": 5708.285965588717}, \"config\": {\"markdown_headings\": false, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2017-09\/segments\/1487501170708.51\/warc\/CC-MAIN-20170219104610-00313-ip-10-171-10-108.ec2.internal.warc.gz\"}"}
null
null
\section{Introduction} Bringing intelligence close to the sensors is an effective strategy to meet the energy requirement of battery-powered devices for always-ON applications~\cite{Alioto2017}. Power-optimized solutions for near-sensor processing aim at reducing the amount of data to be dispatched out from the sensors. Local data analysis can compress the data down to even a single bit in case of a binary classifier, hence massively reducing the output bandwidth and energy consumption over raw sensor data communication~\cite{Rusci2016}. In the context of visual sensing, novel computer vision chips feature embedded processing capabilities to reduce the overall energy consumption~\cite{Rodriguez2017}. By placing computational modules within the sensor, mid- and low- level visual features can be directly extracted and transferred to a processing unit for further computation or used to feed a first stage classifier. Moreover, by integrating analog processing circuits on the focal-plane, the amount of data crossing the costly analog-to-digital border is reduced~\cite{Likamwa2016}. If compared with a camera-based system featuring a traditional imaging technology, this approach has a lower energy consumption because of (a) a reduced sensor-to-processor bandwidth and (b) a lower demand for digital computation~\cite{Zhang2016}. Relevant examples of mixed-signal smart capabilities include the extraction of spatial and temporal features, such as edges or frame-difference maps, or a combination of them~\cite{Fernandez2014}. Because of the employed highly optimized architectures, the power consumption of smart visual chips results to be more than one order of magnitude lower than off-the-shelf traditional image sensors~\cite{Gottardi2009}. However, to favor the meeting between smart ultra-low power sensing and deep learning, which is nowadays the leading technique for data analytics, a further step is required. 
At present, the high computational and memory requirement of deep learning inference models have prevented a full integration of these approaches close to the sensor at an ultra low power cost~\cite{Likamwa2016,andri2017yodann}. A big opportunity for pushing deep learning into low-power sensing come from recently proposed Binarized Neural Networks (BNNs)~\cite{Courbariaux2016,Rastegari2016}. When looking at the inference task, a BNN consists of logical XNOR operations, binary popcounts and integer thresholding. Therefore, major opportunities arise for hardware implementation of these models as part of the smart sensing pipeline~\cite{Umuroglu2017}. In this paper, we explore the feasibility of deploying BNNs as a front-end for an ultra-low power smart vision chip. The combination of mixed-signal processing and hardware BNN implementation represents an extremely energy-efficient and powerful solution for always-ON sensing, serving as an early detector of interesting events. Therefore, we design and synthesize a purely combinational hard-wired BNN, which is fed with the binary data produced by a mixed-signal ultra-low power imager~\cite{Gottardi2009}. The main contributions of this paper are: \begin{itemize} \item The hardware design and logic synthesis of a combinational BNN architecture for always-ON near-sensor processing. \item The area and energy evaluation of the proposed approach, for varying network models and configurations. \end{itemize} We evaluate two BNN models with 16$\times$ 16 and 32$\times$ 32 binary input size, either with fixed or variable parameters. 
In case of a combinational BNN with 32$\times$ 32 input data and hardwired parameters, our synthesis results in GlobalFoundries 22\,nm SOI technology shows an area occupancy of 2.61\,\si{\square\milli\meter}{}, which is 2.2$\times${} smaller than the model with variable parameters, and features a 10$\times${} higher energy efficiency with respect to comparable techniques for deep learning-based near-sensor processing. Moreover, our study paves the way for exploring a new generation of logic synthesis tools---aimed at aggressively optimizing deep binarized networks and enabling focal-plane processing of images with higher resolution. \section{Related Work} Several proposed smart imaging chips for always-ON applications embed mixed-signal processing circuits for extracting basic spatial and temporal features directly on the sensor die~\cite{Choi2014,Fernandez2014,Kim2013}. Recent approaches tried to push deep learning circuits to the analog sensor side to exploit the benefits of focal-plane processing~\cite{Rodriguez2017}. The work presented in~\cite{Chen2016} makes use of angle-sensitive pixels, integrating diffraction gratings on the focal plane. Based on the different orientations of the pixel-level filters, multiple feature maps are locally computed as the first layer of a convolutional network. A sensing front-end supporting analog multiplication is proposed in~\cite{Lee2017}. They introduce a MAC unit composed of only passive switches and capacitors to realize a switched-capacitor matrix multiplier, which achieves an energy efficiency of 8.7\,TOp/s/W when running convolution operations. RedEye~\cite{Likamwa2016} embeds column-wise processing pipelines in the analog domain to perform 3D convolutions before of the digital conversion. The chip is implemented in 0.18\,$\mu$m technology and needs 1.4\,mJ to process the initial 5 layers of GoogLeNet, leading to an energy efficiency of less than 2\,TOp/s/W. 
With respect to these focal-plane analog approaches, we leverage the potentiality of BNNs to deploy a digital and optimized purely combinational network to notably increase the energy efficiency of near-sensor processing circuits. Many neural network accelerators have been reported in the literature, most of them with an energy efficiency in the range of few TOp/s/W~\cite{du2015shidiannao,cavigelli2017origami,sze2017efficient}. Several recent approaches have focused on quantizing the weights down to binarization in order to gain a significant advantage in memory usage and energy efficiency~\cite{sze2017efficient,andri2017yodann}, pushing it up to around 60\,TOp/s/W while advances in training methods have achieved accuracy losses of less than 1\% for this setup. A new approach has been to quantize also the activations down to binary with initial accuracy losses of up to 30\% on the ILSVRC dataset, these have improved to around 11\% over the last two years and even less for smaller networks on datasets such as CIFAR-10 and SVHN~\cite{Courbariaux2016,Rastegari2016,sze2017efficient}. During this time, some VLSI implementations have been published, most of them targeting FPGAs such as the FINN framework~\cite{Umuroglu2017,intel2016}. Only few ASIC implementations exist~\cite{intel2016,BRein2017,xnorpop2017}, of which XNOR-POP uses in-memory processing and reports the highest energy efficiency of 21.1\,TOp/s/W and thus less than the best binary-weight-only implementation. \section{Combinational Hardware BNN Design} BNNs feature a single-bit precision for both the weights and the activation layers when performing inference. This makes the approach promising for resource-constrained devices, also considering the intrinsic 32$\times$ memory footprint reduction with respect to baseline full-precision models. When applying the binarization scheme to a Convolutional Neural Network (CNN), a BNN features a stacked architecture of binary convolutional layers. 
Every layer transforms the $IF$ binary input feature maps into the $OF$ binary output feature maps through the well-known convolution operation. Because of the binary domain \{0,1\} of both the input data and the weight filters, the convolution kernel can be rewritten as \begin{equation} \label{eq:conv_bin} \varphi(m,x,y) = \mathrm{popcount} (\textit{weights(m)}\;\mathrm{xnor}\; \textit{recField(x,y)}), \end{equation} where $\varphi(m,x,y)$ is the result of the convolution, $\textit{weights(m)}$ is the array of binary filter weights and $\textit{recField(x,y)}$ is the receptive field of the output neuron located at position $(x,y)$ of the $m$-th output feature map. The $\mathrm{popcount}(\cdot)$ function returns the numbers of asserted bits of the argument. Note that the convolution output $\varphi(m,x,y)$ is an integer value. As presented by~\cite{Courbariaux2016}, the popcount result is binarized after a batch normalization layer. However, the normalization operation can be reduced to a comparison with an integer threshold, \begin{figure} \centering \includegraphics[width=1\linewidth]{figures/BinConv.png} \caption{Binary convolution flow for every convolutional layer. For any of the OF output feature maps, the binary value at position $(x,y)$ is produced by overlapping the $m$-th weight filter to the array of the receptive field of the input feature map centered at the spatial position $(x,y)$.} \label{fig:BinaryConv} \end{figure} \begin{equation} \label{eq:bin_opt} outMap(m,x,y) = \begin{cases} \varphi(m,x,y) \ge thresh(m) & \text{if } \gamma > 0 \\ \varphi(m,x,y) \le thresh(m) & \text{if } \gamma < 0 \\ 1 \qquad \text{if } \gamma = 0 \text{ and } \beta \ge 0\\ 0 \qquad \text{if } \gamma = 0 \text{ and } \beta < 0 \end{cases}, \end{equation} where $thresh(m)$ is the integer threshold that depends on the convolution bias $b$ and on the parameters learned by the batch normalization layer $\mu$, $\gamma$, $\sigma$ and $\beta$. 
After training the network, the $thresh(m)$ parameters are computed offline as $\lfloor \mu - b - \beta \cdot \sigma/ \gamma \rfloor$ if $ \gamma > 0$ or $\lceil \mu - b - \beta \cdot \sigma/ \gamma \rceil$ if $\gamma < 0$. \figref{fig:BinaryConv} graphically schematizes the binary convolution kernel. The \textit{BinConv} module applies \eqref{eq:conv_bin} and \eqref{eq:bin_opt} over the receptive field values of the output neuron $outMap(m,x,y)$. To build a convolutional layer, the \textit{BinConv} is replicated for every output neuron. The hardware architecture of a \textit{BinConv} element is shown in \figref{fig:BinaryConvBlock}. The input signals $\textit{recField(x,y)}$, $\textit{weights(m)}$ and $\textit{thresh(m)}$ and the output signal $\textit{outMap(m,x,y)}$ of the block refer to \eqref{eq:conv_bin} and \eqref{eq:bin_opt}. Additionally, the $\textit{sign(m)}$ signal drives the selection of the correct output neuron's value depending on the batch normalization parameters (eq. \eqref{eq:bin_opt}). The network parameters, \emph{weights, thresh} and \emph{sign}, highlighted in red, can be stored in a memory block, to allow online reconfiguration, or can be fixed at design time. In total, the memory footprint required to store the parameters of a convolutional layer is $OF \cdot (IF\cdot kw \cdot kh + \lfloor \log_2(IF\cdot kw \cdot kh) \rfloor + 3 )$ bits. Despite the reduced reconfigurability, relevant benefits in terms of silicon occupation arise when hard-wiring the binary weights. In this case, the synthesis tool plays a major role to enable the implementability of the model. The synthesis tool has to exploit the optimizations based on a high-level abstract HDL description of the network. \begin{figure} \centering \includegraphics[width=0.93\linewidth]{figures/binConvBlock.png} \caption{Hardware architecture of the combinational building block for computing binary convolutions. 
Every \textit{binConv(m,x,y)} module instantiated within a convolutional layer produces the binary value of the output neuron at location $(x,y)$ of the $m$-th output feature map.} \label{fig:BinaryConvBlock} \end{figure} To explore the feasibility of deep combinational BNNs, we focus on VGG-like network topologies as in~\cite{Courbariaux2016}. These networks include convolutional layers with a small filter size (typically $kw=kh=3$) and an increasing feature dimension going deeper into the network. The spatial dimension tends to decrease by means of strided pooling operations placed after the binary convolution of \eqref{eq:conv_bin}. Following the intuition of~\cite{Umuroglu2017}, a MaxPooling layer can be moved behind the binarization by replacing the MAX with an OR operation among the binary values passing through the pooling filter. The VGG-like topology features multiple fully-connected layers. Their hardware implementation is similar to the \textit{binConv} module of \figref{fig:BinaryConvBlock}, where the convolutional receptive field contains all the input neurons of the layer. The last fully-connected layer generates a confidence score for every class. Differently from the original BNN scheme, our network architecture is fed with a binary single-layer signal coming from a mixed-signal imager~\cite{Gottardi2009}. However, the presented approach also holds for multi-channel imagers. \subsection{Estimating Area} \label{sec:areaEst} Before looking at synthesis results, we estimate the area of a binary convolutional layer. For each output value (output pixel and feature map, $N_\mathrm{out}=H\cdot W\cdot OF$), we have a receptive field of size $N_\mathrm{RF}=IF\cdot kw\cdot kh$ and thus need a total of $\NoutN_\mathrm{RF}$ XNOR gates. These are followed by popcount units---adder trees summing over all $N_\mathrm{RF}$ values in the receptive field. 
The resulting full-precision adder trees require $\sum_{i=1}^{\log_2(N_\mathrm{RF})} N_\mathrm{RF} 2^{-i}=N_\mathrm{RF}-1$ half-adders and $\sum_{i=1}^{\log_2(N_\mathrm{RF})} (i-1) N_\mathrm{RF} 2^{-i}=N_\mathrm{RF}-\log_2(N_\mathrm{RF})-1$ full-adders each, and are replicated for every output value. The subsequent threshold/compare unit is insignificant for the total area. To provide an example, we look at the first layer of the network for $16\times 16$ pixel images with 1 input and 16 output feature maps and a $3\times 3$ filter ($N_\mathrm{RF}=9, N_\mathrm{out}=4096$). Evaluating this for the GF22 technology with $A_\mathrm{XNOR}=0.73\,\mu\mathrm{m}^2$, $A_\mathrm{HA}=1.06\,\mu\mathrm{m}^2$ and $A_\mathrm{FA}=1.60\,\mu\mathrm{m}^2$, we obtain an area of $A_\mathrm{XNOR,tot}=0.027\,\mathrm{mm}^2$, $A_\mathrm{HA,tot}=0.033\,\mathrm{mm}^2$ and $A_\mathrm{FA,tot}=0.029\,\mathrm{mm}^2$---a total of 0.089\,mm${}^2$. Note that this implies that the area scales faster than linearly with respect to the size of the receptive field $N_\mathrm{RF}$ since the word width in the adder tree increases rapidly. This is not accounted for in the widely used GOp/img complexity measure for NNs, as it is becoming only an issue in this very low word-width regime. 
\section{Experimental Results} \begin{table} \caption{VGG-like BNN Models\tablefootnote{bConvLyr3x3($x$,$y$) indicates a binary convolutional layer with a 3$\times$ 3 filter, $x$ input and $y$ output feature maps, MaxP2x2 is a max pooling layer of size 2$\times$ 2, bFcLyr($x$, $y$) is a binary fully connected layer with $x$ binary input $y$ binary output binary neurons.}} \label{tab:bnnmodel} \centering \resizebox{1\columnwidth}{!}{ \begin{tabular}{cll} \toprule layer & Model with a 16$\times$ 16 input map & Model with a 32$\times$ 32 input map \\ \midrule 1 & bConvLyr3x3( 1,16)+MaxP2x2 & bConvLyr3x3( 1,16)+MaxP2x2\\ 2 & bConvLyr3x3(16,32)+MaxP2x2 & bConvLyr3x3(16,32)+MaxP2x2\\ 3 & bConvLyr3x3(32,48)+MaxP2x2 & bConvLyr3x3(32,48)+MaxP2x2\\ 4 & bFcLyr(192,64) & bConvLyr3x3(48,64)+MaxP2x2\\ 5 & bFcLyr( 64, 4) & bFcLyr(256,64)\\ 6 & & bFcLyr( 64, 4)\\ \bottomrule \end{tabular} } \end{table} \subsection{BNN Training} The experimental analysis focuses on two VGG-like network topologies described in \tblref{tab:bnnmodel} to investigate also the impact of different input and network size. As a case-study, we trained the networks with labelled patches from the MIO-TCD dataset~\cite{MIO-TCD} belonging to one of the following classes: cars, pedestrians, cyclist and background. The images from the dataset are resized to fit the input dimension before applying a non-linear binarization, which simulates the mixed-signal preprocessing of the sensor~\cite{Gottardi2009}. By training the BNNs with ADAM over a training set of about 10ksamples/class (original images are augmented by random rotation), the classification accuracy against the test-set achieves 64.7\% in case of the model with 32$\times$ 32 input data, while a 50\% is measured for the 16$\times$ 16 model because of the smaller input size and network. 
Since this work focuses on hardware synthesis issues of BNN inference engines, we do not explore advanced training approaches for NNs with non-traditional input data, which have been discussed in the literature~\cite{Jayasuriya2016}. \subsection{Synthesis Results} We analyze both aforementioned networks for two configurations, with weights fixed at synthesis time and with variable weights (excl. storage, modeled as inputs). The fixed weights are taken from the aforementioned trained models. \begin{table} \centering \caption{Synthesis and Power Results for Different Configurations} \label{tbl:synthOverview} \begin{threeparttable} \begin{tabularx}{\linewidth}{@{\hskip 1mm}l@{\hskip 2mm}c@{\hskip 2mm}r@{\hskip 2mm}r@{\hskip 2mm}r@{\hskip 2mm}r@{\hskip 3mm}r@{\hskip 2mm}r@{\hskip 2mm}r@{\hskip 1mm}} \toprule & &\multicolumn{2}{c}{------\,area\,------}& \multicolumn{2}{c}{---\,time/img\,---} & E/img & leak. & E-eff. \\ netw. & type & [\si{\square\milli\meter}] & [MGE]\tnote{\dag} & [ns] & [FO4]\tnote{\ddag} & [nJ] & [\si{\micro\watt}{}] & [TOp/J] \\ \midrule 16$\times$ 16 & var. & 1.17 & 5.87 & 12.82 & 560 & 2.40 & 945 & 470.8 \\ 16$\times$ 16 & fixed & 0.46 & 2.32 & 12.40 & 541 & 1.68 & 331 & 672.6 \\ 32$\times$ 32 & var. & 5.80 & 29.14 & 17.27 & 754 & 11.14 & 4810 & 479.4 \\ 32$\times$ 32 & fixed & 2.61 & 13.13 & 21.02 & 918 & 11.67 & 1830 & 457.6 \\ \bottomrule \end{tabularx} \begin{tablenotes} \item[\dag] Two-input NAND-gate size equivalent: $1\,\mathrm{GE}=0.199\,\mu\mathrm{m}^2$ \item[\ddag] Fanout-4 delay: $1\,\mathrm{FO4}=22.89\,\mathrm{ps}$ \end{tablenotes} \end{threeparttable} \end{table} We provide an overview of synthesis results for different configurations in \tblref{tbl:synthOverview}. We synthesized both networks listed in \tblref{tab:bnnmodel} in GlobalFoundries 22\,nm SOI technology with LVT cells in the typical case corner at 0.65\,V and 25$^{\circ}$C. 
The configuration with variable weights scales with the computational effort associated with the network (1.13\,MOp/img and 5.34\,MOp/img for the 16$\times$ 16 and 32$\times$ 32 networks) with 0.97 and 0.92\,MOp/cycle/\si{\square\milli\meter}{}, respectively. The variable parameters/weights configuration does not include the storage of the parameters themselves, which would add \SI{1.60}{\square\micro\meter} (8.0\,GE) per FF which could be loaded through a scan-chain without additional logic cells (from some flash memory elsewhere on the device). Alternatively, non-volatile memory cells could be used to store them. The number of parameters is 33 and 65\,kbit and thus 0.05\,\si{\square\milli\meter}{} (264\,kGE) and 0.10\,\si{\square\milli\meter}{} (520\,kGE) for the 16$\times$ 16 and 32$\times$ 32 network, respectively. \begin{table} \centering \caption{Area Breakdown for the 16$\times$ 16 Network} \label{tbl:areaDetail2} \begin{tabular}{cr|rrr} \toprule & compute & area estim. & var. weights & fixed weights \\ layer & [kOp/img] & [\si{\square\milli\meter}] & area [\si{\square\milli\meter}] & area [\si{\square\milli\meter}] \\ \midrule 1 & 74 ( 6.5\%) & 0.093 & 0.077 ( 6.6\%) & 0.008 ( 1.7\%) \\ 2 & 590 (52.2\%) & 0.971 & 0.647 (55.4\%) & 0.204 (44.3\%) \\ 3 & 442 (39.1\%) & 0.738 & 0.417 (35.8\%) & 0.241 (52.3\%) \\ 4 & 25 ( 2.2\%) & 0.041 & 0.026 ( 2.2\%) & 0.008 ( 1.7\%) \\ \bottomrule \end{tabular} \end{table} Looking at the more detailed area breakdown in \tblref{tbl:areaDetail2}, we can see that there is a massive reduction when fixing the weights before synthesis. Clearly, this eliminates all the XNOR operations which become either an inverter or a wire, and even the inverter can now be shared among all units having this particular input value in their receptive field. However, based on the estimates described in \secref{sec:areaEst}, this cannot explain all the savings. 
Additional cells can be saved through the reuse of identical partial results, which not only can occur randomly but must occur frequently. For example, consider 16 parallel popcount units summing over 8 values each. We can split the value into 4 groups with 2 values each. Two binary values can generate $2^2=4$ output combinations. Since we have 16 units of which each will need one of the combinations, they will on average be reused 4 times. This is only possible with fixed weights, otherwise the values to reuse would have to be multiplexed, thereby losing all the savings. Generally, we can observe that these already small networks for low-resolution images require a sizable amount of area, such that more advanced ad-hoc synthesis tools exploiting the sharing of weights and intermediate results are needed. \subsection{Energy Efficiency Evaluations} \begin{table} \centering \caption{Energy and Leakage Breakdown for the 16$\times$ 16 Network} \label{tbl:powerDetail} \begin{tabular}{crrrr} \toprule & \multicolumn{2}{c}{-------- var. weights --------} & \multicolumn{2}{c}{-------- fixed weights --------} \\ layer & energy/img [pJ] & leakage & energy/img [pJ] & leakage \\ \midrule 1 & 38 ( 1.6\%) & 68\,\si{\micro\watt} & 9 ( 0.5\%) & 8\,\si{\micro\watt} \\ 2 & 806 (33.7\%) & 547\,\si{\micro\watt} & 478 (28.5\%) & 152\,\si{\micro\watt} \\ 3 & 1440 (60.2\%) & 310\,\si{\micro\watt} & 1037 (61.9\%) & 163\,\si{\micro\watt} \\ 4 & 107 ( 4.5\%) & 20\,\si{\micro\watt} & 151 ( 9.0\%) & 7\,\si{\micro\watt} \\ \bottomrule \end{tabular} \end{table} We have performed post-synthesis power simulations using 100 randomly selected real images from the dataset as stimuli. The results are also reported in \tblref{tbl:synthOverview} while a detailed per-layer breakdown is shown in \tblref{tbl:powerDetail}. We see that the model with 32$\times$ 32 input has lower energy-efficiency and higher latency when fixing the weights, while the opposite is observed for the smaller model. 
We attribute this to the fact that synthesis is set to optimize for area and both, the critical path length and target power are unconstrained. These energy efficiency numbers are in the order of 10$\times${} higher than those of the next competitor YodaNN~\cite{andri2017yodann}. However, they are fundamentally different in the sense that YodaNN (a) runs the more complex binary weight networks, (b) requires additional off-chip memory for the weights and intermediate results, (c) can run large networks with a fixed-size accelerator, and (d) is in an older technology but doing aggressive voltage scaling. Given these major differences, a more in-depth comparison would require a redesign of YodaNN in 22\,nm and re-tuning to the single-channel input architecture we are using for comparison. Nevertheless, it is clear that these combinational BNNs are by far more efficient. When heavily duty-cycling a device, leakage can become a problem. In this case, we see 945\,\si{\micro\watt}{} and 331\,\si{\micro\watt}{} of leakage power, which might be significant enough in case of low utilization to require mitigation through power-gating or using HVT cells. Generally, voltage scaling can also be applied, not only reducing leakage, but also active power dissipation. The throughput we observe in the range of 50\,Mframe/s is far in excess of what is meaningful for most applications. Thus aggressive voltage scaling, power gating and the reverse body biasing available in this FD-SOI technology should be optimally combined to reach the minimum energy point where leakage and dynamic power are equal while the supply is ON. We expect these values to be highly dependent on the input data, since energy is consumed only when values toggle. While a single pixel toggling at the input might affect many values later in the network, it has been shown that rather the opposite effect can be seen: changes at the input tend to vanish deeper into the network~\cite{cbinfer}. 
A purely combinational implementation fully leverages this and BNNs naturally have a threshold that keeps small changes from propagating and might thus perform even better for many real-world applications. \subsection{Scaling to Larger Networks} Our results show an area requirement in the range of 2.05 to 2.46\,GE/Op and an average 1.9\,fJ/Op. Scaling this up to \SI{0.5}{\square\centi\meter}~(250\,MGE) of silicon and an energy consumption of only 210\,nJ/img, we could map networks of around 110\,MOp/img---this is already more than optimized high-quality ImageNet classification networks such as ShuffleNets require~\cite{zhang2017shufflenet}. \figref{fig:plot} shows the estimation and measurements of the silicon area corresponding to the synthesized BNNs for fixed and variable weights. We also consider a model with a larger 64$\times$ 64 input imager receptive field and a higher complexity (5 convolutional and 2 fully-connected layers, 23.05\,GOp/img). Such a model is more accurate on the considered classification task (73.6\%) but current synthesis tools cannot handle the high complexity of the design, using in excess of 256\,GB of memory. When estimating the area occupancy, the 64$\times$ 64 BNNs result to be 4.3$\times${} larger than the area estimated for the 32$\times$ 32 model. A direct optimization of such large designs is out of scope of today's EDA tools, clearly showing the need for specialized design automation tools for BNNs. \begin{figure} \centering \includegraphics[width=1\linewidth]{figures/AreaPlot.png} \caption{Silicon area estimation (in red) and measurements with variable (green) and fixed (blue) weights of three BNNs featuring a model complexity which scales depending on the imager resolution. 
The area occupation of the 64$\times$ 64 model is not reported because the synthesis tool is not able to handle such a complex and large design.} \label{fig:plot} \end{figure} \section{Conclusion} We have presented a purely combinational design and synthesis of BNNs for near-sensor processing. Our results demonstrate the suitability and the energy efficiency benefits of the proposed solution, fitting on a silicon area of 2.61\,\si{\square\milli\meter}{} when considering a BNN model with 32$\times$ 32 binary input data and weight parameters fixed at design time. Our study also highlighted the need for novel synthesis tools able to deal with very large and complex network designs, that are not easily handled by current tools. \bibliographystyle{IEEEtran}
{ "redpajama_set_name": "RedPajamaArXiv" }
5,234
HomeLifestyleEntertainmentDisney Honors the Late Chadwick Boseman Disney Honors the Late Chadwick Boseman December 3, 2020 Ron Wynn Entertainment Comments Off on Disney Honors the Late Chadwick Boseman By Ron Wynn NASHVILLE, TN — Though he's now passed, memories of the impact and importance of actor Chadwick Boseman remain strong. Now Disney has ensured he'll be remembered for many days and months to come. They announced Monday that the company had redesigned the Marvel Logo's introduction to Black Panther on Disney Plus. This was done in honor of what would have been Boseman's 44th birthday Sunday. Boseman will next be seen posthumously in an upcoming August Wilson adaptation Ma Rainey's Black Bottom on Netflix. He died on August 28 following a four-year battle with colon cancer. Boseman never discussed his diagnosis publicly. His passing came as a huge shock both for fans and Hollywood. It was met with universal praise for Boseman's bravery and unwavering humility. Disney executive Bob Iger announced the tribute on Twitter in memory of his life, and as a commemoration of his notable role as T'Challa/Black Panther. Iger wrote 'To all fans of Black Panther, watch the film on DisneyPlus late tonight (Sunday), for a special tribute to someone that was and will always be near and dear to our hearts." The tribute also showed footage from Boseman's appearances in 'Captain America: Civil War,' 'Avengers: Infinity War' and 'Avengers: Endgame.' In addition to new images of Boseman, there were additional quotes given by the actor in his role as T'Challa – including the famous line from when his character addressed the UN Assembly: 'In times of crisis, the wise build bridges while the foolish build barriers.' The tribute served as a well deserved reminder of Boseman's legacy in the Marvel Universe. But it also touched on his memorable moments behind-the-scenes. The end of the new introduction showed Boseman delivering the 'Wakanda Forever' salute. 
The introduction will remain on the opening credits on Disney Plus, but there is not yet any announcement about whether it will also be featured in the upcoming Black Panther sequel. Free Virtual Wealth Creation Workshops Committee Investigating Tennessee State Funding from State Meeting Dec. 8
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,180
Добърчин е село в Западна България. То се намира в Община Своге, Софийска област. География Село Добърчин се намира в Стара планина над село Искрец. Надморско равнище: 723 m. Площ: 17,859 km² (НСИ). Над селото се издига връх Върпина. Природата е чудесна има много сърни, елени, зайци. В горите може да се срещнат диворастящи ягоди. Климатът е уникален, поради което в подножието на селото е изградена Специализирана болница по пневмофтизиатрични заболявания за долекуване, продължително лечение и рехабилитация "Цар Фердинанд I" с. Искрец. Болницата е своеобразна граница между с. Искрец и с. Добърчин. Тя е създадена през 1908 г. като противотуберкулозен санаториум с указ и дарение от Цар Фердинанд I. Мястото за създаване на санаториума е избрано от специално назначена от Цар Фердинанд I комисия, която е пътувала из България и е извършвала измервания на климатичните параметри. История Меча поляна Меча поляна е заличено село, което през 1986 година е присъединено като махала към село Добърчин. Към момента на присъединяването населението на Меча поляна е 5 души . В махалата днес има 20-ина постройки, сред които средновековна църква и училище. Меча поляна не е било електрафицирано. Има водоснабдяване. Пътят до бившето село тръгва от махала Лилашковци и е с дължина малко над 2 километра. Източници Външни препратки Снимки от село Добърчин. Пътепис през част от махалите не селото Снимки от махалите на село Добърчин и Меча Поляна. Историческа информация и пътепис Села в Софийска област Населени места в община Своге
{ "redpajama_set_name": "RedPajamaWikipedia" }
5,873
Selensky bzw. Selenskyj ist der Familienname folgender Personen: Anja Selensky (* 1993), deutsche Fußballspielerin Wolodymyr Selenskyj (* 1978), Präsident der Ukraine, Jurist und ehemaliger ukrainischer Schauspieler und Produzent Siehe auch: Selenska Selenski Żeleński Zelinsky Zieliński
{ "redpajama_set_name": "RedPajamaWikipedia" }
7,429
A língua de sinais de Selangor (SSL, ou em Portugal: língua lestual de Selangor, conhecida também como a língua de sinais de Kuala Lumpur - KLSL), é uma língua de sinais usada na Malásia. Originalmente foi baseada na ASL mas divergiu significativamente, o bastante para ser considerada agora uma língua autónoma. É usada principalmente no estado de Selangor, mais do que em Kuala Lumpur, explicando assim porque os próprios surdos a chamam de língua de sinais de Selangor. Tal como a língua de sinais de Penang é agora usada principalmente por povos mais idosos, embora muitos povos mais novos consigam compreendê-la. Ligações externas Ver também Malásia Selangor
{ "redpajama_set_name": "RedPajamaWikipedia" }
9,447
A Concrete Contribution: St Marys Cement Pledges $250,000 to Bowmanville Hospital BOWMANVILLE, Wednesday, October 10, 2018 – On Friday, September 28, 2018, Bowmanville Hospital Foundation hosted its 32nd Annual Fundraising Gala at the Ajax Convention Centre. Community leaders, donors, volunteers and guests enjoyed a fun-filled evening with all funds going to support the redevelopment and expansion project for Bowmanville Hospital. St Marys Cement, part of international building materials supplier, Votorantim Cimentos, and the event's Dinner Sponsor, topped the evening when Vice President of Operations, Fabio Garcia surprised Foundation Board Members, staff and guests when he announced a $250,000 pledge to the Hospital's redevelopment project. St Marys Cement has been operating in Bowmanville since 1968 and this year is celebrating its 50th Anniversary. For 21 of those 50 years, St Marys has proudly supported Bowmanville Hospital, Foundation and Gala events. "We understand the importance of the support and funding the Foundation and Hospital receives from corporations, service clubs, families and individuals," said Mr. Garcia. "And what that support and funding translates into for healthcare for people and families across Clarington and beyond is priceless. Care requires funding and at St Marys Cement, we are proud to play a part in the delivery of excellent healthcare now and for decades to come." "We are incredibly thankful to have such a generous organization within our community," said Chris Kooy, Board Chair, Bowmanville Hospital Foundation. "St Marys Cement has been part of the landscape of Clarington for decades and in that time, established themselves with diverse connections, community partnerships and generous support for many local charities. The Foundation is very grateful for their commitment to elevate healthcare for the people of Clarington and beyond." (905) 623-3331 ext. 
21884 Bowmanville Hospital is a fully accredited hospital, servicing the Municipality of Clarington. It is run by Lakeridge Health Corporation, part of the Central East Local Health Integration Network. About Bowmanville Hospital Foundation Bowmanville Hospital Foundation is a charitable organization that raises funds annually to fund equipment needs, facility enhancements, and other high priority needs for Bowmanville Hospital. For more information about Bowmanville Hospital Foundation, please visit www.bowHF.com. St Marys Cement is a leading manufacturer of cement and related construction products in Canada and the United States. Headquartered in Toronto, Ontario, Canada, St Marys Cement supplies support new construction in infrastructure modernization in Ontario and across the Great Lakes Region. The St Marys Cement Bowmanville Plant was established in 1968 and, today, is among the largest employers in Bowmanville. St Marys Cement is a part of international building materials supplier, Votorantim Cimentos.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
6,443
\section{Introduction} \emph{Bayesian inference and data analysis} has become an integral part of modern science. This is partly due to the ability of Markov Chain Monte Carlo (MCMC) algorithms to generate samples from intractable probability distributions. MCMC methods produce a sequence of samples, called a \emph{Markov chain}, that has the target distribution as its equilibrium distribution. The more samples are included, the more closely the distribution of the samples approaches the target distribution. The Markov chain can then be used to numerically approximate expectation values (e.g. parameter uncertainties, marginalised distributions). Common MCMC methods entail a significant amount of time spent hand-tuning the hyperparameters of the algorithm to optimize its efficiency with respect to a target distribution. The emerging and routine use of such mathematical tools in science calls for the development of black-box MCMC algorithms that require no hand-tuning at all. This need led to the development of adaptive MCMC methods like the Adaptive Metropolis algorithm \citep{haario2001adaptive} which tunes its proposal scale based on the sample covariance matrix. Unfortunately, most of those algorithms still include a significant number of hyperparameters (e.g. components of the covariance matrix) rendering the adaptation noisy. Furthermore, the tuning is usually performed on the basis of prior knowledge, such as one or more long preliminary runs which further slow down the sampling. Last but not least, there is no reason to believe that a single Metropolis proposal scale is optimal for the whole distribution (i.e. the appropriate scale could vary from one part of the distribution to another). Another approach to deal with those issues would be to develop methods that by construction require no or minimal hand-tuning. An archetypal such method is the Slice Sampler \citep{neal2003slice}, which has only one hyperparameter, the initial length scale. 
It should be noted that powerful adaptive methods that require no hand-tuning (although they do require preliminary runs) already exist. Most notable of them is the No U-Turn Sampler (NUTS) \citep{hoffman2014no}, an adaptive extension of Hamiltonian Monte Carlo (HMC) \citep{neal2011mcmc}. However, such methods rely on the gradient of the log probability density function. This requirement is the reason why these methods are limited in their application in quantitative fields such as physics, astrophysics and cosmology, which are dominated by computationally expensive non-differentiable models. Thus, our objective in this paper is to introduce a parallel, black-box and gradient-free method that can be used in the aforementioned scientific fields. This paper presents Ensemble Slice Sampling (ESS), an extension of the Standard Slice Sampling method. ESS naturally inherits most of the benefits of Standard Slice Sampling, such as the acceptance rate of $1$, and most importantly the ability to adapt to the characteristics of a target distribution without any hand-tuning at all. Furthermore, we will show that ESS's performance is insensitive to linear correlations between the parameters, thus enabling efficient sampling even in highly demanding scenarios. We will also demonstrate ESS's performance in strongly multimodal target distributions and show that the method samples efficiently even in high dimensions. Finally, the method can easily be implemented in parallel taking advantage of multiple CPUs thus facilitating Bayesian inference in cases of computationally expensive models. Our implementation of ESS is inspired by \citet{tran2015reunderstanding}. However, our method improves upon that by extending the direction choices (e.g. Gaussian and global move), adaptively tuning the initial proposal scale, and parallelising the algorithm. 
\citet{nishihara2014parallel} developed a general algorithm based on the elliptical slice sampling method~\citep{murray2010elliptical} and a Gaussian mixture approximation to the target distribution. ESS utilises an ensemble of parallel and interacting chains, called walkers. Other methods that are based on the ensemble paradigm include the Affine Invariant Ensemble Sampler \citep{goodman2010ensemble} and the Differential Evolution MCMC \citep{ter2006markov} along with its various extensions \citep{ter2008differential, vrugt2009accelerating}, as well as more recent approaches that are based on langevin diffusion dynamics \citep{garbuno2020interacting, garbuno2020affine} and the time discretization of stochastic differential equations \citep{leimkuhler2018ensemble} in order to achieve substantial speedups. In Section \ref{sec:slice}, we will briefly discuss the Standard Slice Sampling algorithm. In Section \ref{sec:ensemble}, we will introduce the Ensemble Slice Sampling method. In Section \ref{sec:empirical} we will investigate the empirical evaluation of the algorithm. We reserve Sections \ref{sec:discussion} and \ref{sec:conclusion} for discussion and conclusion, respectively. \section{Standard Slice Sampling} \label{sec:slice} \textit{Slice Sampling} is based on the idea that sampling from a distribution $p(x)$ whose density is proportional to $f(x)$ is equivalent to uniformly sampling from the region underneath the graph of $f(x)$. More formally, in the univariate case, we introduce an auxiliary variable, the height $y$, thus defining the joint distribution $p(x,y)$, which is uniform over the region $U = \{ (x,y) : 0 < y < f(x) \}$. To sample from the marginal density for $x$, $p(x)$, we sample from $p(x,y)$ and then we ignore the $y$ values. Generating samples from $p(x,y)$ is not trivial, so we might consider defining a Markov chain that will converge to that distribution. The simplest, in principle, way to construct such a Markov chain is via Gibbs sampling. 
Given the current $x$, we sample $y$ from the conditional distribution of $y$ given $x$, which is uniform over the range $(0, f(x) )$. Then we sample the new $x$ from the \textit{slice} $S=\{ x : y < f(x)\}$. Generating a sample from the slice $S$ may still be difficult, since we generally do not know the exact form of $S$. In that case, we can update $x$ based on a procedure that leaves the uniform distribution of $S$ invariant. \citet{neal2003slice} proposed the following method: \begin{description} \item Given the current state $x_{0}$, the next one is generated as: \begin{enumerate} \item Draw $y_{0}$ uniformly from $(0,f(x_{0}))$, thus defining the horizontal slice $S = \{ x : y_{0} < f(x)\}$, \item Find an interval $I = (L,R)$ that contains all, or much, of $S$ (e.g. using the stepping-out procedure defined bellow), \item Draw the new point $x_{1}$ uniformly from $I \cap S$. \end{enumerate} \end{description} \begin{figure} \centering \includegraphics[scale=0.45]{slice.pdf} \caption{The plot shows the univariate slice sampling method. Given an initial value $x_{0}$, a value $y_{0}$ is uniformly sampled along the vertical slice $(0,f(x_{0}))$ (green dashed line) thus defining the initial point (blue star). An interval $(L,R)$ is randomly positioned horizontally around the initial point, and then it is expanded in steps of size $\mu=R-L$ until both of its ends $L', R'$ are outside the slice. The new point (green star) is generated by repeatedly sampling uniformly from the expanded interval $(L',R')$ until a point is found inside the slice. Points outside the slice (e.g. 
the red star) are used to shrink the interval $(L',R')$ by moving $L'$ or in this case $R'$ to that point and accelerate the sampling procedure.} \label{fig:slice} \end{figure} In order to find the interval $I$, \citet{neal2003slice} proposed to use the \emph{stepping-out} procedure that works by randomly positioning an interval of length $\mu$ around the point $x_{0}$ and then expanding it in steps of size $\mu$ until both ends are outside of the slice. The new point $x_{1}$ is found using the \emph{shrinking} procedure, in which points are uniformly sampled from $I$ until a point inside $S$ is found. Points outside $S$ are used to shrink the interval $I$. The stepping-out and shrinking procedures are illustrated in Figure \ref{fig:slice}. By construction, the stepping-out and shrinking procedures can adaptively tune a poor estimate of the length scale $\mu$ of the initial interval. The length scale $\mu$ is the only free hyperparameter of the algorithm. For a detailed review of the method we direct the reader to \cite{neal2003slice} and \cite{mackay2003information} (also Exercise 30.12 in that text). It is important to mention here that for multimodal distributions there is no guarantee that the slice would cross any of the other modes, especially if the length scale is underestimated initially. Ideally, in order to provide a large enough initial value of the scale factor $\mu$, prior knowledge of the distance between the modes is required. As we will show in the next section, Ensemble Slice Sampling does not suffer from this complication and can handle strongly multimodal distributions efficiently. \section{Ensemble Slice Sampling} \label{sec:ensemble} The univariate slice sampling scheme can be used to sample from multivariate distributions by sampling repeatedly along each coordinate axis in turn (one parameter at a time) or by sampling along randomly selected directions \citep{mackay2003information}. 
Using either of those choices, the Standard Slice Sampler performs acceptably in cases with no strong correlations in parameter space. The overall performance of the algorithm generally depends on the number of expansions and contractions during the stepping-out and shrinking procedures, respectively. Ideally we would like to minimize that number. A reasonable initial estimate of the length scale is still required in order to reduce the amount of time spent expanding or contracting the initial interval. However, when strong correlations are present two issues arise. First, there is no single value of the initial length scale that minimizes the computational cost of the stepping-out and shrinking procedures along all directions in parameter space. The second problem concerns the choice of direction. In particular, neither the component-wise choice (one parameter at a time) nor the random choice is suitable in strongly correlated cases. Using such choices results in highly autocorrelated samples. Our approach would be to target each of those two issues individually. The resulting algorithm, Ensemble Slice Sampling (ESS), is invariant under affine transformations of the parameter space, meaning that its performance is not sensitive to linear correlations. Furthermore, ESS minimizes the computational cost of finding the slice by adaptively tuning the initial length scale. Last but not least, unlike most MCMC methods, ESS is trivially parallelizable, thus enabling the data analyst to take advantage of modern high performance computing facilities with multiple CPUs. \subsection{Adaptively tuning the length scale} \label{sec:approx} Let us first consider the effect of the initial length scale on the performance of the univariate slice sampling method. For instance, if the initial length scale is $\lambda$ times smaller than the actual size of the slice, then the stepping-out procedure would require $\mathcal{O}(\lambda)$ steps in order to fix this. 
However, in this case, since the final interval is an accurate approximation of the slice there would probably be no contractions during the shrinking phase. On the other hand, when the initial length scale is larger than the actual slice then the number of expansions would be either one or zero. In this case though, there would be a number of contractions.\\ \noindent\textbf{Stochastic approximation:} As the task is to minimize the total number of expansions and contractions we employ and adapt the \emph{Robbins--Monro} stochastic approximation algorithm \citep{robbins1951stochastic} of \citet{tibbits2014automated}. Ideally, based on the reasoning of the previous paragraph, only one expansion and one contraction will take place. Therefore, the target ratio of number of expansions to total number of expansions and contractions is $1/2$. To achieve this, we update the length scale $\mu$ based on the following recursive formula: \begin{equation} \label{eq:approx} \mu^{(t+1)} = 2 \mu^{(t)}\frac{N_{e}^{(t)}}{N_{e}^{(t)}+N_{c}^{(t)}}\, , \end{equation} where $N_{e}^{(t)}$ and $N_{c}^{(t)}$ are the number of expansions and contractions during iteration $t$. It is easy to see that when the fraction $N_{e}^{(t)}/(N_{e}^{(t)}+N_{c}^{(t)})$ is larger than $1/2$ the length scale $\mu$ will be increased. In the case where the fraction is smaller than $1/2$ the length scale $\mu$ will be decreased accordingly. The optimization can stop either when the fraction is close to $1/2$ within a threshold or when a maximum number of tuning steps has been completed. The pseudocode for the first case is shown in \Algo{approximate}. In order to preserve detailed balance it is important to be sure that the adaptation stops after a finite number of iterations. In practice this happens after $\mathcal{O}(10)$ iterations. An alternative would be to use diminishing adaptation \citep{roberts2007coupling} but we found that our method is sufficient in practice (see Section 4.3 for more details). 
\begin{algorithm} \caption{Function to tune the length scale $\mu$.} \algolabel{approximate} \begin{algorithmic}[1] \STATE{\textbf{function} TuneLengthScale($t$, $\mu^{(t)}$, $N_{e}^{(t)}$, $N_{c}^{(t)}$, $M^{\text{adapt}}$)} \IF{$t\leq M^{\text{adapt}}$} \STATE{Compute $\mu^{(t+1)}$ using Equation \ref{eq:approx},} \STATE{\bf{return} $\mu^{(t+1)}$} \ELSE \STATE{\bf{return} $\mu^{(t)}$} \ENDIF \end{algorithmic} \end{algorithm} \subsection{The choice of direction \& parallelization} \label{sec:direction} In cases where the parameters are correlated we can accelerate mixing by moving more frequently along certain directions in parameter space. One way of achieving this is to exploit some prior knowledge about the covariance of the target distribution. However, such an approach would either require significant hand-tuning or noisy estimations of the sample covariance matrix during an initial run of the sampler. For that reason we employ a different approach to exploit the covariance structure of the target distribution and preserve the hand-tuning-free nature of the algorithm.\\ \noindent\textbf{Ensemble of walkers:} Following the example of \cite{goodman2010ensemble} we define an ensemble of parallel chains, called walkers. In our case though, each walker is an individual slice sampler. The sampling proceeds by moving one walker at a time by slice sampling along a direction defined by a subset of the rest of walkers of the ensemble. As long as the aforementioned direction does not depend on the position of the current walker, the resulting algorithm preserves the detailed balance of the chain. Moreover, assuming that the distribution of the walkers resembles the correlated target distribution, the chosen direction will prefer directions of correlated parameters. We define an ensemble of $N$ parallel walkers as the collection $S = \lbrace \mathbf{X_{1}}, \dots, \mathbf{X_{N}}\rbrace$. 
The position of each individual walker $\mathbf{X_{k}}$ is a vector $\mathbf{X_{k}}\in \mathbb{R}^{D}$ and therefore we can think of the ensemble $S$ as being in $\mathbb{R}^{N D}$. Assuming that each walker is drawn independently from the target distribution with density $p$, then the target distribution for the ensemble would be the product \begin{equation} \label{eq:product} P(\mathbf{X_{1}}, \dots, \mathbf{X_{N}}) = \prod_{k=1}^{N}p(\mathbf{X_{k}}) \,. \end{equation} The Markov chain of the ensemble would preserve the product density of equation \ref{eq:product} without the individual walker trajectories being Markov. Indeed, the position of $\mathbf{X_{k}}$ at iteration $t+1$ can depend on $\mathbf{X_{j}}$ at iteration $t$ with $j\neq k$. Given the walker $\mathbf{X_{k}}$ that is to be updated there are arbitrarily many ways of defining a direction vector from the complementary ensemble $S_{[k]}=\lbrace \mathbf{X_{j}},\: \forall j\neq k\rbrace$. Here we will discuss a few of them. Following the convention in the ensemble MCMC literature we call those recipes of defining direction vectors, \emph{moves}. Although the use of the ensemble might seem equivalent to that of a sample covariance matrix in the Adaptive Metropolis algorithm \citep{haario2001adaptive} the former has a higher information content as it encodes both linear and non-linear correlations. Indeed, having an ensemble of walkers allows for arbitrarily many policies for choosing the appropriate directions along which the walkers move in parameter space. As we will shortly see, one of the choices (i.e. the Gaussian move, introduced later in this Section) is indeed the slice sampling analogue of a covariance matrix. However, other choices (i.e. Differential move or Global move) can take advantage of the non-Gaussian nature of the ensemble distribution and thus propose more informative moves. 
As it will be discussed later in this section, those advanced moves make no assumption of Gaussianity for the target distribution. Furthermore, as we will show in the last part of this section, the ensemble can also be easily parallelised. \\ \begin{algorithm} \caption{Function to return a differential move direction vector.} \algolabel{differential} \begin{algorithmic}[1] \STATE{\textbf{function} DifferentialMove($k$, $\mu$, $S$)} \STATE{Draw two walkers $\mathbf{X_{l}}$, and $\mathbf{X_{m}}$ uniformly and without replacement from the complementary ensemble $S$}, \STATE{Compute direction vector $\bm{\eta}_{k}$ using Equation \ref{eq:diff},} \STATE{\bf{return} $\bm{\eta}_{k}$} \end{algorithmic} \end{algorithm} \noindent\textbf{Affine transformations and invariance}: Affine invariance is a property of certain MCMC samplers first introduced in the MCMC literature by \citet{goodman2010ensemble}. An MCMC algorithm is said to be affine invariant if its performance is invariant under the bijective mapping $g:\mathbb{R}^{D}\rightarrow \mathbb{R}^{D}$ of the form $\mathbf{Y}=A \mathbf{X} + b$ where $A\in \mathbb{R}^{D\times D}$ is a matrix and $b\in\mathbb{R}^{D}$ is a vector. Linear transformations of this form are called affine transformations and describe rotations, rescaling along specific axes as well as translations in parameter space. Assuming that $\mathbf{X}$ has the probability density $p(\mathbf{X})$, then $\mathbf{Y}=A\mathbf{X}+b$ has the probability density \begin{equation} \label{eq:affinedensity} p_{A,b}(\mathbf{Y})=p(A\mathbf{X}+b)\propto p(\mathbf{X})\,. 
\end{equation} Given a density $p$ as well as an MCMC transition operator $\mathcal{T}$ such that $\mathbf{X}(t+1) = \mathcal{T} \big(\mathbf{X}(t);p\big)$ for any iteration $t$ we call the operator $\mathcal{T}$ affine invariant if \begin{equation} \label{eq:invariance} \mathcal{T}\big(A\mathbf{X}+b;p_{A,b}\big) = A\:\mathcal{T}\big(\mathbf{X};p\big)+b \end{equation} for $\forall A\in \mathbb{R}^{D\times D}$ and $\forall b \in \mathbb{R}^{D}$. In case of an ensemble of walkers we define an affine transformation from $\mathbb{R}^{N D}$ to $\mathbb{R}^{N D}$ as \begin{equation} \label{eq:transformensemble} S = \lbrace \mathbf{X_{1}}, \dots, \mathbf{X_{N}}\rbrace \xrightarrow{A, b} \lbrace A \mathbf{X_{1}}+b, \dots, A\mathbf{X_{N}}+b\rbrace \,. \end{equation} The property of affine invariance is of paramount importance for the development of efficient MCMC methods. As we have discussed already, proposing samples more frequently along certain directions can accelerate sampling by moving further away in parameter space. Given that most realistic applications are highly skewed or anisotropic and are characterised by some degree of correlation between their parameters, affine invariant methods are an obvious choice of a tool that can be used in order to achieve high levels of efficiency. \\ \noindent\textbf{Differential move:} The differential direction choice works by moving the walker $\mathbf{X}_{k}$ based on two randomly chosen walkers $\mathbf{X}_{l}$ and $\mathbf{X}_{m}$ of the complementary ensemble $S_{[k]}=\lbrace \mathbf{X_{j}},\: \forall j\neq k\rbrace$ \citep{gilks1994adaptive}, see Figure \ref{fig:diff} for a graphical explanation. In particular, we move the walker $\mathbf{X}_{k}$ by slice sampling along the vector $\bm{\eta}_{k}$ defined by the difference between the walkers $\mathbf{X}_{l}$ and $\mathbf{X}_{m}$. 
It is important to notice here that the vector $\bm{\eta}_{k}$ is not a unit vector and thus carries information about both the length scale and the optimal direction of movement. It will also prove to be more intuitive to include the initial length scale $\mu$ in the definition of the direction vector in the following way: \begin{equation} \label{eq:diff} \bm{\eta}_{k}= \mu \big( \mathbf{X}_{l}-\mathbf{X}_{m}\big)\, . \end{equation} The pseudocode for a function that, given the value of $\mu$ and the complementary ensemble $S$, returns a differential direction vector $\bm{\eta}_{k}$ is shown in \Algo{differential}. Furthermore, the Differential move is clearly affine invariant. Assuming that the distribution of the ensemble of walkers follows the target distribution and the latter is highly elongated or stretched along a certain direction then the proposed direction given by equation \ref{eq:diff} will share the same directional asymmetry. \\ \begin{figure}[t!] \centering \includegraphics[scale=0.55]{diff.pdf} \caption{The plot shows the differential direction move. Two walkers (red) are uniformly sampled from the complementary ensemble (blue). Their positions define the direction vector (solid black). The selected walker (magenta) then moves by Slice Sampling along the parallel direction (dashed black).} \label{fig:diff} \end{figure} \noindent\textbf{Gaussian move:} The direction vector $\bm{\eta}_{k}$ can also be drawn from a normal distribution with the zero mean and the covariance matrix equal to the sample covariance of the complementary ensemble $S_{[k]}$, \begin{equation} \label{eq:cov} \mathbf{C}_{S}=\frac{1}{|S|}\sum_{j\in S}\big(\mathbf{X}_{j}-\bar{\mathbf{X}}_{S}\big)\big(\mathbf{X}_{j}-\bar{\mathbf{X}}_{S}\big)^{t}\, . \end{equation} We chose to include the initial length scale $\mu$ in this definition as well: \begin{equation} \label{eq:gaussian} \frac{\bm{\eta}_{k}}{2\mu} \sim \mathcal{N}\big(\mathbf{0},\mathbf{C}_{S} \big)\, . 
\end{equation} The factor of $2$ is used so that the magnitude of the direction vectors are consistent with those sampled using the differential direction choice in the case of Gaussian-distributed walkers. The pseudocode for a function that, given the value of $\mu$ and the complementary ensemble $S$, returns a Gaussian direction vector $\bm{\eta}_{k}$ is shown in \Algo{gaussian}. See Figure \ref{fig:gauss} for a graphical explanation of the method. Moreover, just like the Differential move, the Gaussian move is also affine invariant. In the limit in which the number of walkers is very large and the target distribution is normal, the first reduces to the second. Alternatively, assuming that the distribution of walkers follows the target distribution then the covariance matrix of the ensemble would be the same as that of independently drawn samples from the target density. Therefore any anisotropy characterising the target density would also be present in the distribution of proposed directions given by equation \ref{eq:gaussian}. \begin{algorithm} \caption{Function to return a Gaussian Move direction vector.} \algolabel{gaussian} \begin{algorithmic}[1] \STATE{\textbf{function} GaussianMove($k$, $\mu$, $S$)} \STATE{Estimate sample covariance $\mathbf{C}_{S}$ of the walkers in the complementary ensemble $S$ using Equation \ref{eq:cov},} \STATE{Sample $\bm{\eta}_{k}/(2\mu)\sim \mathcal{N}\big(\mathbf{0},\mathbf{C}_{S} \big)$,} \STATE{\bf{return} $\bm{\eta}_{k}$} \end{algorithmic} \end{algorithm} \begin{figure}[t!] \centering \includegraphics[scale=0.55]{gauss.pdf} \caption{The plot shows the Gaussian direction move. A direction vector (solid black) is sampled from the Gaussian-approximated distribution of the walkers of the complementary ensemble (green). The selected walker (magenta) then moves by Slice Sampling along the parallel direction (dashed black).} \label{fig:gauss} \end{figure} \noindent\textbf{Global move:} ESS and its variations described so far (i.e. 
differential move, Gaussian move) have as much difficulty traversing the low probability regions between modes/peaks in multimodal distributions as most local MCMC methods (e.g. Metropolis, Hamiltonian Monte Carlo, Slice Sampling, etc.). Indeed, multimodal distributions are often the most challenging cases to sample from. Fortunately, Ensemble Slice Sampling's flexibility allows us to construct advanced moves which are specifically designed to handle multimodal cases even in moderate to high dimensional parameter spaces. The \emph{global move} is such an example. We first fit a \emph{Gaussian Mixture} to the distribution of the walkers of the complementary ensemble $S_{[k]}$ using \emph{Variational Inference}. To avoid defining the number of components of the Gaussian Mixture we use a \emph{Dirichlet process} as the prior distribution for the Gaussian Mixture weights\footnote{To this end we use the Scikit-Learn implementation of the Dirichlet process Gaussian mixture.} \citep{gorur2010dirichlet}. The exact details of the construction of the Dirichlet process Gaussian mixture (DPGM) are beyond the scope of this work and we direct the reader to \citet{gorur2010dirichlet} and \citet{bishop2006pattern} for more details. One of the major benefits of fitting the DPGM using variational inference compared to the expectation--maximisation (EM) algorithm~\citep{dempster1977maximum} that is often used is the improved stability. In particular, the use of priors in the variational Bayesian treatment guarantees that Gaussian components do not collapse into specific data points. This regularisation due to the priors leads to component covariance matrices that do not diverge even when the number of data points (i.e. walkers in our case) in a component is lower than the number of dimensions. In our case, this means that even if the number of walkers located in a mode of the target distribution is small DPGM would still identify that mode correctly. 
In such cases, the covariance of the component that corresponds to that mode would be over--estimated. This however does not affect the performance of the Global move as the latter does not rely on exact estimates of the component covariance matrices.\footnote{Indeed the covariance matrix of a component only enters through equation \ref{eq:globalB} but then it is re--scaled by the factor $\gamma$.} In practice, we recommend using more than the minimum number of walkers in cases of multimodal distributions (e.g. at least two times as many in bimodal cases). We found that the computational overhead introduced by the variational fitting of the DPGM is negligible compared to the computational cost of the evaluation of the model and posterior distribution in common problems in physics, astrophysics and cosmology. Indeed the cost is comparable, and only a few times higher than the Differential or Gaussian move. The reason for that is the relatively small number of walkers (i.e. $\mathcal{O}(10-10^3)$) that simplifies the fitting procedure. Once fitting is done, we have a list of the means and covariance matrices of the components of the Gaussian Mixture. As the ensemble of walkers traces the structure of the target distribution, we can use the knowledge of the means and covariance matrices of the Gaussian Mixture to construct efficient direction vectors. Ideally, we prefer direction vectors that connect different modes. This way, the walkers will be encouraged to move along those directions that would otherwise be very unlikely to be chosen. We uniformly select two walkers of the complementary ensemble and identify the Gaussian components to which they belong, say $i$ and $j$. There are two distinct cases and we will treat them as such. In case A, $i = j$, meaning that the selected walkers originate from the same component. In case B, $i \neq j$, meaning that the two walkers belong to different components and thus probably different peaks of the target distribution. 
As we will show next, only in case B can we define a direction vector that favours mode-jumping behaviour. In case A, we can sample a direction vector from the Gaussian component that the two selected walkers belong to\footnote{In practice we uniformly sample two walkers from the list of walkers that DPGM identified in that mode. This step removes any dependency on covariance matrix estimates.}: \begin{equation} \label{eq:globalA} \frac{\bm{\eta}_{k}}{2\mu} \sim \mathcal{N}\big( \bm{0}, \bm{C}_{i=j} \big)\, , \end{equation} where $\bm{C}_{i=j}$ is the covariance matrix of the i$_{\rm th}$ (or equivalently j$_{\rm th}$) component. Just as in the Gaussian move, the mean of the proposal distribution is zero so that we can interpret $\bm{\eta}$ as a direction vector. In case B, where the two selected walkers belong to different components, $i \neq j$, we will follow a different procedure to facilitate long jumps in parameter space. We will sample two vectors, one from each component: \begin{equation} \label{eq:globalB} \bm{\eta}_{k, n} \sim \mathcal{N}\big( \bm{\mu}_{n}, \gamma \bm{C}_{n} \big)\, , \end{equation} for $n=i$ or $n=j$. Here, $\bm{\mu}_{n}$ is the mean of the nth component and $\bm{C}_{n}$ is its covariance matrix. In practice, we also re-scale the covariance by a factor of $\gamma = 0.001$, which results in direction vectors with lower variance in their orientation. $\gamma < 1$ ensures that the chosen direction vector is close to the vector connecting the two peaks of the distribution. Finally, the direction vector will be defined as: \begin{equation} \label{eq:global} \bm{\eta}_{k} = 2 \big( \bm{\eta}_{k,i} - \bm{\eta}_{k,j} \big)\, . \end{equation} The factor of $2$ here is chosen to better facilitate mode-jumping. There is also no factor of $\mu$ in the aforementioned expression, since in this case there is no need for the scale factor to be tuned. 
The pseudocode for a function that, given the complementary ensemble $S$, returns a Global direction vector $\bm{\eta}_{k}$ is shown in \Algo{global}. See Figure \ref{fig:global} for a graphical explanation of the method. It should be noted that for the global move to work at least one walker needs to be present on each well separated mode. \begin{algorithm} \caption{Function to return a global move direction vector.} \algolabel{global} \begin{algorithmic}[1] \STATE{\textbf{function} GlobalMove($k$, $\mu$, $S$)} \STATE{Fit Dirichlet process Gaussian mixture (DPGM) to the complementary ensemble $S_{[k]}$,} \STATE{If $N$ is the number of components of the DPGM then select two components $i, j$ uniformly such that $i \neq j$,} \IF{$i = j$} \STATE{Sample $\bm{\eta}_{k}/(2\mu) \sim \mathcal{N}\big( \bm{0}, \bm{C}_{i=j} \big)$,} \ELSE \STATE{Sample $\bm{\eta}_{k, n} \sim \mathcal{N}\big( \bm{\mu}_{n}, \gamma \bm{C}_{n} \big)$ for $n=i, j$,} \STATE{Compute direction vector $\bm{\eta}_{k}$ using Equation \ref{eq:global},} \ENDIF \STATE{\bf{return} $\bm{\eta}_{k}$} \end{algorithmic} \end{algorithm} \begin{figure}[t!] \centering \includegraphics[scale=0.55]{global.pdf} \caption{The plot shows the global direction move assuming that the uniformly selected pair of walkers of the complementary ensemble belongs to different components (blue and green). A position (red) is sampled from each component (using the re-scaled by $\gamma$ covariance matrix). Those two points (red) define the direction vector (black) connecting the two modes (blue and green). The selected walker (magenta) then moves by slice sampling along the parallel direction (dashed).} \label{fig:global} \end{figure} Here we introduced three general and distinct moves that can be used in a broad range of cases. In general, the global move requires a higher number of walkers than the differential or Gaussian move in order to perform well. 
We found that the differential and Gaussian moves are good choices for most target distributions whereas the global move is only necessary in highly dimensional and multimodal cases. One can use the information in the complementary ensemble to construct more moves tailor-made for specific problems. Such additional moves might include Kernel Density Estimation or Clustering methods and as long as the information used comes from the complementary ensemble (and not from the walker that would be updated) the detailed balance is preserved.\\ \noindent\textbf{Parallelizing the ensemble:} Instead of evolving the ensemble by moving each walker in turn we can do this in parallel. A naive implementation of this would result in a subtle violation of detailed balance. We can avoid this by splitting the ensemble into two sets of walkers \citep{foreman2013emcee} of $n_{\text{Walkers}} / 2$ each. We can now update the positions of all the walkers in the one set in parallel along directions defined by the walkers of the other set (the complementary ensemble). Then we can perform the same procedure for the other set. In accordance with equation \ref{eq:product}, the stationary distribution of the split ensemble would be \begin{equation} \label{eq:split} P(\mathbf{X_{1}},\dots,\mathbf{X_{N}}) = \prod_{k=1}^{N/2}p(\mathbf{X_{k}}) \prod_{k=1+N/2}^{N}p(\mathbf{X_{k}})\,. \end{equation} The method generates samples from the target distribution by simulating a Markov chain which leaves this product distribution invariant. The transition operator $\mathcal{T}_{1}$ that updates the walkers of the first set (i.e. $k=1,\dots,N/2$) uses the walkers of the complementary ensemble (i.e. $k=1+N/2,\dots,N$) and vice versa for the transition operator $\mathcal{T}_{2}$ that acts on the second set. In the context of ESS the aforementioned transition operators correspond to a single iteration of \Algo{final} coupled with one of the moves (e.g. Differential move). 
It follows from the ensemble splitting technique that the maximum number of CPUs used without any of them being idle is equal to the total number of walkers updated concurrently, that is $n_{\text{Walkers}} / 2$. We will also verify this empirically in Section \ref{sec:empirical}. Of course, this does not mean that if there are more CPUs available they cannot be used as we can always increase the size of the ensemble to match the available CPUs. Combining this technique with the stochastic approximation solution of Subsection \ref{sec:approx} and the choices (moves) of direction and ensemble-splitting technique of this subsection leads to the Ensemble Slice Sampling method of \Algo{final}\footnote{Perhaps a small detail, but we have included the length scale in the definition of the direction vector $\mathbf{\eta}$ and therefore it does not appear in the definition of the $(L, R)$ interval.}. Of course, another move (e.g. Gaussian, global) can be used instead of the differential move in \Algo{final}. Finally, the minimum number of walkers used should be twice the number of parameters. Using fewer walkers than that could lead to erroneous sampling from a lower dimensional parameter space \citep{ter2006markov}. In general, parallelizing a slice sampler is not trivial (e.g. as it is for Metropolis) because each update requires an unknown number of probability density evaluations. However, because of the affine invariance (i.e. performance unaffected by linear correlations) induced by the existence of the ensemble, all iterations require on average the same number of probability density evaluations (i.e. usually $5$ if the stochastic approximation for the length scale $\mu$ is used). Therefore, the parallelization of Ensemble Slice Sampling is very effective in practice. Furthermore, the benefit of having parallel walkers instead of parallel independent chains (e.g. 
such as in Metropolis sampling) is clear, the walkers share information about the covariance structure of the distribution thus accelerating mixing. \begin{algorithm}[ht!] \caption{Single Iteration $t$ of Ensemble Slice Sampling.} \algolabel{final} \begin{algorithmic}[1] \STATE{Given $t$, $f$, $\mu^{(t)}$, $S_{[0]}$, $S_{[1]}$, $M^{\rm adapt}$:} \STATE{Initialise $N_{e}^{(t)} = 0$ and $N_{c}^{(t)} = 0$}, \FOR{$i=0, 1$} \FOR{$k=1, ..., N/2$} \STATE{$k \leftarrow k + i N/2$} \STATE{Compute direction vector $\bm{\eta}_{k}\leftarrow$ DifferentialMove($k$, $\mu^{(t)}$, $S_{[i]}$)} \STATE{Sample $Y \sim \text{Uniform}(0,f(\mathbf{X_{k}}^{(t)}))$} \STATE{Sample $U \sim \text{Uniform}(0,1)$} \STATE{Set $L \leftarrow - U$, and $R \leftarrow L + 1$} \WHILE{$Y < f(L)$} \STATE{$L \leftarrow L - 1$} \STATE{$N_{e}^{(t)} \leftarrow N_{e}^{(t)} + 1$} \ENDWHILE \WHILE{$Y < f(R)$} \STATE{$R \leftarrow R + 1$} \STATE{$N_{e}^{(t)} \leftarrow N_{e}^{(t)} + 1$} \ENDWHILE \WHILE{True} \STATE{Sample $X' \sim \text{Uniform}(L,R)$} \STATE{Set $Y' \leftarrow f(X'\bm{\eta}_{k} + \mathbf{X_{k}}^{(t)})$} \IF{$Y<Y'$} \STATE{\bf{break}} \ENDIF \IF{$X'<0$} \STATE{$L \leftarrow X'$} \STATE{$N_{c}^{(t)} \leftarrow N_{c}^{(t)} + 1$} \ELSE \STATE{$R \leftarrow X'$} \STATE{$N_{c}^{(t)} \leftarrow N_{c}^{(t)} + 1$} \ENDIF \ENDWHILE \STATE{Set $\mathbf{X_{k}}^{(t+1)} \leftarrow X' \bm{\eta}_{k} + \mathbf{X_{k}}^{(t)}$} \ENDFOR \ENDFOR \STATE{$\mu^{(t+1)} \leftarrow$ TuneLengthScale($t$, $\mu^{(t)}$, $N_{e}^{(t)}$, $N_{c}^{(t)}$, $M^{adapt}$),} \end{algorithmic} \end{algorithm} \section{Empirical evaluation} \label{sec:empirical} To empirically evaluate the sampling performance of the Ensemble Slice Sampling algorithm we perform a series of tests. In particular, we compare its ability to sample from two demanding target distributions, namely the \emph{autoregressive process of order 1} and the \emph{correlated funnel}, against the Metropolis and Standard Slice Sampling algorithms. 
The Metropolis' proposal scale was tuned to achieve the optimal acceptance rate, whereas the initial length scale of Standard Slice Sampling was tuned using the stochastic scheme of \Algo{approximate}. Ensemble Slice Sampling significantly outperforms both of them. These tests help establish the characteristics and advantages of Ensemble Slice Sampling. Since our objective was to develop a gradient-free black-box method we then proceed to compare Ensemble Slice Sampling with a list of gradient-free ensemble methods such as \emph{Affine Invariant Ensemble Sampling} (AIES), \emph{Differential Evolution Markov Chain} (DEMC) and \emph{Kernel Density Estimate Metropolis} (KM) on a variety of challenging target distributions. Moreover, we are also interested in assessing the convergence rate of the length scale $\mu$ during the first iterations as well as the parallel scaling of the method in the presence of multiple CPUs. Unless otherwise specified we use the differential move for the tests. Unlike ESS that has an acceptance rate of $1$, AIES's and DEMC's acceptance rate is related to the number of walkers. For that reason, and for the sake of a fair comparison, we made sure the selected number of walkers in all examples would yield the optimal acceptance rate for AIES and DEMC. As we will discuss further in Section \ref{sec:discussion} it makes sense to increase the number of walkers in cases of multimodal distributions or strong non-linear correlations. In general though, we recommend using the minimum number of walkers (i.e. twice the number of dimensions) as the default choice and increase it only if it is required by a specific application. For more rules and heuristics about the initialisation and number of walkers we direct the interested reader to Section \ref{sec:discussion}. \subsection{Performance tests} \noindent\textbf{Autoregressive process of order 1:} In order to investigate the performance of ESS 
in high dimensional and correlated scenarios we chose a highly correlated Gaussian as the target distribution. More specifically, the target density is a discrete-time \emph{autoregressive process of order 1}, also known as AR(1). This particular target density is ideally suited for benchmarking MCMC algorithms since the posterior density in many scientific studies often approximates a correlated Gaussian. Apart from that, the AR(1) is commonly used as a prior for time-series analysis. The AR(1) distribution of a random vector $\bm{X}=(X_{1},...,X_{N})$ is defined recursively as follows: \begin{equation} \label{eq:ar1} \begin{split} X_{1} \sim &\;\mathcal{N}(0,1)\, , \\ X_{2}|X_{1} \sim &\;\mathcal{N}(\alpha X_{1},\beta^{2})\, , \\ &\vdots \\ X_{N}|X_{N-1} \sim &\;\mathcal{N}(\alpha X_{N-1},\beta^{2})\, , \end{split} \end{equation} where the parameter $\alpha$ controls the degree of correlation between parameters and we chose it to be $\alpha = 0.95$. We set $\beta = \sqrt{1-\alpha^{2}}$ so that the marginal distribution of all parameters is $\mathcal{N}(0,1)$. We also set the number of dimensions to $N=50$. \begin{figure*}[t!] \centering \includegraphics[width=.3\textwidth]{ar1_metropolis.pdf} \includegraphics[width=.3\textwidth]{ar1_standard_slice.pdf} \includegraphics[width=.3\textwidth]{ar1_ensemble_slice.pdf} \caption{The plots compare the 1-sigma and 2-sigma contours generated by the optimised random-walk Metropolis (left), Standard Slice (centre) and Ensemble Slice Sampling (right) methods to those obtained by Independent Sampling (blue) for the AR(1) distribution. All samplers used the same number of probability density evaluations, $3\times 10^{5}$. Only the first two dimensions are shown here.} \label{fig:ar1} \end{figure*} \begin{table}[ht!] 
\centering \caption{The table shows a comparison of the optimally tuned Metropolis, Standard Slice, and Ensemble Slice Sampling with the differential move (ESS-D) and the Gaussian move (ESS-G) respectively in terms of the integrated autocorrelation time (IAT) and the number of effective samples per evaluation of the probability density (efficiency) multiplied by $10^4$. These metrics are formally defined in Appendix \ref{app:ess}. The target distributions are the 50--dimensional autoregressive process of order 1 and the 25--dimensional correlated funnel distribution. The total number of iterations was set to $10^{7}$.} \def1.1{1.1} \begin{tabular}{lccccc} \toprule[0.75pt] & Metropolis & Slice & \textbf{ESS-D} & \textbf{ESS-G} \\ \midrule[0.5pt] \multicolumn{4}{l}{Autoregressive process of order 1} \\ \midrule[0.5pt] IAT & 4341 & 2075 & $\mathbf{111}$ & $\mathbf{107}$ \\ efficiency & 2.3 & 1.0 & $\mathbf{17.5}$ & $\mathbf{17.8}$ \\ \midrule[0.5pt] \multicolumn{4}{l}{Correlated funnel distribution} \\ \midrule[0.5pt] IAT & - & 3905 & $\mathbf{129}$ & $\mathbf{141}$ \\ efficiency & - & 0.5 & $\mathbf{15.3}$ & $\mathbf{14.0}$ \\ \bottomrule[0.75pt] \end{tabular} \label{tab:table1} \end{table} For each method, we measured the mean \emph{integrated autocorrelation time} (IAT), and the number of effective samples per evaluation of the probability density function, also termed \emph{efficiency} (see Appendix \ref{app:ess} for details). For this test we ran the samplers for $10^{7}$ iterations. In this example we used the minimum number of walkers (i.e. 100 walkers) for ESS and the equivalent number of probability evaluations for Metropolis and Slice Sampling with each walker initialised at a position sampled from the distribution $\mathcal{N}(0,1)$. The results are presented in Table \ref{tab:table1}. The chain produced by Ensemble Slice Sampling has a significantly shorter IAT ($20-40$ times) compared to either of the other two methods. 
Furthermore, Ensemble Slice Sampling, with either Differential or Gaussian move, generates an order of magnitude greater number of independent samples per evaluation of the probability density. In this example the Differential and Gaussian moves have achieved almost identical IAT values and efficiencies. To assess the mixing rate of Ensemble Slice Sampling, we set the maximum number of probability density evaluations to $3\times 10^{5}$ and show the results in Figure \ref{fig:ar1}. We compare the results of Ensemble Slice Sampling with those obtained via the optimally tuned Metropolis and Standard Slice Sampling methods. Ensemble Slice Sampling significantly outperforms both of them, being the only one with a chain resembling the target distribution in the chosen number of probability evaluations.\\ \noindent\textbf{Correlated funnel:} The second test involves a more challenging distribution, namely the correlated funnel distribution adapted from \citet{neal2003slice}. The funnel, tornado like, structure is common in Bayesian hierarchical models and possesses characteristics that render it a particularly difficult case. The main difficulty originates from the fact that there is a region of the parameter space where the volume of the region is low but the probability density is high, and another region where the opposite holds. Suppose we want to sample an N--dimensional vector $\bm{X}=(X_{1},...,X_{N})$ from the correlated funnel distribution. The marginal distribution of $X_{1}$ is Gaussian with mean zero and unit variance. Conditional on a value of $X_{1}$, the vector $\bm{X}_{2-N}=(X_{2},...,X_{N})$ is drawn from a Gaussian with mean zero and a covariance matrix in which the diagonal elements are $\exp(X_{1})$, and the non-diagonal equal to $\gamma\exp(X_{1})$. If $\gamma=0$, the parameters $X_{2}$ to $X_{N}$ conditional on $X_{1}$ are independent and the funnel distribution resembles the one proposed by \citet{neal2003slice}. 
The value of $\gamma$ controls the degree of correlation between those parameters. When $\gamma = 0$ the parameters are uncorrelated. For the following test we chose this to be $\gamma = 0.95$. We set the number of parameters $N$ to $25$. Using $10^{7}$ iterations, we estimated the IAT and the efficiency of the algorithms for this distribution as shown in Table \ref{tab:table1}. Just like in the AR(1) case we used the minimum number (i.e. 50) of walkers for ESS with each walker initialised at a position sampled from the distribution $\mathcal{N}(0,1)$. Since the optimally-tuned Metropolis fails to sample from this particular distribution, we do not quote any results. The Metropolis sampler is unable to successfully explore the region of parameter space with negative $X_{1}$ values. The presence of strong correlations renders the Ensemble Slice Sampler $30$ times more efficient than the Standard Slice Sampling algorithm on this particular example. In this example, the Differential move outperforms the Gaussian move in terms of efficiency, albeit by a small margin. In general, we expect the former to be more flexible than the latter since it makes no assumption about the Gaussianity of the target-distribution and recommend it as the default configuration of the algorithm. \begin{figure*}[t!] \centering \includegraphics[width=.3\textwidth]{funnel_metropolis.pdf} \includegraphics[width=.3\textwidth]{funnel_standard_slice.pdf} \includegraphics[width=.3\textwidth]{funnel_ensemble_slice.pdf} \caption{The plots compare the 1-sigma and 2-sigma contours generated by the optimised random-walk Metropolis (left), Standard Slice (centre) and Ensemble Slice Sampling (right) methods to those obtained by Independent Sampling (blue) for the correlated funnel distribution. All samplers used the same number of probability density evaluations, $3\times 10^{5}$. 
Only the first two dimensions are shown here.} \label{fig:funnel} \end{figure*} To assess the mixing rate of the algorithm on this demanding case, we set the maximum number of evaluations of the probability density function to $3\times 10^{5}$. As shown in Figure \ref{fig:funnel}, the Ensemble Slice Sampling is the only algorithm out of the three whose outcome closely resembles the target distribution. The results of Metropolis were incorrect for both, the limited run with $3\times 10^{5}$ iterations and the long run with $10^{7}$ iterations. In particular, the chain produced using the Metropolis method resemble a converged chain but in fact it is biased in favour of positive values of $x_{1}$. The problem arises because of the vanishing low probability of accepting a point with highly negative value of $x_{1}$. This indicates the inability of Metropolis to handle this challenging case. For a more detailed discussion of this problem we direct the reader to Section 8 of \cite{neal2003slice}. In general, the correlated funnel is a clear example of a distribution in which a single Metropolis proposal scale is not sufficient for all the sampled regions of parameter space. The locally adaptive nature of ESS solves this issue. \subsection{Comparison to other ensemble methods} So far we have demonstrated Ensemble Slice Sampling's performance in simple, yet challenging, target distributions. The tests performed so far demonstrate ESS's capacity to sample efficiently from highly correlated distributions compared with standard methods such as Metropolis and Slice Sampling. Although the use of Metropolis and Slice Sampling is common, these methods are not considered to be state-of-the-art. For this reason, we will now compare ESS with state-of-the-art gradient-free ensemble MCMC methods. By far, the two most popular choices\footnote{For instance, in the fields of Astrophysics and Cosmology where most models are not differentiable and gradient methods (e.g. 
Hamiltonian Monte Carlo or NUTS) are not applicable the default choice is the Affine-Invariant Ensemble Sampler (AIES) \citep{goodman2010ensemble} as implemented in emcee.} of gradient-free ensemble methods are the Affine-Invariant Ensemble Sampling (AIES) \citep{goodman2010ensemble} method and the Differential Evolution Monte Carlo (DEMC) \citep{ter2006markov} algorithm supplemented with a Snooker update \citep{ter2008differential}. In cases of strongly multimodal target distributions we will also test our method against Sequential Monte Carlo\footnote{As there are many different flavours of SMC, we decided to use the one implemented in \texttt{PyMC3} which utilises importance sampling, simulated annealing and Metropolis sampling.} (SMC) \citep{liu1998sequential, del2006sequential} and Kernel Density Estimate Metropolis (KM) \citep{kombine} which are particle methods specifically designed to handle strongly multimodal densities. \\ \noindent\textbf{Ring distribution:} Although, all three of the compared methods (i.e. ESS, AIES, DEMC) are affine invariant and thus unaffected by linear correlations, they do however differ significantly in the way they handle non-linear correlations. In particular, only Ensemble Slice Sampling (ESS) is locally adaptive because of its stepping-out procedure and therefore able to handle non-linear correlations efficiently. To illustrate ESS's performance in a case of strong non-linear correlations we will use the 16--dimensional ring distribution defined by: \begin{equation} \begin{split} \ln \mathcal{L} = & - \Bigg[ \frac{(x_{n}^{2} + x_{1}^{2} - a)^{2}}{b}\Bigg]^{2} \\ & -\sum_{i=1}^{n-1} \Bigg[ \frac{(x_{i}^{2} + x_{i+1}^{2} - a)^{2}}{b}\Bigg]^{2}\, , \end{split} \label{eq:ring} \end{equation} where $a=2$, $b=1$ and $n=16$ is the total number of parameters. We also set the number of walkers to be $64$ and run the samplers for $10^{7}$ steps discarding the first half of the chains. 
Here we followed the heuristics discussed at the beginning of this section and increased the number of walkers from the minimum of $2\times 16$ to $4\times 16$ due to the presence of strong non-linear correlations in order to achieve the optimal acceptance rate for AIES and DEMC. The number of iterations is large enough for all samplers to converge and provide accurate estimates of the autocorrelation time. The results are shown in Table \ref{tab:table2} and verify that ESS' performance is an order of magnitude better than that of the other methods. \\ \begin{table}[ht!] \centering \caption{The table shows a comparison of the Affine Invariant Ensemble Sampling (AIES), Differential Evolution Markov Chain (DEMC), and Ensemble Slice Sampling methods in terms of the integrated autocorrelation time (IAT) and the number of effective samples per evaluation of the probability density (efficiency) multiplied by $10^5$. These metrics are formally defined in Appendix \ref{app:ess}. The target distributions are the 16--dimensional ring distribution, the 10--dimensional Gaussian shells distribution and the 13--dimensional hierarchical Gaussian process regression distribution. In all cases the total number of iterations was set to $10^{7}$. 
It should be noted that in the case of the Gaussian shells the global move was used instead of the differential move.} \def1.1{1.1} \begin{tabular}{lccc} \toprule[0.75pt] & AIES & DEMC & \textbf{ESS} \\ \midrule[0.5pt] \multicolumn{4}{l}{Ring distribution} \\ \midrule[0.5pt] IAT & 49470 & 91128 & $\mathbf{1675}$ \\ efficiency & 2.0 & 1.1 & $\mathbf{12.2}$ \\ \midrule[0.5pt] \multicolumn{4}{l}{Gaussian shells distribution} \\ \midrule[0.5pt] IAT & 33046 & 2760 & $\mathbf{89}$ \\ efficiency & 3.0 & 36.0 & $\mathbf{731.0}$ \\ \midrule[0.5pt] \multicolumn{4}{l}{Hierarchical Gaussian process regression} \\ \midrule[0.5pt] IAT & 55236 & 30990 & $\mathbf{547}$ \\ efficiency & 1.8 & 3.2 & $\mathbf{38.0}$ \\ \bottomrule[0.75pt] \end{tabular} \label{tab:table2} \end{table} \noindent\textbf{Gaussian shells distribution:} Another example that demonstrates ESS's performance in cases of non-linear correlations is the Gaussian Shells distribution defined as: \begin{equation} \mathcal{L}(\mathbf{\Theta}) = \text{circ}(\mathbf{\Theta}|\mathbf{c}_{1}, r_{1}, w_{1})+\text{circ}(\mathbf{\Theta}|\mathbf{c}_{2}, r_{2}, w_{2}), \label{eq:shells} \end{equation} where \begin{equation} \text{circ}(\mathbf{\Theta}|\mathbf{c}, r, w) = \frac{1}{\sqrt{2\pi}w} \exp \Bigg[-\frac{1}{2} \frac{(|\Theta - \mathbf{c}| - r)^{2}}{w^{2}}\Bigg]. \label{eq:shell} \end{equation} We choose the centres, $\mathbf{c}_{1}$ and $\mathbf{c}_{2}$ to be $-3.5$ and $3.5$ in the first dimension respectively and zero in all others. We take the radius to be $r=2.0$ and the width $w=0.1$. In two dimensions, the aforementioned distribution corresponds to two equal-sized Gaussian Shells. In higher dimensions the geometry of the distribution becomes more complicated and the density becomes multimodal. For our test, we set the number of dimensions to $10$ and the number of walkers to $40$ due to the existence of two modes. 
Since this target distribution exhibits some mild multimodal behaviour we opt for the global move instead of the default differential move although the latter also performs acceptably in this case. The total number of iterations was set to $10^{7}$ and the first half of the chains was discarded. The results are presented in Table \ref{tab:table2}. ESS's autocorrelation time is $2-3$ orders of magnitude lower than that of the other methods and the efficiency is higher by $1-2$ orders of magnitude respectively. \\ \noindent\textbf{Hierarchical Gaussian process regression:} To illustrate ESS's performance in a real-world example we will use a modelling problem concerning the concentration of $CO_{2} $ in the atmosphere adapted from Chapter 5 of \cite{rasmussen2003gaussian}. The data consist of monthly measurements of the mean $CO_{2}$ concentration in the atmosphere measured at the \emph{Mauna Loa Observatory} \citep{keeling2004atmospheric} in \emph{Hawaii} since 1958. Our goal is to model the concentration of $CO_{2}$ as a function of time. To this end, we will employ a \emph{hierarchical Gaussian process} model with a composite covariance function designed to take care of the properties of the data. 
In particular, the covariance function (kernel) is the sum of the following four distinct terms: \begin{equation} k_1(r) = \theta_1^2 \exp \left(-\frac{r^2}{2\theta_2} \right)\, , \label{eq:kernel1} \end{equation} where $r=x-x'$, which describes the smooth trend of the data,
We assume that the 2D circular objects present in the image are described by the Gaussian profile: \begin{equation} \mathbf{G}(x,y; \bm{\theta})= A \exp\bigg[-\frac{(x-X)^{2}+(y-Y)^{2}}{2 R^{2}} \bigg]\, , \label{eq:template} \end{equation} where $\mathbf{\theta}=(X, Y, A, R)$ are parameters that define the coordinate position, the amplitude and the size of the object, respectively. Then the data can be described as: \begin{equation} \mathbf{D}=\mathbf{N} + \sum_{i=1}^{n_{\text{Obj}}} \mathbf{G}(\bm{\theta_{i}})\, , \label{eq:data} \end{equation} where $n_{\text{Obj}}$ is the number of objects in the image and $\mathbf{N}$ is an additive Gaussian noise term. Assuming a $200 \times 200$ pixel-wide image, we can create a simulated dataset by sampling the coordinate positions $(X, Y)$ of the objects from $\mathcal{U}(0, 200)$ and their amplitude $A$ and size $R$ from $\mathcal{U}(1, 2)$ and $\mathcal{U}(3, 7)$, respectively. We sample $n_{\text{Obj}} = 8$ objects in total. Finally, we sample the noise $\mathbf{N}$ from $\mathcal{N}(0,4)$. In practice we create a dataset of $100$ such images and one such example is shown in Figure \ref{fig:objects}. Notice that the objects are hardly visible as they are obscured by the background noise, this makes the task of identifying those objects very challenging. \begin{figure}[thb!] \centering \includegraphics[scale=0.51]{object_detect.pdf} \caption{The plot shows a simulated image used in the Bayesian object detection exercise. There are $8$ circular objects included here. 
As the objects are hardly visible due to the background noise their centres are marked with red stars.} \label{fig:objects} \end{figure} Following the construction of the simulated dataset, the posterior probability density function is defined as: \begin{equation} P(\bm{\theta} | \mathbf{D}) \propto \exp \bigg\{\frac{[\mathbf{G}(\bm{\theta})-\mathbf{D}]^{2}}{2 \sigma^{2}}\bigg\} P(\bm{\theta})\, , \label{eq:obj_post} \end{equation} where $\sigma = 2$ is the standard deviation of the $\mathbf{N}$ noise term. The prior $P(\bm{\theta})$ can be decomposed as the product of prior distributions of $X$, $Y$, $A$, and $R$. We used uniform priors for all of these parameters with limits $(0,200)$ for $X$ and $Y$, $(1,2)$ for $A$, and $(2,9)$ for $R$. It is important to mention here that the posterior does not include any prior information about the exact or maximum number of objects in the data. In that sense, the sampler is agnostic about the exact number, positions and characteristics (i.e. amplitude and size) of the objects that it seeks to detect. We sampled the posterior distribution using $200$ walkers (initialised from the prior distribution) for each image in our dataset (i.e. 100 images in total) using Ensemble Slice Sampling (ESS), Affine Invariant Ensemble Sampling (AIES), and Differential Evolution Markov Chain (DEMC). Although the posterior distribution is multimodal (i.e. $8$ modes) we used the differential move since the number of dimensions is low and there is no reason to use more sophisticated moves like the global move. We used a large enough ensemble of walkers due to the potential presence of multiple modes so that all three samplers are able to resolve them. We ran each sampler for $10^{4}$ iterations in total and we discarded the first half of the chains. We found that, on average for the 100 images, ESS identifies correctly $7$ out of $8$ objects in the image, whereas AIES and DEMC identify $4$ and $5$, respectively. 
In cases where the objects are well-separated ESS often identifies correctly $8$ out of $8$. Its accuracy falls to $7/8$ in cases where two of the objects are very close to each other or overlap. In those cases ESS identifies the merged object as a single object. \\ \noindent\textbf{Gaussian Mixture:} One strengths of ESS is its ability to sample from strongly multimodal distributions in high dimensions. To demonstrate this, we will utilise a Gaussian Mixture of two components centred at $\mathbf{-0.5}$ and $\mathbf{+0.5}$ with standard deviation of $\mathbf{0.1}$. We also put $1/3$ of the probability mass in one mode and $2/3$ in the other. We first set this distribution at $10$ dimensions and we sample this using $80$ walkers for $10^{5}$ steps. The distance between the two modes in this case is approximately $32$ standard deviations. We then increase the number of dimensions to $50$ and we sample it using $400$ walkers for $10^{5}$ iterations. In this case, the actual distance between the two modes is approximately $71$ standard deviations. The total number of iterations was set to $10^{7}$ for all methods but the SMC. This problem consists of two, well separated, modes and thus requires using at least twice the minimum number of walkers (i.e. at least 40 for the 10--dimensional case and 200 for the 50--dimensional one). Although the aforementioned configuration was sufficient for ESS to provide accurate estimates, we opted instead for twice that number (i.e. 80 walkers for the 10--dimensional cases and 400 for the 50--dimensional one) in order to satisfy the requirements of the other samplers, mainly the Kernel Density Estimate Metropolis (KM), but also AIES and DEMC. For the Sequential Monte Carlo (SMC) sampler we used $2000$ and $20000$ independent chains for the low and high dimensional case respectively. 
The temperature ladder that interpolates between the prior and posterior distribution was chosen adaptively, guaranteeing an effective sample size of $90\%$ of the physical size of the ensemble.
The target distribution is a 50--dimensional Gaussian Mixture. The figure shows the 1D marginal distribution for the first parameter of the 50.}
The results are plotted in Figure \ref{fig:parallel}. We sampled the aforementioned distribution multiple times in order to get estimates of the confidence integrals shown in Figure \ref{fig:parallel}. The required time to do the pre-specified number of iterations scales as $\mathcal{O}(1/n_{\text{CPUs}})$ as long as $n_{\text{CPUs}}\leq n_{\text{Walkers}}/2$. This result does not depend on the specific distribution. We can always use all the available CPUs by matching the size of the complementary ensemble (i.e. half the number of walkers) to the number of CPUs. \begin{figure}[t!] \centering \includegraphics[scale=0.45]{parallel.pdf} \caption{The plot shows the time $t_{f}$ required for ESS to complete a pre-specified number of iterations as a function of the ratio of the number of available CPUs $n_{\rm CPUs}$ to the total number of walkers $n_{\rm Walkers}$. The results are normalised with respect to the single CPU case $t_{1}$. The method scales as $\mathcal{O}(1/n_{\text{CPUs}})$ as long as $n_{\text{CPUs}}\leq n_{\text{Walkers}}/2$ (dashed line). The shaded areas show the $2-\sigma$ intervals.} \label{fig:parallel} \end{figure} \section{Discussion} \label{sec:discussion} In Section \ref{sec:empirical} we provided a quantitative comparison of the efficiency of Ensemble Slice Sampling compared to other methods. In this Section we will provide some qualitative arguments to informally demonstrate the advantages of Ensemble Slice Sampling over other methods. Furthermore, we will briefly discuss some general aspects of the algorithm and place our work in the context of other related algorithms. After the brief adaptation period is over and the length scale $\mu$ is fixed, the Ensemble Slice Sampling algorithm performs on average $5$ evaluations of the probability density per walker per iteration, assuming that either the differential or Gaussian move is used. 
This is in stark contrast with Metropolis-based MCMC methods that perform $1$ evaluation of the probability density per iteration. However, the non-rejection nature of Ensemble Slice Sampling more than compensates for the higher number of evaluations as shown in Section \ref{sec:empirical}, thus yielding a very efficient scheme. One could think of the number of walkers as the only free hyperparameter of Ensemble Slice Sampling. However, choosing the number of walkers is usually trivial. As we mentioned briefly at the end of Section \ref{sec:ensemble}, there is a minimum limit to that number. In particular, in order for the method to be ergodic, the ensemble should be made of at least $2\times D$ walkers\footnote{The reason that the minimum limit is $2\times D$ instead of $D+1$ has to do with the ensemble splitting procedure that we introduced in order to make the method parallel. Splitting the ensemble into two equal parts means that each walker is updated based on the relative displacements of half the ensemble.}, where $D$ is the number of dimensions of the problem. Assuming that the initial relative displacements of the walkers span the parameter space (i.e. they do not belong to a lower-than-$D$-dimensional space) the resulting algorithm would be ergodic. As shown in Section \ref{sec:empirical}, using a value close to the minimum number of walkers, meaning twice the number of parameters, is generally a good choice. Furthermore, we suggest to increase the number of walkers by a multiplicative factor equal to the number of well separated modes (e.g. four times the number of dimensions in a bimodal density). Other cases in which increasing the number of walkers can improve the sampling efficiency include target distributions with strong non-linear correlations between their parameters. Regarding the initial positions of the walkers, we found that we can reduce the length of the burn-in phase by initialising the walkers from a tight sphere (i.e. 
Normal distribution with a very small variance) close to the \emph{Maximum a Posteriori} (MAP) estimate. In high dimensional problems, the MAP estimate will not reside in the typical set and the burn-in phase might be longer. We found that the tight sphere initialisation is still an efficient strategy compared to a more dispersed initialisation \citep{foreman2013emcee}. Other approaches include initialising the walkers by sampling from the prior distribution or the \emph{Laplace approximation} of the posterior distribution. In multimodal cases, a prior initialisation is usually a better choice. A brief simulated annealing phase can also be very efficient, particularly in cases with many well separated modes. Recent work on the No U-Turn Sampler \citep{hoffman2014no} has attempted to reduce the hand-tuning requirements of Hamiltonian Monte Carlo \citep{betancourt2017conceptual} using the dual averaging scheme of \citet{nesterov2009primal}. In order to achieve a similar result, we employed the much simpler stochastic approximation method of \citet{robbins1951stochastic} to tune the initial length scale $\mu$. The Affine Invariant Ensemble Sampler \citep{goodman2010ensemble} and the Differential Evolution MCMC \citep{ter2006markov} use an ensemble of walkers to perform Metropolis updates. Our method differs by using the information from the ensemble to perform Slice Sampling updates. So why does ESS perform better, as demonstrated, compared to those other methods? The answer lies in the locally adaptive and non-rejection nature of the algorithm (i.e. stepping out and shrinking) that enables both efficient exploration of non-linear correlations and large steps in parameter space (e.g. using the global move)\footnote{Indeed, large steps like the ones in the 50--dimensional Gaussian Mixture example would not have been possible without the non-rejection aspect of the method as most attempts to jump to the other mode would have missed it using Metropolis updates.}. 
For all numerical benchmarks in this paper we used the publicly available, open source \texttt{Python} implementation of Ensemble Slice Sampling called \texttt{zeus}\footnote{The code is available at \url{https://github.com/minaskar/zeus}.}~\citep{karamanis2021zeus}. \section{Conclusion} \label{sec:conclusion} We have presented Ensemble Slice Sampling (ESS), an extension of Standard Slice Sampling that eliminates the latter's dependence on the initial value of the length scale hyperparameter and augments its capacity to sample efficiently and in parallel from highly correlated and strongly multimodal distributions. In this paper we have compared Ensemble Slice Sampling with the optimally-tuned Metropolis and Standard Slice Sampling algorithms. We found that, due to its affine invariance, Ensemble Slice Sampling generally converges faster to the target distribution and generates chains of significantly lower autocorrelation. In particular, we found that in the case of AR(1), Ensemble Slice Sampling generates an order of magnitude more independent samples per evaluation of the probability density than Metropolis and Standard Slice Sampling. Similarly, in the case of the correlated funnel distribution, Ensemble Slice Sampling outperforms Standard Slice Sampling by an order of magnitude in terms of efficiency. Furthermore, in this case, Metropolis-based proposals fail to converge at all, demonstrating that a single Metropolis proposal scale is often not sufficient. When compared to state-of-the-art ensemble methods (i.e. AIES, DEMC) Ensemble Slice Sampling outperforms them by $1-2$ orders of magnitude in terms of efficiency for target distributions with non-linear correlations (e.g. the Ring and Gaussian shells distributions). In the real world example of hierarchical Gaussian process regression, ESS's efficiency is again superior by $1-2$ orders of magnitude. Furthermore, in the Bayesian object detection example ESS achieved higher accuracy compared to AIES and DEMC. 
Finally, in the strongly multimodal case of the Gaussian Mixture, ESS outperformed all other methods (i.e. SMC, AIES, DEMC, KM) and was the only sampler able to produce reliable results in $50$ dimensions. The consistent high efficiency of the algorithm across a broad range of different problems along with its parallel, black-box and gradient-free nature, renders Ensemble Slice Sampling ideal for use in scientific fields such as physics, astrophysics and cosmology, which are dominated by a wide range of computationally expensive and almost always non-differentiable models. The method is flexible and can be extended further using for example tempered transitions~\citep{iba2001extended} or subspace sampling~\citep{vrugt2009accelerating}. \begin{acknowledgements} The authors thank Iain Murray and John Peacock for providing constructive comments on an early draft. The authors would also like to extend their gratitude to the anonymous reviewer and editor for providing comments that helped improve the quality of the manuscript. FB is a Royal Society University Research Fellow. FB is supported by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (Grant agreement No. 853291). \end{acknowledgements}
{ "redpajama_set_name": "RedPajamaArXiv" }
6,475
\section{Introduction} Topological quantum computation (TQC) is expected to be physically realized on quantum systems in \emph{topological phases}. For example, the quasi-particle excitations in fractional quantum hall liquids are conjectured to exhibit the topological behavior necessary to support TQC. A definition of topological phase is found in \cite{dSetal}: ``\textit{...a system is in a topological phase if its low-energy effective field theory is a topological quantum field theory (TQFT)}''. Thus all observable properties of topological phases should be expressible in terms of the structure of the corresponding TQFT. On the other hand, it is known \cite{Tur} that \emph{modular categories} faithfully encode (3D) TQFTs in algebraic terms. These relationships between modular categories, TQFT, topological phases and topological quantum computers are illustrated in Figure \ref{fig:1}. The solid arrows represent well-established or (tautological) one-to-one correspondences, while the dashed arrows represent theoretical expectations. \begin{figure}[t0] \centerline{\includegraphics[width=3.45in]{figure1.eps}} \caption{(a) equivalent by \cite{Tur}, (b) essentially by definition, see \cite{dSetal}, (c) idea originated in \cite{FPNAS}, (d) first described in \cite{Kit} } \label{fig:1} \end{figure} While the algebraic axioms defining modular categories may seem quite distant from condensed matter physics and quantum computation, certain natural statistics in modular categories appear to correspond to important computational properties in TQC. Most significantly, the images of the braid group representations associated to a modular category are intimately related to the computational power of the corresponding TQC. 
We illustrate this with two well-known examples: \begin{example} Consider the (unitary) modular category $\mathcal{C}(\mathfrak{sl}_2,e^{\pi i/5})$ obtained as a subquotient of the representation category of the quantum group $U_q\mathfrak{sl}_2$ with $q=e^{\pi i/5}$ (associated with the ${\rm SU}(2)$-Chern-Simons-Witten TQFT at level $3$). We note the following: \begin{enumerate} \item The images of the associated braid group representations are as large as possible, i.e. \emph{dense} in the group of special unitaries \cite{FLW2}. \item A topological quantum computer realized upon a physical system algebraically modeled by $\mathcal{C}(\mathfrak{sl}_2,e^{\pi i/5})$ is \textit{universal}. \item The associated link invariant is $J_L(e^{2\pi i/5})$ the Jones polynomial evaluated at $q^2$, which has computational complexity $\#P$-hard \cite{JVW}. \end{enumerate} \end{example} Moreover, approximate computation of the Jones polynomial at a $5$th root of unity is known to be $BQP$-complete, so that it is essentially the hardest problem any quantum computer can hope to solve. \begin{example} Consider the (unitary) modular category $\mathcal{C}(\mathfrak{sl}_2,e^{\pi i/4})$ obtained as a subquotient of the representation category of the quantum group $U_q\mathfrak{sl}_2$ with $q=e^{\pi i/4}$ (associated with the ${\rm SU}(2)$-Chern-Simons-Witten TQFT at level $2$). We note the following: \begin{enumerate} \item The images of the associated braid group representations factor over finite groups \cite{jones86}. \item A topological quantum computer realized upon a physical system algebraically modeled by $\mathcal{C}(\mathfrak{sl}_2,e^{\pi i/4})$, while highly-entangling (see \cite{FRW}, \cite{KL}) is not universal. \item The associated link invariant is $J_L(i)$ the Jones polynomial evaluated at $q^2=i$, which can be computed in polynomial time \cite{JVW}. 
\end{enumerate} \end{example} Two paradigms based upon these examples might then associate density of the braid group image with universal quantum computers and $\#P$-hard computational problems, and finite braid group images with non-universal (but potentially entangling) quantum devices and polynomial-time computational problems. Indeed, all evaluations of the Jones, HOMFLYPT and Kauffman link invariants that are polynomial-time computable on a classical computer are associated with ``classical'' link invariants (see \cite[Theorems 6.3.2, 6.3.5 and 6.3.6]{Welsh}), by which we mean link invariants pre-dating quantum topology. Moreover, the corresponding braid group images in these cases have been shown to be finite in essentially all cases (see \cite{FLW2}, \cite{GoldJones}, \cite{LRW}, \cite{LR}, \cite{jones86}). However, a deeper examination of further examples reveals that this is not quite correct and a slight refinement is necessary. In this paper we intend to describe such a refinement of these two paradigms. Our aim is three-fold: to give paradigms that can be of theoretical value to physicists, to describe a few conjectures of mathematical interest, and to present one perspective on the landscape of inter-related fields represented in the topological quantum computation endeavor. Any attempt to be fully self-contained would require the introduction of many concepts from category theory, low-dimensional topology, complexity theory and condensed matter physics. For brevity's sake, we will content ourselves with providing the reader with a few references. For an excellent survey of the physical and theoretical set-up for TQCs, see \cite{dSetal}. For the categorical and topological concepts, see \cite{BK} and \cite{Tur}. For complexity theory applied to topological invariants, see \cite{Welsh}. \subsubsection*{Acknowledgments} The author would like to thank the following people for their generosity in valuable correspondence and conversations: S. 
Witherspoon, L. Goldberg, Y. Zhang, J. Ospina, M. Rojas, A. Bulatov, Z. Wang, G. Kuperberg, T. Stanford, and M. Thistlethwaite. \section{Background} We briefly describe some of the important features of modular categories and their relationships with topological phases, link invariants and topological quantum computers. A unitary modular category (UMC) $\mathcal{C}$ is a semisimple $\mathbb{C}$-linear rigid ribbon category of finite rank satisfying a certain non-degeneracy condition, such that the morphism spaces are equipped with a positive definite hermitian form compatible with the other structures. The representation category of a finite group is an example of a category that satisfies all but one of the defining axioms of unitary modular tensor categories: namely it fails the \emph{modularity} (non-degeneracy) condition. UMCs are constructed in a diversity of ways from various fields of mathematics. \subsection{Constructions of UMCs} Often very different constructions yield equivalent categories, so we will only list a few well-known explicit constructions. \begin{enumerate} \item \textbf{Quantum groups.} To any finite dimensional simple Lie algebra $\mathfrak{g}$ and a root of unity $q=e^{\pi i/\ell}$ one may associate a pre-modular category $\mathcal{C}(\mathfrak{g},q)$. These are obtained as subquotients of the category of finite dimensional representations of the quantum group $U_q\mathfrak{g}$, see \cite{Rsurvey} for a survey. Such a category may fail to be modular or unitary (see \cite{Rowell1} and \cite{RJPAA}), but such circumstances can be avoided by certain restrictions on $\ell$. Specifically, define $m=1$ for Lie types $A,D$ and $E$, $m=2$ for Lie types $B,C$ and $F_4$ and $m=3$ for Lie type $G_2$. Then $\mathcal{C}(\mathfrak{g},q)$ is a UMC provided $m\mid\ell$ (see \cite{Wenzlcstar}). \item \textbf{Finite groups.} Fix a finite group $G$ and a 3-cocycle $\omega$. 
Then the twisted double of $G$, $D^\omega G$, is a finite dimensional quasi-triangular quasi-Hopf algebra. The representation category ${\rm Rep}(D^\omega G)$ is always a UMC, see \cite{BK} for details. \item \textbf{Doubled spherical categories.} There is a doubling procedure from which one obtains a modular category $\mathcal{Z}(\mathcal{S})$ from a \emph{spherical category} $\mathcal{S}$ (see \cite{BarWest} for the precise definition, and \cite{Muger2} for the double construction). Briefly, a spherical category is a tensor category that is not necessarily braided but for which one has a canonical trace function. Examples are ribbon categories and certain categories obtained from von Neumann algebras (see \cite{Izumi} for a description of the latter). In fact, the representation categories of twisted doubles of finite groups can be obtained as the double of the spherical category ${\rm Rep}(\mathbb{C}[G])$ of representations of the group algebra of $G$. Very few explicit ``new'' examples of modular categories obtained in this way have been worked out. A few infinite families can be found in \cite{Izumi}, and the analyses of two examples are worked out in detail in \cite{HRW}. If the spherical category $\mathcal{S}$ is unitary the double $\mathcal{Z}(\mathcal{S})$ will be a UMC. \end{enumerate} \subsection{Braid Group Representations} The axioms of a UMC imply that for any object $X$ in a UMC $\mathcal{C}$ one obtains a (highly non-degenerate) unitary representation $\phi_X^n:\mathcal{B}_n\rightarrow{\rm U}({\rm End}(X^{\otimes n}))$. Recall that $\mathcal{B}_n$, the braid group on $n$-strands, is the group with $n-1$ generators $\sigma_1,\ldots,\sigma_{n-1}$ satisfying: \begin{enumerate} \item[(B1)] $\sigma_i\sigma_j=\sigma_j\sigma_i$ if $|i-j|\ge 2$ \item[(B2)] $\sigma_i\sigma_{i+1}\sigma_i=\sigma_{i+1}\sigma_i\sigma_{i+1}$ for $1\leq i\leq n-2$. 
\end{enumerate} The braiding on $\mathcal{C}$ requires that there are natural braiding isomorphisms $C_{X,Y}:X\otimes Y\cong Y\otimes X$. In particular one obtains natural isomorphisms $$R_X^i:=Id_X^{\otimes (i-1)}\otimes C_{X,X}\otimes Id_X^{\otimes (n-i-1)}\in{\rm End}(X^{\otimes n})$$ so that the left action of ${\rm End}(X^{\otimes n})$ on itself induces the representation $\phi_X^n$ by $$\phi_X^n(\sigma_i)f=R_X^i\circ f.$$ The unitarity of $\phi_X^n$ is due to the fact that ${\rm End}(X^{\otimes n})$ is a Hilbert space, the naturality of the braiding isomorphisms and the compatibility of the hermitian form with the other structures. Given such a representation it is natural to ask \begin{question} What is the closure of $\phi_X^n(\mathcal{B}_n)$ in ${\rm U}({\rm End}(X^{\otimes n}))$? \end{question} Indeed, this question was asked by Jones in \cite{jones86} long before its relevance to quantum computing was realized. Let us suppose that we have a decomposition ${\rm End}(X^{\otimes n})=\bigoplus_k V_k$ into irreducible $\mathcal{B}_n$-representations, and fix one irreducible subrepresentation $V_k$. Denote by $\Gamma_k$ the closure of the image of $\mathcal{B}_n$ in ${\rm U}(V_k)$. Then $\Gamma_k$ modulo its center is exactly one of the following: \begin{enumerate} \item A finite abelian group \item A finite non-abelian group \item An infinite compact group containing ${\rm SU}(V_k)$ \item An infinite compact group not containing ${\rm SU}(V_k)$ \end{enumerate} These motivate the following: \begin{definition} \begin{enumerate} \item If $\Gamma_k/Z(\Gamma_k)$ is always a finite group for all objects $X$ in $\mathcal{C}$, all $n\in\mathbb{N}$, and all irreducible subrepresentations $V_k\subset{\rm End}(X^{\otimes n})$ then we say $\mathcal{C}$ has \textbf{property F}. 
\item If $\Gamma_k/Z(\Gamma_k)$ is always a finite \emph{abelian} group for all objects $X$ in $\mathcal{C}$, all $n\in\mathbb{N}$, and all irreducible subrepresentations $V_k\subset{\rm End}(X^{\otimes n})$ then we say $\mathcal{C}$ has \textbf{property A}. (Observe that this is the case whenever $\dim V_k=1$ for all $X,n$ and $k$.) \item If there exists an object $X$ in $\mathcal{C}$ and $N\in\mathbb{N}$ such that for all $n\geq N$ and for each irreducible subrepresentation $V_k\subset{\rm End}(X^{\otimes n})$ the group $\Gamma_k/Z(\Gamma_k)$ contains ${\rm SU}(V_k)$ we say $\mathcal{C}$ has the \textbf{density property}. \end{enumerate} \end{definition} In nearly all cases one encounters in the literature, $\mathcal{C}$ has either property \textbf{F} or the density property (see e.g. \cite{jones86}, \cite{FLW2}, \cite{LRW}, \cite{ERW} and \cite{LR}). \begin{remark} One may generalize the construction above in the following way. The \emph{pure braid group} $\mathcal{P}_n$ is the (normal) subgroup of $\mathcal{B}_n$ generated by the conjugacy class of $\sigma_1^2$, or equivalently, the kernel of the obvious homomorphism $\mathcal{B}_n\rightarrow S_n$ that sends $\sigma_i$ to the transposition $(i,i+1)$. So geometrically $\mathcal{P}_n$ consists of the braids whose strands begin and end at the same position. Now fix any set of $n$ objects $X_{i(1)},\ldots,X_{i(n)}$. Then $\mathcal{P}_n$ acts on ${\rm End}(\bigotimes_j X_{i(j)})$ in the obvious way using the braiding operators of the form $(C_{X,Y})^2$ and their conjugates. One might ask if the image of $\mathcal{P}_n$ is finite or infinite for all $n$ and all choices of $X_{i(j)}$. 
But this is not a more general question: If we define $X=\bigoplus_jX_{i(j)}$ then if $\mathcal{C}$ has property $F$, $\mathcal{B}_n$ has finite image on ${\rm End}(X^{\otimes n})$, so that by restricting to $\mathcal{P}_n$ and to the subspace ${\rm End}(\bigotimes_j X_{i(j)})\subset{\rm End}(X^{\otimes n})$, one sees that the $\mathcal{P}_n$ image is finite as well. Obviously the converse is true as well: since $\mathcal{P}_n$ has finite index, we may take $X_{i(j)}=Y$ for all $j$ and so finiteness of the $\mathcal{P}_n$ image implies finiteness of the $\mathcal{B}_n$ image. Similar statements can be made if we replace $\mathcal{P}_n$ by any finite index subgroup of $B_n$ obtained as a pull-back of a subgroup of $S_n$ via the homomorphism above. For example, the subgroup of $\mathcal{B}_n$ generated by those elements with the first strand beginning and ending at the same vertical position is the pull-back of the subgroup of $S_n$ that fixes $1$. \end{remark} \subsection{Link Invariants} Associated to any modular category $\mathcal{C}$ is a $3D$-TQFT, which gives rise to $3$-manifold and link invariants. In essence the link invariants are obtained by representing a link $L$ as the closure of a braid $\beta\in\mathcal{B}_n$, and then taking the trace of the image of $\beta$ in one of the representations $\phi_X^n$ of $\mathcal{B}_n$ described above. More generally, one colors each component of $L$ with objects $X_{i(j)}$ of $\mathcal{C}$ and represents the colored link as the closure of a braid $\gamma$ where the strands of $\gamma$ must respect the given coloring. Then one takes the trace of the image of $\gamma$ in the appropriate endomorphism space. See \cite[Chapter II]{Tur} for full details. There are two standard choices that will appear below. We consider the invariants corresponding to coloring all components with either a fixed simple object $X_i$ or the sum of all simple objects. 
The link invariants associated to the modular categories mentioned above are as follows, where $q=e^{\pi i/\ell}$: \begin{enumerate} \item The link invariant associated to $\mathcal{C}(\mathfrak{sl}_2,q)$ where we color each component with the object analogous to the irreducible $2$-dimensional representation of $\mathfrak{sl}_2$ is the Jones polynomial $J_L(q^2)$. \item More generally, the link invariant associated to $\mathcal{C}(\mathfrak{sl}_n,q)$ is a one-variable specialization of the (reparameterized) HOMFLYPT polynomial $P^\prime_L(q,n)$. As above, the invariant $P^\prime_L$ corresponds to coloring each strand with the object analogous to the $n$-dimensional representation of $\mathfrak{sl}_n$. In the setting of Hecke algebras, this corresponds to the $n$-row quotient. \item Consider the category $\mathcal{C}(\mathfrak{g},q)$ where $\mathfrak{g}$ is of Lie type $B,C$ or $D$, and in the first two cases $\ell$ is even, and let $X$ be the object analogous to the vector representation of $\mathfrak{g}$. Then the invariant associated to $X$ is a specialization of the (Dubrovnik version) of the Kauffman polynomial $F_L(q^k,q)$, where $k$ depends on the rank of $\mathfrak{g}$. \item Link invariants associated with $\mathcal{C}(\mathfrak{g},q)$ for $\mathfrak{g}$ of other Lie types have not been extensively studied, nor have invariants associated with objects other than those analogous to the vector representation. There are two exceptions. Explicit skein relations have been worked out by G. Kuperberg for Lie type $G_2$. Also, the invariant associated with the object analogous to the fundamental spin representation of $\mathfrak{so}_p$ in $\mathcal{C}(\mathfrak{so}_{p},q)$ with $\ell=2p$, $p$ an odd prime is known to be related to the homology modulo $p$ of the double cyclic cover $M_L$ of $S^3$ branched over the given link $L$ (see \cite{dBG} and \cite{GoldJones}). 
\item The link invariants associated to the modular categories ${\rm Rep}(D^\omega G)$ with $\omega=0$ are described in \cite{FQ}. Specifically, if we color each component of $L$ with the sum of the simple objects (or with $DG$ itself), one gets (a normalization of) the classical link invariant $$H_L(G)=|{\rm Hom}(\pi_1(S^3\setminus L),G)|.$$ That is, for a fixed link $L$ it counts the homomorphisms from the fundamental group of the link-complement to the finite group $G$. \item the TQFTs associated with doubled spherical categories are usually called Turaev-Viro(-Ocneanu) TQFTs. The associated link invariants are not well-studied, although some attention has been paid to two of these ``exotic'' examples, see \cite{HRW}. \end{enumerate} Later the computational complexity of evaluating these link invariants will be discussed. Two important complexity classes are $FP$ and $\#P$. The class of functions that are computable in polynomial time in the length of the input are of complexity $FP$, which is most closely associated with decision problems of complexity $P$. The class of counting functions of complexity $\#P$ are related to decision problems of complexity $NP$, where instead of asking if there exists a ``yes'' answer one counts the number of ``yes'' answers. For example, deciding if a given Boolean expression $E$ has an assignment of truth values that satisfy $E$ is $NP$-complete, while counting the number of such assignments is $\#P$-complete. \section{The Paradigms} The two paradigms are shown in Figures \ref{fig:dense} and \ref{fig:finite} respectively. Each has three boxes representing braid group images, complexity of link invariants, and utility in quantum computation. Our limited expertise in physics led us to exclude any corresponding speculations from the paradigm, however, see Remark \ref{conclusions} below. 
\subsection{Dense Image Paradigm} \begin{figure}[t0] \centerline{\includegraphics[width=3.45in]{figure3.eps}} \caption{Dense Image Paradigm} \label{fig:dense} \end{figure} In Figure \ref{fig:dense}, ``Braid group image dense'' represents those unitary modular categories which have the density property. The ``Link invariant'' box requires some explanation. We say computation of the link invariants are $\#P$-hard because in each known case the exact computation of the invariant can be reduced to a counting problem. For example, an evaluation of the Jones polynomial of a link $L$ at a root of unity $q^2$ is an integer linear combination of the Galois conjugates of $q$, so that computing each coefficient may be regarded as a counting problem. That such an evaluation is \emph{hard} means that if we could find an efficient algorithm for such a problem, we could (in principle) adapt our algorithm to efficiently solve any $\#P$ problem. However, comparing the quantum computation of a link invariant to classical exact computation is at some level unrealistic for at least two reasons: 1) quantum computation is probabilistic, while classical computation is deterministic and 2) most quantum computations will involve approximate application of some quantum gate (unitary operator), so that the output will be an approximate evaluation as well. A more relevant question to ask is: does a link invariant $f$ have a \emph{fully polynomial randomized approximation scheme} (FPRAS)? That is, does there exist an algorithm whose input is a link $L$ with braid index at most $n$ and an error threshold $\varepsilon>0$, whose output is a number $Y$ so that $$Pr\left(\frac{1}{1+\varepsilon}\le \frac{Y}{f(L)}\le 1+\varepsilon\right)>3/4$$ that runs in polynomial time in $n$ and $1/\varepsilon$? Of course by running such an algorithm multiple times, one may improve the certainty that the approximation of $f(L)$ is correct within an $\varepsilon$ factor of $f(L)$. 
The associated decision problem complexity class is $RP$ (\emph{randomized polynomial time}). It is widely believed that $RP\neq NP$, and the non-existence of an $FPRAS$ for a given problem is usually proved under this assumption. \subsection{Finite Image Paradigm} Most of the relationships in the Finite Image paradigm (Figure \ref{fig:finite}) can be understood from the remarks on the Dense Image paradigm above. Notice that we have excluded the finite abelian braid group images from the description. This is because the cases where the images of the braid group are finite abelian are mathematically trivial, corresponding to \emph{abelian anyons}. Firstly, the link invariant will essentially count components or at best linking numbers, which can be done classically in polynomial time. Secondly, the representations of the braid group in these cases are all $1$-dimensional. Because of this, there is no ground state degeneracy and hence any device based upon such systems would not even be capable of efficiently storing information, i.e. they would be non-entangling. It is interesting to note that, to date, the only topological phases that have been convincingly shown to exist are abelian anyons (see \cite{dSetal}). Non-universal quantum devices that can at least produce entangled qubits could potentially be used to store quantum information and even be useful in quantum error correction (see e.g. \cite{YRWGW}). A well-known example is the Bell basis change matrix which is related to the Jones polynomial at $t=i$. \begin{figure}[t] \centerline{\includegraphics[width=3.45in]{figure2.eps}} \caption{Finite Image Paradigm} \label{fig:finite} \end{figure} \subsection{Evidence} Analyses of the braid group images and the computational complexity of the link invariant evaluations associated to many of the modular categories described above have been carried out. 
We discuss each in turn, recording the precise evidence for the two paradigms in Table \ref{evidence} where speculations are in bold type. For notational convenience, set $q=e^{\pi i/\ell}$, and denote by $c$ the number of components of a link $L$. Let $d_k$ be the dimension of the homology space modulo $k$ of the double cyclic cover of $S^3$ branched over $L$. $K(L)$ is a classical invariant that only depends on the linking matrix of $L$. \subsubsection{\textbf{Jones polynomial}} For $\mathcal{C}(\mathfrak{sl}_2,q)$ with $q=e^{\pi i/\ell}$, $3\leq\ell$ and $X_1$ the object corresponding to the fundamental $2$-dimensional representation of $\mathfrak{sl}_2$ the algebra ${\rm End}(X_1^{\otimes n})$ is isomorphic to the Temperley-Lieb algebra $TL_n(q^2)$. Jones determined precisely when the braid group images are finite in \cite{jones86}, and in all other cases it is shown in \cite[Theorem 0.1]{FLW2} that the braid group images are dense. The (exact) computational complexity of the corresponding link invariant the Jones polynomial $J_L(q^2)$ was worked out in \cite{JVW}, where it is shown that, except for $\ell\in\{1,2,3,4,6\}$ the complexity class is $\#P$-hard. This was accomplished by using a result of Thistlethwaite that evaluating the Jones polynomial at $t=q^2$ for $L$ an alternating link is essentially equivalent to computing the Tutte polynomial of an associated plane graph $G(L)$ at $(-t,-1/t)$, which is shown to be $\#P$-hard except at the special points described above. The Jones polynomial at these special points degenerates to a ``classical'' link-invariant that is computable in polynomial time. We conjecture the following: \begin{conjecture} There is no FPRAS for evaluating $J_L(q^2)$ except at the special points described above, provided $RP\neq NP$. \end{conjecture} This conjecture is partially motivated by the belief that quantum computers are strictly more powerful than classical computers. 
If this conjecture were false, there would be an $FPRAS$ for a $BQP$-complete problem. A second, less philosophical, piece of evidence is found in \cite{GJ}, where it is shown that, away from the positive quadrant in the rational $xy$-plane and a few exceptional curves, no $FPRAS$ exists for evaluating the Tutte polynomial at $(x,y)$. This result does not apply to complex pairs $(x,y)$ and so does not give any information for the Jones polynomial at roots of unity, but is nonetheless compelling evidence for our conjecture. \subsubsection{\textbf{HOMFLYPT polynomial}} Generalizations of the results above to the categories $\mathcal{C}(\mathfrak{sl}_n,q)$ with $q=e^{\pi i/\ell}$ and the corresponding specializations of the HOMFLYPT polynomial are found in \cite{Welsh} (due to Vertigan), \cite{FLW2}, and \cite{GoldJones}. The role of the Temperley-Lieb algebra is taken by specializations of the two-parameter Hecke-algebra (see \cite{jones87}), and the results are of the same format with one exception: for $n\geq 3$ one may have infinite braid group images that are not dense, see \cite[Theorem 4.1]{FLW2}. See also \cite{MOO} for a related invariant obtained by summing over all simple objects. Since the Jones polynomial can be obtained as a specialization of the HOMFLYPT polynomial, FPRASability of the HOMFLYPT polynomial would imply the same for the Jones polynomial. \subsubsection{\textbf{Kauffman polynomial}} The computational complexity of evaluating the Kauffman polynomial has been worked out by Vertigan, see \cite{Welsh}. The relevant modular categories are obtained from the categories of the form $\mathcal{C}(\mathfrak{g},q)$ with $\mathfrak{g}\in\{\mathfrak{so}_N,\mathfrak{sp}_{2N}\}$. In these cases the algebras ${\rm End}(X^{\otimes n})$ are related to specializations of the form $r=q^k$ of $BMW$-algebras $C_n(r,q)$ (see \cite[Prop. 2.1]{wenzlsurvey}), where as usual $X$ is the quantum analogue of the vector representation. 
The braid group images are worked out in all non-trivial cases except $r=\pm i$ in \cite{J2}, \cite{LRW}, \cite{LR} and \cite{Jthesis}. In general the images are either finite or dense, although exceptions are found in \cite{LR}, and are expected for $r=\pm i$. Again, as the Jones polynomial can be obtained as a specialization of the Kauffman polynomial, FPRASability of the Kauffman polynomial would imply the same for the Jones polynomial. \subsubsection{$\mathbf{d_n=\dim H_1(M_L,\mathbb{Z}_n)}$} The categories $\mathcal{C}(\mathfrak{so}_{2n+1},e^{\pi i/\ell})$ with $\ell=2(2n+1)$ may be regarded as the extension of the series of modular categories whose first two terms are $\mathcal{C}(\mathfrak{sl}_2,e^{\pi i/6})$ and $\mathcal{C}(\mathfrak{sp}_4,e^{\pi i/10})$. At least for $2n+1=p\geq 7$ prime, the corresponding link invariants are $\pm(\sqrt{p})^{d_p}$ where $d_p=\dim H_1(M_L,\mathbb{Z}_p)$ with $M_L$ the double cyclic cover of $S^3$ branched over $L$, see \cite{GoldJones} and \cite{dBG}. Polynomial algorithms exist for computing the dimension of these homology spaces, and the braid group images are shown to be finite (symplectic) groups in \cite{GoldJones}. It seems reasonable that this should hold for arbitrary $2n+1$ as well. \subsubsection{$\mathbf{H_L(G)=|{\rm Hom}(\pi_1(S^3\setminus L),G)|}$} While the fact that the invariant corresponding to the modular category ${\rm Rep}(DG)$ for $G$ a finite group is the classical invariant $H_L(G)$ has been known for some time, the computational complexity has not been studied to our knowledge. Moreover, the fact that ${\rm Rep}(DG)$ has property $F$ was shown only recently \cite{ERW}. Recent results suggest the following: \begin{conjecture} Let $G$ be a finite group and $L$ a link. \begin{enumerate} \item[(a)] There exists an $FPRAS$ for computing $H_L(G)$ for any group $G$. \item[(b)] Suppose $G$ is solvable. Then there is a polynomial algorithm for exact computation of $H_L(G)$. 
\end{enumerate} \end{conjecture} We support this conjecture with the following list of facts: \begin{enumerate} \item Clearly if $G$ is an abelian group and $L$ has $k$ components, then $H_L(G)=|{\rm Hom}(H_1(S^3\setminus L),G)|=|{\rm Hom}(Z^k,G)|=|G|^k$. \item It is shown in \cite{Eis} that if $G$ is nilpotent and $L$ is a knot then $H_L(G)=|G|$ is constant. So at least for knots, $H_L(G)$ is polynomial time computable for $G$ nilpotent. \item In \cite{MS} an algorithm for computing the number of homomorphisms from a given finitely presented group $\Gamma$ to a finite solvable group is given. It is not clear if this algorithm finishes in polynomial time (in, say, the number of generators of $\Gamma$), but it certainly supports the case for (b). Moreover, in preliminary computations (worked out with S. Witherspoon) for $G$ a generalized dihedral group we found that the corresponding braid group representation is equivalent to a finite field evaluation of the Burau representation. This is significant, as the Burau representation supports the Alexander polynomial, which is known to be polynomial-time computable. \item Even in the non-solvable case, an algorithm exists: $\pi_1(S^3\setminus L)$ has presentation $\langle x_1,\ldots,x_n: R_1,\ldots,R_m\rangle$ with $n$ and $m$ are bounded by $N+M$ where $N$ is the number of strands in some projection of $L$ and $M$ is the number of crossings. One checks all $|G|^n$ $n$-tuples against the $m$ relations to find homomorphisms. One could improve this algorithm slightly by applying automorphisms of $G$, but the algorithm would still be exponential in $n$. Perhaps a randomization of this algorithm where one samples a moderately-sized subset of the $n$-tuples of elements of $G$ and then approximates $H_L(G)$ by proportion would provide an $FPRAS$. Whether this could be done efficiently and accurately would require some analysis. 
We should mention that it is widely believed that an $FPRAS$ exists for computing $H_L(G)$ (\cite{Kup}). \end{enumerate} \begin{table}\label{evidence} \begin{tabular}{*{2}{|p{1.9cm}}|p{3.2cm}|p{2.2cm}|p{1.75cm}|} \hline \rule[-2mm]{0mm}{6mm}UMC & Restrictions & Invariant & Complexity & $\mathcal{B}_n$ Image \\ \hline\hline \rule[-2mm]{0mm}{6mm}$\mathcal{C}(\mathfrak{sl}_2,q)$ & $5\leq\ell\neq 6$ & $V_L(q^2)$ & \raggedright{$\#P$-hard \\ \textbf{no FPRAS?}} & dense \\ \hline \rule[-2mm]{0mm}{6mm}\raggedright{$\mathcal{C}(\mathfrak{sl}_n,q)$, $3\leq n$} & \raggedright{$n+2\leq\ell$,\\ $\ell\neq 6$} & $P^\prime_L(q,n)$ & \raggedright{$\#P$-hard \\ \textbf{no FPRAS?}}& infinite \quad not dense \\ \hline \rule[-2mm]{0mm}{6mm}\raggedright{$\mathcal{C}(\mathfrak{so}_{2n+1},q)$, $2\leq n$} & \raggedright{$\ell$ even,\\ $2n+2\leq\ell$,\\ $\ell\not=4n$} & $F_L(q^{2n},q)$ & \raggedright{$\#P$-hard \\ \textbf{no FPRAS?}}& dense \\ \hline \rule[-2mm]{0mm}{7mm}\raggedright{$\mathcal{C}(\mathfrak{sp}_{2n},q)$, $2\leq n$} & \raggedright{$\ell$ even,\\ $2n+6\leq\ell$,\\ $\ell\not=4n+2$} & $F_L(q^{-2n-1},q)$ & \raggedright{$\#P$-hard \\ \textbf{no FPRAS?}} & dense \\ \hline \rule[-2mm]{0mm}{7mm}\raggedright{$\mathcal{C}(\mathfrak{so}_{2n},q)$, $3\leq n$}& \raggedright{$2n+2\leq\ell$,\\ $\ell\not=4n-2$} & $F_L(q^{2n-1},q)$ & \raggedright{$\#P$-hard \\ \textbf{no FPRAS?}} & dense \\ \hline \rule[-2mm]{0mm}{7mm}$\mathcal{C}(\mathfrak{so}_{4},q)$& $7\leq\ell$ & $(-1)^{c-1}[V_L(-q^{-2})]^2$ & \raggedright{$\#P$-hard \\ \textbf{no FPRAS?}} & infinite \quad not dense \\ \hline \rule[-2mm]{0mm}{7mm}$\mathcal{C}(\mathfrak{sl}_2,q)$ & $\ell=3$ & $(-1)^{c-1}$ & $FP$ & finite\quad abelian \\ \hline \rule[-2mm]{0mm}{7mm}$\mathcal{C}(\mathfrak{sl}_2,q)$ & $\ell=4$ & $(-\sqrt{2})^{c-1}(-1)^{\text{Arf}(L)}$\quad or $0$ & $FP$ & finite \\ \hline \rule[-2mm]{0mm}{7mm}$\mathcal{C}(\mathfrak{sl}_n,q)$ & $\ell=6$ & $\pm(i)^{c-1}(i\sqrt{3})^{d_3}$ & $FP$ & finite \\ \hline 
\rule[-2mm]{0mm}{7mm}$\mathcal{C}(\mathfrak{sp}_4,q)$ & $\ell=10$ & $\pm(\sqrt{5})^{d_5}$ & $FP$ & finite \\ \hline \rule[-2mm]{0mm}{7mm}$\mathcal{C}(\mathfrak{sl}_n,q)$ & $\ell=n+1$ & $e^{\pi i K(L)/n}$ & $FP$ & finite \quad abelian \\ \hline \rule[-2mm]{0mm}{7mm}\raggedright{$\mathcal{C}(\mathfrak{so}_{p},q)$,\quad $3\leq p$ prime}\\ & $X$ spin rep., $\ell=2p$ & $\pm(\sqrt{p})^{d_{p}}$ & $FP$ & finite \\ \hline \rule[-2mm]{0mm}{7mm}${\rm Rep}(DG)$ & $G$ finite & $H_L(G)$ & \textbf{FPRAS?} & finite \\ \hline \end{tabular} \end{table} \begin{remark}\label{conclusions} \begin{enumerate} \item We speculate that an appropriate physical aspect of the paradigm would be as follows: When the braid group image is dense, then it is unlikely that there is an efficient way to approximately simulate the corresponding physical system. Our expertise in the subject is not sufficient to say anything authoritative, but it seems reasonable that an efficient approximate simulation of the physical system could be used to construct an FPRAS for the link invariant. When the braid group image is finite, we might expect that efficient numerical methods (such as quantum Monte Carlo) exist for (approximately) simulating the corresponding quantum mechanical systems (see e.g. \cite{JOVVC}). \item There is a related conjecture characterizing UMCs with property $F$ by the categorical dimensions of their simple objects. This is beyond our current scope, but details will appear in \cite{propF}. \end{enumerate} \end{remark} \pagebreak
{ "redpajama_set_name": "RedPajamaArXiv" }
5,170
\section{#1}} \newcommand{\subsect}[1]{\subsection{#1}} \newcommand{\subsubsect}[1]{\subsubsection{#1}} \renewcommand{\theequation}{\arabic{section}.\arabic{equation}} \newtheorem{proposition}{Proposition} \def\begin{equation}{\begin{equation}} \def\end{equation}{\end{equation}} \def\begin{eqnarray}{\begin{eqnarray}} \def\end{eqnarray}{\end{eqnarray}} \def{\rm I\kern-.2em R}{{\rm I\kern-.2em R}} \def\'{\i}{\'{\i}} \defc_1{c_1} \defc_3{c_3} \defc_2{c_2} \defc_4{c_4} \defc_6{c_6} \defc_{5}{c_{5}} \defa_+{a_+} \defa_-{a_-} \defb_+{b_+} \defb_-{b_-} \defa{a} \defb{b} \def${\mbox{I}_+}${${\mbox{I}_+}$} \def${\mbox{I}_-}${${\mbox{I}_-}$} \def${\mbox{II}}${${\mbox{II}}$} \defJ_3{J_3} \defJ_+{J_+} \defJ_-{J_-} \defI{I} \defM{M} \begin{document} \thispagestyle{empty} \ \hfill\ \ \vspace{1cm} \begin{center} {\LARGE{\bf{Quantum harmonic oscillator algebras}}} {\LARGE{\bf{as non-relativistic limits of}}} {\LARGE{\bf{multiparametric $gl(2)$ quantizations}}} \end{center} \bigskip\bigskip \begin{center} Angel Ballesteros$^\dagger$, Francisco J. Herranz$^\dagger$ and Preeti Parashar$^{\dagger\ddagger}$ \end{center} \begin{center} {\it {$^\dagger$Departamento de F\1sica, Universidad de Burgos} \\ Pza. Misael Ba\~nuelos, E-09001 Burgos, Spain} \end{center} \begin{center} {\it {$^\ddagger$Departamento de F\1sica Te\'orica, Universidad Aut\'onoma de Madrid} \\ Cantoblanco, E-28049 Madrid, Spain} \end{center} \bigskip \begin{abstract} Multiparametric quantum $gl(2)$ algebras are presented according to a classification based on their corresponding Lie bialgebra structures. From them, the non-relativistic limit leading to quantum harmonic oscillator algebras is implemented in the form of generalized Lie bialgebra contractions. \end{abstract} \bigskip\bigskip \newpage \section{Introduction} The $gl(2)$ Lie algebra can be viewed as the natural relativistic analogue of the one-dimensional harmonic oscillator algebra $h_4$ \cite{Ala}. 
Reciprocally, $h_4$ can be obtained from $gl(2)$ through a generalized In\"on\"u-Wigner contraction that translates into mathematical terms the non-relativistic limit $c\to \infty$. Explicitly, if we consider the commutation relations and second-order Casimir of the $gl(2)$ Lie algebra \begin{eqnarray} &&[J_3,J_+]=2J_+ ,\quad [J_3,J_-]=-2J_- ,\quad [J_+,J_-]=J_3 ,\quad [I,\cdot\,]=0 ,\cr &&{\cal C}=J_3^2 + 2 J_+ J_- + 2 J_- J_+, \label{ab} \end{eqnarray} and we apply the map defined by \begin{equation} A_+ = \varepsilon J_+ ,\qquad A_- = \varepsilon J_- ,\qquad N = (J_3 + I)/2 ,\qquad M = \varepsilon^2 I , \label{na} \end{equation} then the limit $\varepsilon\to 0$ ($\varepsilon=1/c$) leads to the harmonic oscillator algebra $h_4$ \begin{equation} [N,A_+]=A_+ ,\qquad [N,A_-]=-A_- ,\qquad [A_-,A_+]=M , \qquad [M,\cdot\,]=0 . \label{nb} \end{equation} The Casimir of $h_4$ is also obtained by computing $\lim_{\varepsilon\to 0} \frac 12 {\varepsilon^2} ( - {{\cal C}} +I^2 )$: \begin{equation} {\cal C}=2NM -A_+A_- - A_- A_+ . \end{equation} Recently, a systematic and constructive approach to multiparametric quantum $gl(2)$ algebras based on the classification of their associated Lie bialgebra structures has been presented \cite{bhp}. In that paper, the question concerning the generalization of the Lie bialgebra contraction procedure to multiparametric structures has been also solved. Now, we make use of those results in order to obtain several quantum $h_4$ algebras and their associated deformed Casimir operators. We emphasize that all these quantum $h_4$ algebras are endowed with a Hopf algebra structure, which can be related to integrability properties of associated models. In particular, note that the quantum group symmetry of the spin $1/2$ Heisenberg XXZ and XXX chains with twisted periodic boundary conditions \cite{AGR,PS} is given by quantum $gl(2)$ algebras \cite{bhp,MRplb} whose non-relativistic limit will be analysed. 
\section{Quantum $gl(2)$ algebras} In this section we present some relevant quantum ${gl}(2)$ Hopf algebras \cite{bhp}. Deformed Casimir operators, essential for the construction of integrable systems \cite{orl}, and quantum $R$-matrices are also explicitly given. \subsection{Family ${\mbox{I}_+}$\ quantizations} \subsubsection{Standard subfamily $U_{a_+,a}({gl}(2))$ with $a_+\ne 0$, $a\ne 0$} The quantum algebra $U_{a_+,a}({gl}(2))$ and its Casimir are given by \begin{eqnarray} &&\!\!\!\! \!\!\!\! \Delta(J_3')=1\otimes J_3' + J_3'\otimes 1 ,\qquad \Delta(J_+) =e^{ a J_3' /2}\otimes J_+ + J_+\otimes e^{- a J_3' /2} ,\cr &&\!\!\!\! \!\!\!\! \Delta(I)=1\otimes I + I\otimes 1 , \qquad \Delta(J_-) =e^{ a J_3' /2}\otimes J_- + J_-\otimes e^{- a J_3' /2} ,\cr &&\!\!\!\! \!\!\!\! [J_3',J_+]=2J_+ ,\quad [J_3',J_-]=-2J_- -\frac{a_+}{a}\, \frac{\sinh(a J_3'/2)}{a/2} - \frac{a_+^2}{a^2}J_+ ,\quad [I,\cdot\,]=0 ,\cr &&\!\!\!\! \!\!\!\! [J_+,J_-]=\frac{\sinh a J_3' }{a} +\frac{a_+}{ a} \left(\frac{e^{a}-1}{2a}\right) \left(e^{ -a J_3' /2}J_+ + J_+ e^{ a J_3' /2}\right) , \label{ba}\\ &&\!\!\!\! \!\!\!\! {\cal C}_{a_+,a}= \frac{2}{a\tanha}\left(\cosh(aJ_3') -1\right)+ 2 (J_+ J_- + J_- J_+)+\frac{a_+^2}{a^2}J_+^2\cr &&\!\!\!\! \!\!\!\! \qquad\quad + \frac{a_+}{a} \left(\frac{\sinh(a J_3'/2)}{a/2}J_+ +J_+\frac{\sinh(a J_3'/2)}{a/2}\right) , \nonumber \end{eqnarray} where $J_3'=J_3- \frac{a_+}{a}J_+$. This quantum algebra is just a superposition of the standard and non-standard deformations of $sl(2,{\rm I\kern-.2em R})$ since the underlying standard classical $r$-matrix is $r=\frac 12(a_+ J_3'\wedge J_+ - 2 a J_+\wedge J_-)$. This fact can be clearly appreciated by considering the $4\times 4$ quantum $R$-matrix associated to $U_{a_+,a}({gl}(2))$ \cite{bhp}: \begin{equation} {\cal R}=\left(\begin{array}{cccc} 1&h&-qh&h^2\\ 0&q&1-q^2&qh\\ 0&0&q&-h\\ 0&0&0&1 \end{array}\right) ,\qquad q=e^{a}, \qquad h=\frac{a_+}{2}\left(\frac {e^{a}-1}{a}\right) . 
\end{equation} The limit $a_+\to 0$ yields the standard $R$-matrix of $sl(2,{\rm I\kern-.2em R})$, while taking $a\rightarrow 0$ gives rise to the non-standard one. This quantum algebra underlies the construction of non-standard $R$-matrices out of standard ones introduced in \cite{AKS,ACC}. \subsubsection{Non-standard subfamily $U_{a_+,b_+}({gl}(2))$ with $a_+\ne 0$} The Hopf algebra $U_{a_+,b_+}({gl}(2))$, whose Lie bialgebra is generated by the triangular classical $r$-matrix $r=\frac 12(a_+ J_3\wedge J_+ +b_+ J_+\wedge I)$, is given by \begin{eqnarray} &&\Delta(J_+)=1\otimes J_+ + J_+\otimes 1 ,\qquad \Delta(I)=1\otimes I + I\otimes 1 ,\cr &&\Delta(J_3)=1\otimes J_3 + J_3\otimes e^{a_+ J_+} - b_+ I\otimes \left(\frac {e^{a_+ J_+}-1}{a_+}\right),\cr &&\Delta(J_-)=1\otimes J_- + J_-\otimes e^{a_+ J_+} - \frac {b_+}2 \left( J_3 - \frac {b_+}{a_+} I \right)\otimes I e^{a_+ J_+} ,\cr &&[J_3, J_+] =2 \frac {e^{a_+ J_+} - 1} {a_+} , \qquad [J_3, J_-] = - 2 J_- + \frac{a_+}{2}\left( J_3 - \frac{b_+}{a_+}I\right)^2 , \cr &&[J_+, J_-] = J_3 + {b_+} I \frac { e^{a_+ J_+} - 1}{a_+} , \qquad [I,\,\cdot \,] = 0, \label{bb}\\ &&{\cal C}_{a_+,b_+}= \left(J_3-\frac{b_+}{a_+} I\right) e^{-a_+J_+}\left(J_3-\frac{b_+}{a_+} I\right) + 2 \frac{b_+}{a_+} J_3I\cr &&\qquad +2\frac{1-e^{-a_+J_+} }{a_+}J_-+ 2J_- \frac{1-e^{-a_+J_+} }{a_+} +2(e^{-a_+J_+}-1). \nonumber \end{eqnarray} This quantum algebra has been also obtained in \cite{Dobrev,boson,Preeti} and its universal quantum $R$-matrix can be found in \cite{boson,Preeti}. 
\subsection{Family ${\mbox{II}}$\ quantizations} \subsubsection{Standard subfamily $U_{a,b}({gl}(2))$ with $a\ne 0$} The corresponding coproduct, commutation rules and Casimir are given by \begin{eqnarray} &&\Delta(I)=1\otimes I + I\otimes 1 ,\qquad \Delta(J_3)=1\otimes J_3 + J_3\otimes 1 ,\cr &&\Delta(J_+)=e^{(a J_3 - b I)/2}\otimes J_+ + J_+\otimes e^{-(a J_3 - b I)/2} ,\cr && \Delta(J_-)=e^{(a J_3 + b I)/2}\otimes J_- + J_-\otimes e^{-(a J_3 + b I)/2} , \label{bc}\\ &&[J_3,J_+]=2J_+ ,\quad [J_3,J_-]=-2J_- ,\quad [J_+,J_-]=\frac{\sinh aJ_3}{a}, \quad [I,\cdot\,]=0,\cr && {\cal C}_{a}=\cosh a \left(\frac{\sinh (aJ_3/2)}{a/2} \right)^2 +2\,\frac{\sinh a}{a}\,(J_+ J_- + J_- J_+). \nonumber \end{eqnarray} This quantum algebra, together with its universal quantum $R$-matrix, has been obtained in \cite{CJ}; it is just the quantum algebra underlying the XXZ Heisenberg Hamiltonian with twisted boundary conditions \cite{MRplb}. This deformation can be thought of as a Reshetikhin twist of the usual standard deformation since in the associated $r$-matrix, $r=- \frac 12 b J_3\wedge I - a J_+\wedgeJ_-$, the second term generates the standard deformation and the exponential of the first one gives us the Reshetikhin twist. \subsubsection{Non-standard subfamily $U_{b_+,b}({gl}(2))$} The coproduct reads \begin{eqnarray} &&\Delta(I)=1\otimes I + I \otimes 1 ,\qquad \Delta(J_+)=1\otimes J_+ + J_+ \otimes e^{b I} ,\cr &&\Delta(J_3)=1\otimes J_3 + J_3 \otimes 1 + b_+ J_+\otimes \left(\frac {e^{b I}-1}{b}\right) ,\cr &&\Delta(J_-)=1\otimes J_- + J_- \otimes e^{-b I} + b_+ J_3\otimes \left(\frac {e^{-b I}-1}{2b}\right) \cr &&\qquad \qquad+ b_+^2 J_+\otimes \left(\frac {1 - \cosh {b I}}{2b^2}\right) , \label{bd} \end{eqnarray} and the associated commutation rules and Casimir are non-deformed ones (\ref{ab}). A twisted XXX Heisenberg Hamiltonian invariant under $U_{b_+,b}({gl}(2))$ has been constructed in \cite{bhp}. 
The $r$-matrix is $r=-\frac 12 (b J_3 - b_+ J_+)\wedge I$ and the universal $R$-matrix turns out to be ${\cal R}= \exp\{r\}$, which in the fundamental representation reads \begin{equation} {\cal R}=\left(\begin{array}{cccc} 1&-e^{-b}\,p&p&-e^{-b}\,p^2\\ 0&e^{-b}&0&e^{-b}\,p\\ 0&0&e^{b}&-p\\ 0&0&0&1 \end{array}\right) ,\qquad p=\frac{b_+}{2}\left(\frac{e^{b}-1}{ b}\right). \end{equation} \section{Contractions to quantum oscillator algebras} In the sequel, we work out the contractions from the above quantum $gl(2)$ algebras to quantum $h_4$ algebras (a systematic approach to the latter structures can be found in \cite{osc}). In order to contract a given quantum algebra we have to consider the In\"on\"u-Wigner contraction (e.g.\ (\ref{na})) together with a mapping $a=\varepsilon^{n} a'$ on {\em each} initial deformation parameter $a$ where $n$ is any real number and $a'$ is the contracted deformation parameter \cite{LBC}. The convergency of both the classical $r$-matrix and the cocommutator $\delta$ under the limit $\varepsilon \to 0$ have to be analysed separately, since starting from a coboundary bialgebra, the contraction can lead to either another coboundary bialgebra (both $r$ and $\delta$ converge) or to a non-coboundary one ($r$ diverges but $\delta$ converges). Hence we have to find out the minimal value of the number $n$ such that $r$ converges, the minimal value of $n$ such that $\delta$ converges, and finally to compare both of them. \subsection {Standard family ${\mbox{II}}$: $U_{a,b}({gl}(2))\to U_{\xi,\vartheta}(h_4)$ with $\xi\ne 0$} Let us illustrate our procedure starting with the quantum algebra $U_{a,b}({gl}(2))$. We consider the maps \begin{equation} a= -\varepsilon^{n_{a}}\xi , \qquad b=-\varepsilon^{n_{b}}\vartheta , \label{sa} \end{equation} where $\vartheta$, $\xi$ are the contracted deformation parameters, and $n_{a}$, $n_{b}$ are real numbers to be determined by imposing the convergency of $r$. 
We introduce the maps (\ref{na}) and (\ref{sa}) in the classical $r$-matrix associated to $U_{a,b}({gl}(2))$: \begin{equation} \begin{array}{l} r=- \frac 12 b J_3\wedge I - a J_+\wedgeJ_-\cr \quad = \frac 12 \varepsilon^{n_{b}}\vartheta (2 N - M \varepsilon^{-2})\wedge M \varepsilon^{-2} +\varepsilon^{n_{a}}\xi A_+ \varepsilon^{-1}\wedge A_- \varepsilon^{-1}\cr \quad = \varepsilon^{n_{b}-2}\vartheta N \wedge M +\varepsilon^{n_{a}-2}\xi A_+ \wedge A_- . \end{array} \end{equation} Hence the minimal values of the indices $n_{a}$, $n_{b}$ which ensure the convergency of $r$ under the limit $\varepsilon \to 0$ are $n_{a}=2$, $n_{b}=2$. Now we have to analyse the convergency of the cocommutator $\delta$ associated to $U_{a,b}({gl}(2))$. Thus we consider the maps (\ref{sa}) and look for the minimal values of $n_{a}$, $n_{b}$ which allow $\delta$ to converge under the limit $\varepsilon \to 0$. It can be checked that they are again $n_{a}=2$, $n_{b}=2$, so that the resulting $h_4$ bialgebra is coboundary (both $n_{a}$, $n_{b}$ coincide for $r$ and $\delta$). Therefore the transformations of the deformation parameters so obtained are $a= -\varepsilon^2\xi$ and $b=-\varepsilon^2\vartheta$. Finally, we introduce these maps together with (\ref{na}) in $U_{a,b}({gl}(2))$ and we obtain the following quantum oscillator algebra $U_{\xi,\vartheta}(h_4)$: \begin{eqnarray} &&\Delta(N)=1\otimes N + N\otimes 1 ,\qquad \Delta(M)=1\otimes M + M\otimes 1 ,\cr &&\Delta(A_+)=e^{(\vartheta+\xi)M/2} \otimes A_+ + A_+\otimes e^{-(\vartheta+\xi)M/2} ,\cr &&\Delta(A_-)=e^{-(\vartheta-\xi)M/2}\otimes A_- + A_-\otimes e^{(\vartheta-\xi)M/2}, \end{eqnarray} \begin{equation} [N,A_+]=A_+,\quad [N,A_-]=-A_-,\quad [A_-,A_+]=\frac{\sinh \xi M}{\xi},\quad [M,\cdot\,]=0 . 
\end{equation} The deformed oscillator Casimir comes from $\lim_{\varepsilon\to 0} \frac 12{\varepsilon^2} \bigl( -{\cal C}_{a} + \bigl(\frac{\sinh (a I/2)}{a/2}\bigr)^2 \bigr)$: \begin{equation} {\cal C}_{\xi}=2 N \frac{\sinh \xi M}{\xi}- A_+A_- - A_-A_+ . \end{equation} If $\vartheta=0$, the quantum oscillator introduced in \cite{Enrico,GS} is recovered. Hereafter we give the transformations of the deformation parameters for the remaining quantum $gl(2)$ algebras together with the resulting quantum $h_4$ algebras; we stress that in all cases the contractions are found to have a coboundary character. \subsection {Non-standard family ${\mbox{II}}$: $U_{b_+,b}({gl}(2))\to U_{\beta_+,\vartheta}(h_4)$} The transformations of the deformation parameters are $b_+= 2\varepsilon^3\beta_+$ and $b=-\varepsilon^2\vartheta$. The coproduct of the quantum oscillator algebra $U_{\beta_+,\vartheta}(h_4)$ reads \begin{eqnarray} && \Delta(M)=1\otimes M + M\otimes 1,\qquad \Delta(A_+)=1\otimes A_+ + A_+\otimes e^{- \vartheta M},\cr &&\Delta(A_-)=1\otimes A_- + A_-\otimes e^{\vartheta M} +\beta_+ M \otimes \left(\frac{e^{\vartheta M} - 1}{\vartheta} \right),\cr &&\Delta(N)=1\otimes N + N\otimes 1+ \beta_+ A_+\otimes \left(\frac{1- e^{-\vartheta M}}{\vartheta} \right) . \end{eqnarray} Commutation rules and Casimir of $U_{\beta_+,\vartheta}(h_4)$ are the non-deformed ones (\ref{nb}). \subsection {Standard family ${\mbox{I}_+}$: $U_{a_+,a}({gl}(2))\to U_{\beta_+,\xi}(h_4)\to U_{\xi}(h_4)$ with $\xi\ne 0$} In this case, the maps $a_+ = 2 \varepsilon^3 \beta_+$ and $a=-\varepsilon^2 \xi$ lead to $U_{\beta_+,\xi}(h_4)$: \begin{eqnarray} && \!\!\!\! \!\!\!\! \Delta(M)=1\otimes M + M\otimes 1 , \qquad \Delta(A_\pm)=e^{ \xiM/2} \otimes A_\pm + A_\pm \otimes e^{- \xiM/2} ,\cr &&\!\!\!\! \!\!\!\! \Delta(N)=1\otimes N + N \otimes 1 +\beta_+\left(\frac{1-e^{\xiM/2}}{\xi}\right)\otimes A_+ +\beta_+ A_+\otimes \left(\frac{1-e^{-\xiM/2}}{\xi}\right),\cr &&\!\!\!\! \!\!\!\! 
[N,A_+]=A_+,\qquad [A_-,A_+]=\frac{\sinh \xi M}{\xi},\qquad [M,\cdot\,]=0 , \label{masi}\\ &&\!\!\!\! \!\!\!\! [N,A_-]=-A_- + \frac{\beta_+}{\xi}\left( \frac{\sinh \xi M}{\xi} - \frac{\sinh (\xi M/2)}{\xi/2} \right),\cr &&\!\!\!\! \!\!\!\! {\cal C}_{\beta_+,\xi}=2 N \frac{\sinh \xi M}{\xi} - A_+A_- - A_-A_+ + 2 A_+ \frac{\beta_+}{\xi}\left( \frac{\sinh \xi M}{\xi} - \frac{\sinh (\xi M/2)}{\xi/2} \right) , \nonumber \end{eqnarray} where the Casimir is provided by $\lim_{\varepsilon\to 0} \frac 12{\varepsilon^2} \bigl( -{\cal C}_{a_+, a} + \bigl(\frac{\sinh (a I/2)}{a/2}\bigr)^2 \bigr)$. However the parameter $\beta_+$ is irrelevant and it can be removed from (\ref{masi}) by applying the change of basis defined by \begin{equation} N'=N+\frac{\beta_+}{\xi}A_+ ,\quad A'_+=A_+ , \quad A'_-=A_-+\frac{\beta_+}{\xi}\frac{\sinh(\xiM/2)}{\xi/2} ,\quad M'=M . \end{equation} Thus we recover $U_{\xi}(h_4)$, already obtained in sec.\ 3.1 as $U_{\vartheta,\xi}(h_4) \to U_{\vartheta=0,\xi}(h_4)$. \subsection {Non-standard family ${\mbox{I}_+}$: $U_{a_+,b_+}({gl}(2))\to U_{\alpha_+}(h_4)$ with \break $\alpha_+\ne 0$} The transformations of the deformation parameters turn out to be $a_+=\varepsilon \alpha_+$ and $b_+=-\varepsilon\alpha_+$. Hence, we obtain the ``Jordanian $q$-oscillator" \cite{osc} $U_{\alpha_+}(h_4)$: \begin{eqnarray} &&\Delta(A_+)=1\otimes A_+ + A_+\otimes 1,\qquad \Delta(M)=1\otimes M + M\otimes 1 ,\cr &&\Delta(A_-)=1\otimes A_- + A_-\otimes e^{\alpha_+ A_+} +\alpha_+ N\otimes M e^{\alpha_+ A_+},\cr &&\Delta(N)=1\otimes N+ N\otimes e^{\alpha_+ A_+}, \qquad [M,\cdot\,]=0,\\ &&[N,A_+]=\frac{e^{\alpha_+ A_+}-1}{\alpha_+},\quad [N,A_-]=-A_-,\quad [A_-,A_+]=M e^{\alpha_+ A_+}. \nonumber \end{eqnarray} The quantum Casimir is computed as $\lim_{\varepsilon\to 0} \varepsilon^2\left( - \frac{1}{2}{{\cal C}_{a_+,b_+}} +I^2\right)$, and reads, \begin{equation} {\cal C}_{\alpha_+}=2NM +\frac{e^{-\alpha_+ A_+ }- 1}{\alpha_+} A_- +A_- \frac{e^{-\alpha_+ A_+} - 1}{\alpha_+} . 
\end{equation} \bigskip \bigskip \noindent {\Large{{\bf Acknowledgments}}} \bigskip A.B. and F.J.H. have been partially supported by DGICYT (Project PB94--1115) from the Ministerio de Educaci\'on y Cultura de Espa\~na and by Junta de Castilla y Le\'on (Projects CO1/396 and CO2/297). P.P. has been supported by a fellowship from AECI, Spain. \bigskip \bigskip
{ "redpajama_set_name": "RedPajamaArXiv" }
9,793
Posted on 9th January 2019 at 10:30 am. Award-winning creatives designing experiences through resilient architectural landscapes. Landscape Architects – delivering the public space through understanding the community's values, needs and aspirations. Providing next-generation neighbourhoods for rent with inclusive, connected convenience to enjoy your city on your terms.
{ "redpajama_set_name": "RedPajamaC4" }
3,325
{"url":"https:\/\/scirate.com\/arxiv\/1709.09660\/scites","text":"# GW170814: A Three-Detector Observation of Gravitational Waves from a Binary Black Hole Coalescence\n\nhttps:\/\/scirate.com\/arxiv\/1709.09660","date":"2018-02-25 11:32:55","metadata":"{\"extraction_info\": {\"found_math\": false, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.840320348739624, \"perplexity\": 2279.9018489068353}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-09\/segments\/1518891816370.72\/warc\/CC-MAIN-20180225110552-20180225130552-00066.warc.gz\"}"}
null
null
Every home has a history associated with it. This stone wall residential home was originally constructed in 1945. Its original architects utilized hand tools and manual labor; every nail was hand-driven! Our client contacted Advocate Construction to perform a complete renovation of this home and bring it up to today's modern living standards. This vintage home was originally constructed in 1945 by the grandfather of our client. Its vintage walls and neat history create both austerity and character for our client. Its original architect utilized only hand tools, such as hammer & nails, stone and hand-mixed mortar to create the structure. It was certainly well built considering that it has held up this well for the past 73 years! It goes without saying that our renovation on this home is a delicate process. We want to preserve as much of the original structure as possible while adding on some modern touches. It's pretty amazing to think that this home has seen such a dynamic era of modern history since it was built during WWII. Ironically, it is located down the street from another hand-built home that once housed Bonnie & Clyde. This portion of town has certainly seen its share of history. Utilizing Advocate Construction to perform a complete interior remodel, our client intends for this house to see much more in the coming century. Our team is utilizing "bracing" to provide support around this structure. Due to the age of the home, this is merely a precautionary measure. When we remove the roof, the structure will be less stable, so we want to provide as much support as possible. This structure has stood for over 73 years – it's not going anywhere on our watch! We've already hinted at the age of this stone wall home. We'd like to point out how similar yet different construction is nowadays versus 1945. Choosing the type of mortar material depends solely on the application, the type of structure you are creating, as well as the longevity of the structure. 
There are many other differences between the two eras. We will be sure to include some additional insights throughout our coming articles over this job! Our first phase of this project is the Demolition phase. We are tasked with delicately removing old portions of this home that no longer provide structural integrity or purpose. We want to essentially gut this home except for the exterior walls and build it from the inside out. Fun stuff! As you can see, bridging the gap between eras of history on a renovation job is a tricky task. It's not as simple as adding new insulation, a roof or electrical to the home. Our goal must be to preserve as much of the original structure as possible. This entails trying to closely match our current materials to the finish, color and style of the original architect. Research is our best friend at this point. It's this kind of effort that separates Advocate Construction from any other construction company in the industry. Needless to say, we've got our work cut out for us! This project has a tentative deadline of October 2018. This gives our crews merely 5 months from start to finish to complete all of the renovation phases, listed above. Completing this many tasks requires both attention to detail and focus on completing each task to code. Our crews are organized, communicate well and are capable of completing complex phases the Advocate Way! Adding in these new elements will make this home both comfortable and valuable! It's not every day that you stumble across a mid-century home with so much character interwoven throughout its structure! Our client will most likely see a very nice return on his investment should he ever decide to sell it. There are a lot of stages tied to a project of this scale. We informed our client of each stage and reinforced the purpose of each measure. We believe that our clients deserve to understand each phase of construction. 
It helps to instill knowledge and patience – and give our client appreciation for building things the Advocate way! We are glad that you're here, and we appreciate you taking the time to read this insightful article. We'd like to point out that we believe strongly in our company mission: we are an Advocate for each and every one of our clients. This not only means that you'll receive expert construction services, but you're partnered with a team that truly cares about you and your investment. Our goal is always to make the best decisions for our clients and ensure that they are happy with their final product. We work diligently to instill this mission in each and every one of our employees. If you need any type of construction service for your home, we're the Advocates you can trust! Please feel free to read up on our other industry articles to see for yourself why many other people are turning to Advocate for construction services on their homes and commercial facilities! Shimming & Concrete Caps – Our Tools for Foundation Success!
{ "redpajama_set_name": "RedPajamaC4" }
2,523
Beautiful and diverse coral reef organisms are attractive to tourists and there are quite substantial numbers of visitors snorkeling and SCUBA diving over coral reefs. A total of 384,733 stay and non-stay tourists, including both domestic and foreign, were recorded by the Tourism Authority of Thailand in 2000. There are numerous hotels and resorts, especially along the western coast of Koh Chang.
{ "redpajama_set_name": "RedPajamaC4" }
4,091
Der Bezirk Radautz (rumänisch: Rădăuți; ruthenisch: Radiwci) war ein Politischer Bezirk im Herzogtum Bukowina. Der Bezirk umfasste Gebiete im Osten der Bukowina. Sitz der Bezirkshauptmannschaft war die Stadt Radautz (Rădăuți). Das Gebiet wurde nach dem Ersten Weltkrieg Rumänien zugeschlagen und ist heute Teil des rumänischen Anteils der Bukowina im Norden Rumäniens (Kreis Suceava). Geschichte Die modernen, politischen Bezirke der Habsburgermonarchie wurden um das Jahr 1868 im Zuge der Trennung der politischen von der judikativen Verwaltung geschaffen. Der Bezirk Radautz wurde 1868 aus den Gerichtsbezirken Radautz und Solka (Solca) gebildet. 1886 wurde die Errichtung eines weiteren Gerichtsbezirks bestimmt, wofür drei Gemeinden des Gerichtsbezirks Radautz zum Gerichtsbezirk Seletin zusammengeschlossen wurden. Die Verordnung wurde dabei per 1. Juni 1888 amtswirksam. Per 1. Oktober 1893 wurde der Gerichtsbezirk Solka aus dem Bezirk Radautz herausgelöst und mit dem Gerichtsbezirk Gurahumora zum Bezirk Gurahumora vereint, woraufhin der Bezirk Radautz wieder aus zwei Gerichtsbezirken bestand. Im Bezirk Radautz lebten im Jahr 1869 73.601 Menschen, bis zum Jahr 1900 erhöhte sich die Einwohnerzahl auf 82.152 Personen. Von der Bevölkerung hatten 1900 47.919 Rumänisch (58,3 %) als Umgangssprache angegeben, 21.493 Personen sprachen Deutsch (26,2 %), 8.864 Ruthenisch (10,8 %) und 3.326 eine andere Sprache (4,0 %). Der Bezirk umfasste 1900 eine Fläche von 184,097 km² sowie zwei Gerichtsbezirke mit 28 Gemeinden und fünf Gutsgebieten. Ortschaften Auf dem Gebiet des Bezirks bestanden 1910 Bezirksgerichte in Radautz und Seletin, diesen waren folgende Orte zugeordnet: Gerichtsbezirk Radautz: Stadt Radautz Gerichtsbezirk Seletin: Cameral Schipoth Seletin Straza Einzelnachweise Literatur k. k. Statistische Zentralkommission (Hrsg.): Orts-Repertorium des Herzogthums Bukowina. Auf Grundlage der Volkszählung vom 31. Dezember 1869 bearbeitet. Cernowitz 1872 k. k. 
Statistische Zentralkommission (Hrsg.): Special-Orts-Repertorium der Bukowina. Wien 1885 k. k. Statistische Zentralkommission (Hrsg.): Special-Orts-Repertorium der Bukowina. Neubearbeitung auf Grund der Ergebnisse der Volkszählung vom 31. December 1890. Wien 1894 k. k. Statistische Zentralkommission (Hrsg.): Gemeindelexikon der Bukowina. Bearbeitet auf Grund der Ergebnisse der Volkszählung vom 31. Dezember 1900. Wien 1907 Radautz
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,794
{"url":"https:\/\/aldream.github.io\/presentations\/2014-07-11%20-%20Survey%20on%20Random%20Number%20Generators%20(School%20Seminar)\/index.html","text":"# Random NumberGenerators\n\n#### The Art of Mathematical Computing\n\nby Benjamin (Bill) Planche\nfeat. Philipp Jovanovic\nSlides at github.com\/Aldream\/presentations\n\n# Structure\n\n1. Randomness - defs + pseudo-randomness\n2. RNGs & PRNGs - defs + implementation\n3. Test Tools - suites + implementation\n\n# Randomness\n\nLack of pattern, predictability or determinism in events.\n\nHowever.\n\nIs it really random?\n\nOr are we simply ignorant of the underlying pattern?\n\n## Random Sequences\n\n#### Information Theory\n\nBasic definition\nSequence of independent random variables\nFormal definition\n???\n\n## Definition by Von Mises\n\nBased on Theory of Large Numbers\n\nAn infinite sequence can be considered random if:\n\n\u2022 It has the Frequency Stability Property\n\u2022 $S \\in \\mathbb{A}^n$ with $\\mathbb{A}$ alphabet of $m$ symbols\n\u2022 $\\forall a \\in \\mathbb{A}, \\lim_{n \\to \\infty} |\\{s, s \\in S \\land s = a\\}| = \\frac{1}{m}$\n\u2022 Any sub-seq. selected by a proper method isn't biased too. 
ex:\n\u2022 $S = (1,0,1,0,1,0,1,0)$ not biased\n\u2022 $f(X) \\to (X^i | \\forall i \\leq |X| \\land i \\equiv 0 \\pmod 2)$\n\u2022 $\\implies f(S) \\to (1,1,1,1)$ biased\n\nHowever.\n\n\u2022 How to mathematize this proper method of selection?\n\u2022 Yields an empty set (demo by Jean Ville in 1939)\n\n## Definition by Martin-L\u00f6f\n\nA Random Sequence has no \"exceptional and effectively verifiable\" property\n\u2022 No properties verifiable by a recursive algorithm\n\u2022 Frequency \/ measure-theoretic\n\u2022 Quite satisfactory\n\n## Definition by Levin\/Chaitin\n\n### Complexity of Kolmogorov\n\n\u2022 Important measure for Information Theory\nLength of the shortest program able to generate the sequence.\n\n### Resulting Definition\n\nA finite string is random\nif it requires a program at least as long as itself to be computed\n\n$\\exists c \\geq 0$, such as $H(S_0^n) \\geq n - c$\nwith $H$ complexity of Kolmogorov\n\n\u2022 \"Incomprehensible informational content\"\n\u2022 Complexity \/ Compressibility approach\n\n## Statistical Randomness\n\nA sequence is statistically random if it has no recognizable patterns.\n\u2022 Less strict than previous definitions\n\u2022 Doesn't imply objective unpredictability\n\n... leaves room to the concept of ...\n\n## Pseudo-Randomness\n\n#### and Pseudo-Random Sequence\n\nExhibits statistical randomness...\n\n... though generated by a deterministic causal method.\n\n# Random NumberGenerators\n\n## Definition\n\nDevice which can produce a sequence of random numbers, i.e. 
a without deterministic properties and patterns.\n\n## Categories\n\n### Generators based on physical phenomena\n\nDice tossing, coin flipping, bird trajectories, ...\n\n\u2022 Often only random in appearances\n\u2022 Cheating by knowing the rules \/ initial state\n\n#### Quantic Phenomena\n\nNuclear decay, Behavior of photons hitting a semi-transparent mirror, ...\n\n\u2022 Golden solutions\n\u2022 Globally too costly to be democratized\n\n#### Noisy Phenomena\n\nThermal signal from transistor, radio noise, Analog-to-digital conversion noise, ...\n\n\u2022 Easier to detect\n\u2022 Offer good results\n\n#### OS Implementations\n\n\u2022 Unix Systems\n\u2022 $\/dev\/urandom$ & $\/dev\/random$\n\u2022 Device files probing analog sources (mouse, keyboard, disk accesses, etc.)\n\u2022 Windows Systems\n\u2022 $CryptGenRandom$\n\u2022 Gather through system state (CPU counters, env. var, threads IDs, etc.)\n\n\u2022 Based on the unpredictable IO + behavior of the users\n\u2022 Harvest entropy, Output random bytes\n\nIn both cases, entropy decreases during inactivity\n\n... 
Shortages.\n\n### Pseudo-Random Number Generators\n\nRamdomness through Determinism...??\n\nPseudo-Random Sequences\n\n\u2022 Clever implementations $\\rightarrow$ Long-enough period\n\u2022 Determinism $\\rightarrow$ totally defined by init config\n\u2022 State\n\u2022 Seed\n\n## LFSR\n\n#### Linear Feedback Shift Register\n\n\u2022 Sequential shift register\n\u2022 New bit = linear function of previous state\n\u2022 Combinational logic\n\u2022 $\\mathfrak{F}$ mapping in vector space of binary $n$-tuples\n\u2022 $f$ feedback function = boolean operation of $n$ variables\n\n$$\\mathfrak{F}:\\mathbb{F}_2^n \\to \\mathbb{F}_2^n$$ $$\\mathfrak{F}:(x_1, x_2, ..., x_n)\\mapsto (x_2, x_3, ..., x_n, f((x_1, x_2, ..., x_n))$$\n\n\u2022 $f$ $\\equiv$ poly mod 2 in finite field arithmetic\n\u2022 Feedback polynomial\n\u2022 Taps = bits of the register used in the linear operation\n\u2022 Conditions to maximal-length LFSR (period $2^n-1$):\n\u2022 Having an even number of taps\n\u2022 Using a relatively-prime set of taps\n##### Example\n\n$f(x) = x_{16} + x_{14} + x_{13} + x_{11} + 1$\n\n## NLFSR\n\n#### Non-Linear Feedback Shift Register\n\nSame theory as for LFSRs\n\nOnly one difference\n\nThe feedback function $f$ is non-linear\n\nex: $f(x) = x^4 + x^1 \\cdot x^2 + 1$\n\n\u2022 Makes NLFSRs harder to predict than LFSRs\n\u2022 Makes it harder to ensure a max period of $2^n-1$ bits.\n\n## Applications and Uses\n\nApplications in every area\nwhere unpredictable behavior is desirable\/required\n\ncryptographic systems, gambling applications, statistical sampling, simulation, ...\n\nVarious applications $\\rightarrow$ Various requirements\n\n\u2022 Crypto-secure RNGs for security applications\n\u2022 Outputs uniqueness for shuffling methods\n\u2022 ...\n\n\u2022 RNGs $\\rightarrow$ ~ safer but less abundant\n\u2022 PRNGs $\\rightarrow$ ~ weaker but lighter\n\n# Testing Randomness\n\n## About the Difficulty to Test Randomness\n\n### Reasons\n\n\u2022 Def. 
depending on the field $\\rightarrow$ Which one to test?\n\u2022 Large number of possibilities $\\rightarrow$ Impossible to fully cover\n\n### Solutions\n\n\u2022 Statistical tests or complexity evaluations\n\u2022 Battery of tests to identify statistical bias\n\u2022 Checking hypothesis of perfect behavior\n\n### Limitations\n\n\u2022 Impossible to fully cover $\\rightarrow$ no universal battery of tests\n\u2022 Good RNGs $\\approx$ pass complicated or numerous tests\n\n## Common Tests\n\n\u2022 DIEHARD Tests\n\u2022 TestU1 Suite\n\n### Berlekamp-Massey Algorithm\n\n#### Definition\n\n\u2022 Break linearly recurrent sequences in $\\mathbb{F}_n$\n\u2022 Find min degree $L$ and annihilator poly $F(x)$ of the seq $S$\n\n#### Algorithm\n\n$S$ sequence, $F(x)$ polynomial, $\\beta_i^j$ discrepancy\n\u2022 Make a first guess for $F(x)$\n\u2022 At each iteration $l$:\n\u2022 Generate $S_l'$ of $l$ elements, using reverse of $F(x)$\n\u2022 Compare $S$ and $S_l'$: $\\beta_0^l(S, S_l')$\n\u2022 We know $S_l'$ correct up to the $(l-1)^{th}$ symbol\n\u2022 If $l^{th}$ symbol not correct, ie $\\beta_0^l(S, S_l') = (0,0,0,...,1)$:\n\u2022 Last iteration $m$ when this happened, we had $\\beta_0^m(S, S_m') = (0,0,0,...,1)$\n\u2022 So $\\beta_0^l(S, S_l') + \\beta_{l-m}^l(S, S_m') = (0,0,0,...,0) \\to$ correction to apply\n\u2022 $F(x) \\gets F(x) + x^mF_l(x)$\n##### Let's play\n\nIteration 0\n$\\beta$ = 0\n(stopped)\n\n# Conclusion\n\n### Overview of a large topic\n\n\u2022 Various characteristics \/ Various Uses\n\u2022 Choose wisely!\n\u2022 Don't implement your own RNG!\n\u2022 ... especially for crypto!\n\u2022 ... but if you try, test test test!\n\n# References\n\nPresentation based on a personal survey: https:\/\/github.com\/Aldream\/random-number-generator\n1. Downey, R.: Some recent progress in algorithmic randomness. In Mathematical Foundations of Computer Science 2004. Springer Berlin Heidelberg (2004)\n2. Wikipedia: Random Number Generation (2014)\n3. 
Aumasson, J.P.: Crypto for Developers - Part 2, Randomness. AppSec Forum Switzerland 2013 (2013)\n4. Raymond, S., Andrew, S., Patrick, C., Jason, M.: Linear Feedback Shift Register (2001)\n5. Joux, A.: Algorithmic cryptanalysis. CRC Press (2009)\n6. Szmidt, J.: The Search and Construction of Nonlinear Feedback Shift Registers. Military Communication Institute, Zegrze, Poland (2013)\n7. Wikipedia: Linear Feedback Shift Register (2014)\n8. Dubrova, E.: A List of Maximum Period NLFSRs. IACR Cryptology ePrint Archive 2012 (2012) 166\n9. Ritter, T.: Randomness Tests: A Literature Survey (2007)\n10. L'Ecuyer, P, S.R.: TestU01 - A Software Library in ANSI C for Empirical Testing of Random Number Generators (2002)\n11. Marsaglia, G.: The Marsaglia Random Number CDROM including the Diehard Battery of Tests of Randomness (2005)\n12. Soto, J.: Statistical Testing of Random Number Generators. Proceedings of the 22nd National Information Systems Security Conference NIST, 1999 (1999)\n13. Berlekamp, E.R.: Nonbinary BCH decoding. University of North Carolina. Department of Statistics (1967)\n14. Massey, J.L.: Shift-register synthesis and BCH decoding. Information Theory. IEEE Transactions 15(1) (1969) 122-127\n15. Feng, G.-L., T.K.: A generalization of the Berlekamp-Massey algorithm for multisequence shift-register. Information Theory, IEEE Transactions 37(5) (2012) 1274-1287\n16. Rodrigez, S.: Implementation of a decoding algorithm for codes from algebraic curves in the programming language Sage. 
diploma thesis, Faculty of San Diego State University (2013)\n\n## Thanks for you attention!\n\n#### Questions?\n\n@b_aldream | git:Aldream | aldream.net\n\n# Annexe\n\n### Randomness defined by Schnorr\n\nA random sequence must not be predictable.\nNo effective strategy should lead to an infinite gain if we bet on its symbols.\n\u2022 Predictability approach\n\n### LFSR - Implementation\n\n##### Python\ndef createLFSRgenerator(taps, seed):\n\"\"\" Returns a LFSR generator, defined by the given sequence of taps and initial value.\n@param taps (Tuple[int]): Sequence of taps defining the register.\nex: (1, 0, 0, 1) -> f(x) = x^4 + x^3 + 1\n@param seed (int): Initial value given to the register\n@return LFSR Generator \"\"\"\ndef lfsrGen(): \"\"\" @yield Pseudo-Random value from the defined LFSR \"\"\"\ndeg = len(taps) # Degree of the feedback polynomial\nperiod = math.pow(2,deg) - 1 # Max period of the LFSR\nvalue = seed # Initial value\nit = 0\nwhile (it < period): # Computing new value of most-significant bit:\nbit = 0\nfor j in range(deg): # AND-operation between the current value and the taps-tuple\nif taps[j]:\nbit ^= value >> j\nbit &= 1 # XOR-operation to get the new value of the bit\n# Final value in register by popping less-sign bit and appending the new most-sign one:\nvalue = (value >> 1) | (bit << (deg-1))\nit += 1\nyield value\nreturn lfsrGen\n##### Javascript\nfunction createLFSRGenerator(taps, seed) {\n\/** Returns a LFSR generator, defined by the given sequence of taps and initial value.\n@param taps (Tuple[int]): Sequence of taps defining the register.\nex: (1, 0, 0, 1) -> f(x) = x^4 + x^3 + 1\n@param seed (int): Initial value given to the register\n@return LFSR Generator *\/\nreturn function *lfsrGen() { \/** @yield Pseudo-Random value from the defined LFSR *\/\nvar deg = taps.length, \/\/ Degree of the feedback polynomial\nperiod = Math.pow(2, deg) - 1, \/\/ Max period of the LFSR\nvalue = seed; \/\/ Initial value\nfor (var it = 0; it < period; 
it++) { \/\/ Computing new value of most-significant bit:\nvar bit = 0;\nfor (var j = 0; j < deg; j++) { \/\/ AND-operation between the current value and the taps-tuple\nif (taps[j])\nbit ^= value >> j;\n}\nbit &= 1; \/\/ XOR-operation to get the new value of the bit\n\/\/ Final value in register by popping less-sign bit and appending the new most-sign one:\nyield (value = (value >> 1) | (bit << (deg - 1)));\n}\n}\n}\n\n### NLFSR - Implementation\n\n##### Python\ndef createNLFSRgenerator(taps, seed):\n\"\"\" Returns a NLFSR generator, defined by the given combination of taps and initial value.\n@param taps (Tuple[Array[int]]): Sequence of combination of taps defining the non-linear register.\nex: ([0,0],[],[2],[1,2]) -> f(x) = x^4*x^4 + x^2 + x^1*x^2 + 1 (poor choice)\n@param seed (int): Initial value given to the register\n@return NLFSR Generator \"\"\"\ndef nlfsrGen(): \"\"\" @yield Pseudo-Random value generated by a pre-defined NLFSR \"\"\"\ndeg = len(taps) # Degree of the feedback polynomial\nperiod = math.pow(2,deg) - 1 # Max Period of the NLFSR (read Warning above)\nvalue = seed # Initial value\nit = 0\nwhile (it < period): # Computing the new value of the most-significant bit:\nbit = 0\nfor tap in taps:\n# Computing the binary multiplication x^K_0 * x^K_1 * ... 
* x^K_n with [K_0, K_1, ..., K_n] the j-th taps array\nif len(tap):\nelement = 1\nfor k in tap:\nif not (value >> k & 1):\nelement = 0 # Binary multiplication of terms returns 1 iif none of the terms is null.\nbreak # So if we encounter a null bit, we simply return 0, else 1.\nelse:\nelement = 0\nbit ^= element # Binary addition of the multiplication results\nbit &= 1\n# Getting the final value in the register by popping the less-significant bit and appending the new most-significant one:\nvalue = (value >> 1) | (bit << (deg-1))\nit += 1\nyield value\nreturn nlfsrGen\n##### Javascript\nfunction createNLFSRgenerator(taps, seed) {\n\/** Returns a NLFSR generator, defined by the given combination of taps and initial value.\n@param taps (Tuple[Array[int]]): Sequence of combination of taps defining the non-linear register.\nex: ([0,0],[],[2],[1,2]) -> f(x) = x^4*x^4 + x^2 + x^1*x^2 + 1 (poor choice)\n@param seed (int): Initial value given to the register\n@return NLFSR Generator *\/\nreturn function *nlfsrGen() { \/** @yield Pseudo-Random value generated by a pre-defined NLFSR *\/\nvar deg = taps.length, \/\/ Degree of the feedback polynomial\nperiod = Math.pow(2,deg) - 1, \/\/ Max Period of the NLFSR (read Warning above)\nvalue = seed, \/\/ Initial value\nit = 0\nwhile (it < period) { \/\/ Computing the new value of the most-significant bit:\nvar bit = 0\nfor (var j = 0; j < taps.length; j++) {\nvar\telement = 1;\nif (taps[j].length) { \/\/ Computing the binary multiplication x^K_0 * x^K_1 * ... 
* x^K_n with [K_0, K_1, ..., K_n] the j-th taps array\nfor (var k = 0; k < taps[j].length; k++) {\nif (!(value >> taps[j][k] & 1)) {\nelement = 0; \/\/ Binary multiplication of terms returns 1 iif none of the terms is null.\nbreak; \/\/ So if we encounter a null bit, we simply return 0, else 1.\n}\n}\n} else { element = 0; }\nbit ^= element; \/\/ Binary addition of the multiplication results:\n}\nbit &= 1;\n\/\/ Getting the final value in the register by popping the less-significant bit and appending the new most-significant one:\nit += 1;\nyield (value = (value >> 1) | (bit << (deg-1)));\n}\n}\n}\n\n### Test Suites\n\n#### DIEHARD Tests\n\n\u2022 Developed by George Marsaglia, in 1995\n\u2022 15 tests run over a large file containing the sequence\nbirthday spacings, overlapping permutations, ranks of 31x31 and 32x32 matrices, ranks of 6x8 matrices, monkey tests, count the 1's, parking lot, minimum distance, random spheres, squeeze, overlapping sums, runs, and craps\n\n#### TestU01 Suite\n\n\u2022 Software library, initiated in 1985\n\u2022 Collection of utilities in ANSI C\n\u2022 Classical stat tests + others from literature + original ones\n\u2022 Tools to implement specific stat tests.\n\n### Berlekamp-Massey Algorithm Alternate explanation\n\n\u2022 At each iteration $l$:\n\u2022 Evaluate the discrepancy\n\u2022 If null:\n\u2022 $F(x)$ and $L$ still correct\n\u2022 Go to next iteration\n\u2022 Else:\n\u2022 $F(x)$ should be concordantly adjusted\n\u2022 Shift & Scale syndromes added since last update\n\u2022 If $l > 2L$:\n\u2022 Update $L$ to keep track of progression\n\n### Berlekamp-Massey - Implementation\n\n##### Python\ndef BerlekampMasseyAlgorithm(sequence):\n\"\"\" Applies the Berlekamp-Massey Algorithm to the given sequence of bits;\nReturns the smallest annihilating polynomial F, ie. 
the smallest inverse\nfeedback polynomial corresponding to the generating LFSR.( F(sequence) = 0 )\n@param sequence (Array[int] or Tuple[int]): Sequence of bits to analyze\n@returns Array defining the computed inverse feedback polynomial\nex: [1, 0, 0, 1, 1] represents the inverse polynomial x^4 + x^3 + 1,\nand thus the feedback polynomial x^4 + x + 1 (taps = (1, 0, 0, 1)) \"\"\"\n\ndef discrepancy(sequence, poly, i, L):\n\"\"\" Returns the discrepancy.\n@param sequence (Array[int] or Tuple[int]): Sequence of bits to analyze\n@param poly (Array[int]): Current version of the inverse polynomial\n@param i (int): Current position in the sequence\n@param L (int): Current number of assumed errors\n@return Binary value of the discrepancy \"\"\"\nreturn sum([sequence[i-j]&poly[j] for j in range(0,L+1)])%2 # = s[i]*p[i] + s[i-1]*p[1] + ... + s[i-L]*p[L]\n\n\"\"\" Computes the addition of two F2 polynomials.\n@param poly1 (Array[int]): Array representing the 1st polynomial\n@param poly2 (Array[int]): Array representing the 2nd polynomial\n@param length (int): Length to be covered by the addition (trusting user to avoid testing)\n@returns Resulting Binary Array \"\"\"\nreturn [poly1[j]^poly2[j] for j in range(0, length)]\n\n# Initializing:\nN = len(sequence)\nF, f = [0]*N, [0]*N # Polynomials, with F being the one returned at the end (inverse feedback polynomial)\nF[0] = f[0] = 1\nL = 0 # Current number of assumed errors\ndelta = 1 # Number of iterations since last update of L\nfor l in range(N): # Computing F and L:\nbeta = discrepancy(sequence, F, l, L)\nif beta != 0: # Adjusting F for this term:\ng = F.copy()\nF = addPoly(F, [0]*delta + f, N)\nif 2 * L <= l: # If it is not the case, we must update L (and thus re-initalize delta), and also f:\nL = l + 1 - L # number of available syndromes used to calculate discrepancies\ndelta = 1\nf = g # f get the previous value of F\nelse: delta += 1\nelse: delta += 1\nreturn F[:L+1] # output the polynomial\n##### Javascript\nfunction 
BerlekampMasseyAlgorithm(sequence) {\n\/** Applies the Berlekamp-Massey Algorithm to the given sequence of bits;\nReturns the smallest annihilating polynomial F, ie. the smallest inverse\nfeedback polynomial corresponding to the generating LFSR.( F(sequence) = 0 )\n@param sequence (Array[int] or Tuple[int]): Sequence of bits to analyze\n@returns Array defining the computed inverse feedback polynomial\nex: [1, 0, 0, 1, 1] represents the inverse polynomial x^4 + x^3 + 1,\nand thus the feedback polynomial x^4 + x + 1 (taps = (1, 0, 0, 1)) *\/\n\nfunction discrepancy(sequence, poly, i, L) {\n\/** Returns the discrepancy.\n@param sequence (Array[int] or Tuple[int]): Sequence of bits to analyze\n@param poly (Array[int]): Current version of the inverse polynomial\n@param i (int): Current position in the sequence\n@param L (int): Current number of assumed errors\n@return Binary value of the discrepancy *\/\nvar disc = 0;\nfor (var j = 0; j < L+1; j++) disc += (sequence[i-j] & poly[j]) \/\/ disc = s[i]*p[i] + s[i-1]*p[1] + ... 
+ s[i-L]*p[L]\nreturn disc%2;\n}\n\n\/** Computes the addition of two F2 polynomials.\n@param poly1 (Array[int]): Array representing the 1st polynomial\n@param poly2 (Array[int]): Array representing the 2nd polynomial\n@param length (int): Length to be covered by the addition (trusting user to avoid testing)\n@returns Resulting Binary Array *\/\nvar poly = [];\nfor (var j = 0; j < length; j++) poly.push(poly1[j] ^ poly2[j]);\nreturn poly;\n}\n\n\/\/ Initializing:\nvar N = sequence.length;\nvar F = [], f = [] \t\t\/\/ Polynomials, with F being the one returned at the end (inverse feedback polynomial)\nfor (var i = 0; i < N; i++) { F.push(0); f.push(0); }\nF[0] = f[0] = 1\nvar L = 0 \/\/ Current number of assumed errors\nvar delta = 1 \/\/ Number of iterations since last update of L\nfor (var l = 0; l < N; l++) { \/\/ Computing F and L:\nvar beta = discrepancy(sequence, F, l, L);\nif (beta != 0) { \/\/ Adjusting F for this term:\nvar g = F.slice(0);\nvar fShifted = f.slice(0); for (var k = 0; k < delta; k++) { fShifted.unshift(0); }\nif (2 * L <= l) {\nL = l + 1 - L; \/\/ number of available syndromes used to calculate discrepancies\ndelta = 1;\nf = g; \/\/ f get the previous value of F\n} else delta += 1;\n} else delta += 1;\n}\nfor (var k = L+1; k < N; k++) { F.pop(); }\nreturn F; \/\/ output the polynomial\n}","date":"2018-05-25 12:22:18","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.6592508554458618, \"perplexity\": 13237.10495525506}, \"config\": {\"markdown_headings\": true, 
\"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 5, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-22\/segments\/1526794867092.48\/warc\/CC-MAIN-20180525121739-20180525141739-00110.warc.gz\"}"}
null
null
{"url":"https:\/\/www.physicsforums.com\/threads\/prove-abc-ta-1-cb.673624\/","text":"Prove ABC^TA^-1=CB\n\n1. Feb 22, 2013\n\nMikeLowri123\n\nHi all,\n\nI have a suspicion this may be obvious but have lookd and can't seem to obtain the correct answer,\n\nCan somone please explain the steps required to prove\n\nABC^TA^-1=CB\n\nwhere C^T is the transpose of C and A^-1 the inverse of A. Matrices B and A are covariance matrices and thus may be considered symmetric if that helps\n\nThanks in advance for some direction\n\n2. Feb 22, 2013\n\nStaff: Mentor\n\nRe: Multiplication\n\nI don't think your requirements are sufficient. Consider A=1, B=[1,2;2,1], C=[0,0;1,0], for example. BC^T = [0,1;0,2], but CB = [0,0;1,2].\n\n3. Feb 22, 2013\n\nMikeLowri123\n\nRe: Multiplication\n\nThanks for the reply, on a second look I now have:\n\nAB^TC^-1BA=AB^TC^-1CAB^TC^-1\n\nWhich I can break down to:\n\nAB^TC^-1BA=AB^TAB^TC^-1\n\nis there anyway I can re-order the RHS to equal the left?\n\n4. Feb 22, 2013\n\nStaff: Mentor\n\nRe: Multiplication\n\nAssuming A and B are invertible, this can be simplified to\nC^-1BA=AB^TC^-1\n\nAs both A and B are symmetric, B^T=B and AB=BA (you can check this with the definition of matrix multiplication). Define D=AB and E=C^(-1). D is symmetric as well.\n\nTherefore, your equation is equivalent to ED=DE for symmetric D and invertible E. But this is wrong, for example for D=[1,2;2,1] and E=[1,1;0,1].\n\nOr, with the original matrices:\nA=1, B=[1,2;2,1], C=[1,-1;0,1] violates the equation.\n\n5. Feb 22, 2013\n\nMikeLowri123\n\nRe: Multiplication\n\nThanks for the quick response, Apologies however A and C are symmetric B is not does this chaneg anything\n\n6. Feb 22, 2013\n\nMikeLowri123\n\nRe: Multiplication\n\nI am attempting to work through a derivation and the step attached requires the above mentioned to hold, any suggestions appreciated\n\nLast edited by a moderator: Feb 22, 2013\n7. 
Feb 22, 2013\n\nMikeLowri123\n\nRe: Multiplication\n\n14.63 to 14.64, should be an easy substitution but I can't get there\n\n8. Feb 22, 2013\n\nMikeLowri123\n\nRe: Multiplication\n\nno one ??","date":"2017-12-12 07:11:00","metadata":"{\"extraction_info\": {\"found_math\": false, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8082394599914551, \"perplexity\": 3332.533622481703}, \"config\": {\"markdown_headings\": false, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2017-51\/segments\/1512948515309.5\/warc\/CC-MAIN-20171212060515-20171212080515-00714.warc.gz\"}"}
null
null
{"url":"https:\/\/www.physicsforums.com\/threads\/inverse-function.893835\/","text":"# Inverse function\n\n1. Nov 18, 2016\n\n### Karol\n\n1. The problem statement, all variables and given\/known data\nSimplify:\n$$\\sin^{-1}(2\\sin^{-1}0.8)$$\n\n2. Relevant equations\nInverse sine: $y=\\sin^{-1}(x)~\\rightarrow~\\sin(y)=x$\n$$\\sin^2(x)+\\cos^2(x)=1$$\n\n3. The attempt at a solution\nThe inner parenthesis: $\\sin y=0.8$ . In the drawing it's alpha's sine.\nNow i double the \u03b1 and the question wants the high edge in the drawing. how to find it?\n\n2. Nov 18, 2016\n\n### SammyS\n\nStaff Emeritus\nThat problem seems very strange to me.\n\nIt would be much more expected to be asked to simplify something like:\n\n$\\sin\\left(2 \\sin^{-1} (0.8)\\right)$\n\n3. Nov 18, 2016\n\n### Math_QED\n\nAre you sure that's the correct question? It seems undefined to me.\n\n4. Nov 18, 2016\n\n### haruspex\n\nLooking at the diagram, that is how Karol interpreted it.\n@Karol, what formulae do you know for sin(2\u03b1) or sin(\u03b1+\u03b2)?\n\n5. Nov 19, 2016\n\n### Karol\n\n$$\\sin(2\\alpha)=2\\sin(\\alpha)\\cos(\\alpha)$$\n$$\\sin^2(\\alpha)+\\cos^2(\\alpha)=1~\\rightarrow~\\cos(\\alpha)=0.6$$\n$$\\sin(2\\alpha)=2\\cdot 0.8 \\cdot 0.6$$\n\n6. Nov 19, 2016\n\n### SammyS\n\nStaff Emeritus\nThat looks fine, if you're trying to find $\\ \\sin\\left(2 \\sin^{-1} (0.8)\\right) \\, .$\n\n7. Nov 19, 2016\n\n### Math_QED\n\nAlso, if you want to type an implication '$\\Rightarrow$', write 'Rightarrow' in Latex instead of 'rightarrow'.\n\n8. 
Nov 20, 2016\n\n### Karol\n\nThanks everybody, you are great!","date":"2017-12-18 18:29:12","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.49150434136390686, \"perplexity\": 6797.857080077123}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 20, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2017-51\/segments\/1512948619804.88\/warc\/CC-MAIN-20171218180731-20171218202731-00414.warc.gz\"}"}
null
null
Ninth Place For Hutchison & JMW At Incident-Packed Spa-Francorchamps Lap two contact for team-mate Rodrigo Sales halts big points push in latest European Le Mans Series round Dundee racing driver Finlay Hutchison experienced a frustrating second round of the European Le Mans Series at Spa-Francorchamps in Belgium on Sunday, 9th August, when early-race contact for JMW Motorsport team-mate Rodrigo Sales halted the squad's push for the LMGTE podium. Arriving at the world-famous Ardennes track with a stunning new yellow and blue livery adorning the No.66 Ferrari F488 GTE EVO, hopes were very high of challenging for a major haul of points and at the start of the four-hour race everything went to plan. American team-mate Sales, making his first appearance of the year with JMW, took the opening stint and made a great start from 10th position on the LMGTE grid before climbing into the top eight in class. Then, on lap two, everything went awry after contact at Les Combes. Thankfully able to coax the damaged Ferrari back to the pits, the JMW mechanics duly set to work repairing the suspension, steering and bodywork. Falling five laps behind in the process, all hope of a strong finish was gone. Sales continued after the pit-stop, before having to serve a drive-through penalty for his part in the lap two incident which only compounded the team's woes. After handing over to compatriot Gunnar Jeanette mid-race, Sales returned to the race for a second short stint before Hutchison got behind the wheel of the No.66 car for the final hour. In the end, ninth place was the best possible result. "It's such a shame how the race turned out for us, the team did a great job all weekend and a fantastic job to get the car repaired – they deserved a podium", said the Hutchison Technologies, Evoson and In-Home Displays supported racer, "The car had quite a bit of damage, the suspension, steering rack, the left wing and the underside too. We lost quite a bit of downforce because of that. 
"In my stint at the end I was able to do one or two decent lap times in a row, but because the tyres were being worked harder due to the lack of downforce the performance then dropped away. With the damage we had, all things considered, just getting to the finish was the best we could hope for." Following the usual pre-event practice sessions, qualifying on Saturday, 8th August, took place with Jeanette at the wheel of the JMW Ferrari for the all-important grid determining run. Like most of the drivers, Jeanette lost his best lap time due to a track limits infringement but did have enough time to set another of 2m20.706 seconds which placed him 10th quickest in LMGTE. Sales' strong start to the race, gaining two places on lap one, came unravelled when he was battling with a rival Ferrari over seventh at the end of Kemmel Straight on lap two. Nosing to the inside into the right-hander at Les Combes, as the rival car turned into the corner contact was inevitable. Following the terrific pit-work of the JMW crew to facilitate the necessary repairs in double-quick time, Sales rejoined the race – under Safety Car conditions at that point – before salt was rubbed into the wound after the re-start when he was hit with the drive-through penalty. Ultimately stopping for the mandatory first driver-change from 10th in LMGTE with an hour and a half elapsed, Sales handed over the Ferrari F488 to Jeanette. Following a stellar stint, he then pitted with around 90 minutes to go and Sales climbed back aboard. With an hour to run, rain started to fall around part of the circuit and with 55 minutes to go Sales pitted again to hand over the JMW car to Hutchison for the run to the chequered flag. Joining the race ninth in LMGTE, the Scotsman threaded into the action when the circuit was under Full Course Yellow conditions due to the leading LMP2 class car having crashed into the barriers. 
When the action got back underway, Hutchison put the hammer down and set the team's fastest lap of the race, a time of 2m22.664 seconds, before improving further to 2m21.865 seconds. Three laps adrift of the next position in LMGTE, his hopes of gaining more places rested with any unforeseen errors or issues for others – especially with a Full Course Yellow period impacting the final half hour. Even so, the first year ELMS driver pressed on and lapped very strongly throughout his stint to bring the JMW entry home safely in ninth in LMGTE – very much a case of what might have been after the early misfortune. Round three of the ELMS season, which was scheduled to be the 4 Hours of Barcelona, has now been replaced by a second visit to Paul Ricard in France due to the situation with COVID-19 in the Catalunya region of Spain. The third race of the season will take place on Saturday, 29th August. Provisional 2020 European Le Mans Series LMGTE Driver Standings 8th Finlay Hutchison, 18pts Provisional 2020 European Le Mans Series LMGTE Team Standings 6th JMW Motorsport, 18pts
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
4,382
{"url":"http:\/\/mathhelpforum.com\/advanced-statistics\/139758-uniform-unbiased-estimators-print.html","text":"# Uniform - unbiased estimators?\n\n\u2022 Apr 17th 2010, 08:16 PM\nStatistik\nUniform - unbiased estimators?\nHi,\n\nI have an iid sample from $U(\\theta_1, \\theta_2)$ and found the MM of $(\\theta_1, \\theta_2)$ to be:\n\n$\n\\theta_1 = \\bar X_n - \\sqrt {3 Sn^2}$\nand $\\theta_2 = \\bar X_n + \\sqrt {3 Sn^2}\n$\n\nIn our solutions, we are told to notice that\n\n(i) $\\frac {1} {n} \\sum_i {(X_i)^2} - (\\frac {1} {n} \\sum_i {X_i})^2 = \\frac {n-1} {n} {S_n}^2\n$\n\nand\n\n(ii) $E(3(\\frac {1} {n} \\sum_i {(X_i)^2} - (\\frac {1} {n} \\sum_i {X_i})^2)) = \\frac {n-1} {n} \\frac {(\\theta_2 - \\theta_1)^2} {4}\n$\n\nI'd love to have help on how to actually see this! Thank you!","date":"2016-12-03 22:47:31","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 6, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.3187718689441681, \"perplexity\": 1459.6415445268428}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": 
\"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2016-50\/segments\/1480698541140.30\/warc\/CC-MAIN-20161202170901-00311-ip-10-31-129-80.ec2.internal.warc.gz\"}"}
null
null
(function (app) { app.controller('DashboardUsersViewDialogCtrl', ['$scope', '$timeout', '$mdSidenav', '$mdUtil', '$log', '$rootScope', '$mdDialog', '$routeParams', '$location', '$mdToast', 'CoResource', 'filterFilter', '$current', function ($scope, $timeout, $mdSidenav, $mdUtil, $log, $rootScope, $mdDialog, $routeParams, $location, $mdToast, CoResource, filterFilter, $current) { $scope.data = $current ? angular.copy($current) : null; $scope.toastPosition = { bottom: false, top: true, left: false, right: true }; $scope.mode = $scope.data && !!$scope.data.id ? 'edit' : 'create'; $scope.getToastPosition = function () { return Object.keys($scope.toastPosition) .filter(function (pos) { return $scope.toastPosition[pos]; }) .join(' '); }; $scope.close = function () { $mdDialog.hide(); }; // Load resource staff type CoResource.resources.Item.list({ type: 'directory_user' }, function (s) { $scope.types = s.result; }); // Edit the save $scope.save = function ($event) { var success = function () { $current = $scope.data; $rootScope.$emit('dataDirectoryUserSaved', { mode: $current ? 'edit' : 'create', $current: $current }); return $mdDialog.show( $mdDialog.alert({ preserveScope: true, autoWrap: true, skipHide: true, title: 'Add User info', content: 'User info has been saved', ariaLabel: 'Add User info', ok: 'Got it!' }) ) .finally(function () { $mdDialog.hide(); }); }; var fail = function (f) { return $mdDialog.show( $mdDialog.alert({ preserveScope: true, autoWrap: true, skipHide: true, // parent: angular.element(document.body), title: 'Add User info', content: 'There was an error while saving User. ' + f, ariaLabel: 'Add User info', ok: 'Got it!' 
}) ); }; $rootScope.loading('show'); if ($scope.data.id) { CoResource.resources.MemberUser.update({ id: $scope.data.directory_id, userId: $scope.data.id }, $scope.data, function (s, h) { success(); $rootScope.loading('hide'); }, function (e) { $rootScope.loading('hide'); fail(CoResource.textifyError(e.data)); }); } else { var item = new CoResource.resources.MemberUser($scope.data); item.$save({ id: $scope.data.directory_id }, function (s, h) { success(); $rootScope.loading('hide'); }, function (e) { $rootScope.loading('hide'); fail(CoResource.textifyError(e.data)); }); } }; }]); }(app));
{ "redpajama_set_name": "RedPajamaGithub" }
3,055
\section{Introduction} Fibrations play a fundamental role in categorical logic: in particular, they provide models of type theories \cite{book:Jacobs-Cat-Log} used in the semantics of logical and computational systems. The purpose of this paper is to develop the analog of fibrations for settings -- such as those describing computation -- which are based on partial maps. Because categories of partial maps admit an abstract and completely algebraic description as {\em restriction categories\/}, it is more general and, indeed, more convenient to develop a theory of fibrations for restriction categories. As the structure of a restriction category is more nuanced than that of a mere category -- due to the necessity to support the partiality of maps -- the restriction analogue of a fibration, which is called a \emph{latent fibration}, is a necessarily more subtle notion. The purpose of this paper is to initiate a careful development of the theory of latent fibrations. There are many reasons why an abstract theory of fibrations, into which partiality of maps has been baked, might be useful. Recalling that computation is fundamentally partial, there is an obvious motive to consider such settings in the semantics of computation and, in particular, to have partiality built into type theories describing computation. Notably the first use of latent fibrations was in Chad Nester's MSc.~thesis \cite{msc:nester-calgary}, where they were used in the study of realizability. More recently the desire to understand the semantics of differential programming, which has received increased attention due to its connection to machine learning, has further stimulated the development of a general theory of partiality in fibrations. In differential programming one needs to calculate the derivative of partially-defined smooth functions (that is, smooth functions defined on some open subset of their domain). 
The categorical structure of the so-called (total) ``forward'' derivative was developed in \cite{journal:BCS:CDC}: this characterized the operation which takes a smooth map $f\colon {\ensuremath{\mathbb R}}\xspace^n \@ifnextchar^ {\t@@}{\t@@^{}} R^m$ and produces the map \[ D[f]\colon R^n \times R^n \@ifnextchar^ {\t@@}{\t@@^{}} R^m \] whose value at a pair $(x,v)$ is $J(f)(x)\cdot v$: the Jacobian of $f$, at $x$, in the direction $v$. The structure of differentials for partially defined smooth functions is described in \cite{journal:diff-rest} which introduced differential {\em restriction\/} categories. In this case, the structure is axiomatized by an operation which still takes a map $f\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$ and produces a map $D[f]\colon A \times A \@ifnextchar^ {\t@@}{\t@@^{}} B$, but now with additional axioms relating the partiality of $D[f]$ to that of $f$: in particular, one asks that $\rst{D[f]} = \rst{f} \times 1$; that is, the partiality of $D[f]$ is entirely determined by the partiality of $f$. This structure, while being interesting in its own right, is also of key importance in understanding how one can build differential manifolds at this level of generality \cite[Section 6]{journal:TangentCats}. Differentials are tightly linked to fibrations because the derivative, both in the total and the restriction case, can be regarded as a section of the ``simple fibration'' over $\ensuremath{\mathbb X}\xspace$. That is, the category whose objects are objects in a context $\Sigma$, expressed as pairs $(\Sigma,A)$, with a map from $(\Sigma,A)$ to $(\Sigma',B)$ consisting of a pair of maps \[ f\colon \Sigma \@ifnextchar^ {\t@@}{\t@@^{}} \Sigma' \mbox{ and } g\colon \Sigma \times A \@ifnextchar^ {\t@@}{\t@@^{}} B \] where the second component $g$ uses the context. 
Then given a map $f\colon \Sigma \@ifnextchar^ {\t@@}{\t@@^{}} \Sigma'$ in the base category, the pair $(f,D[f])$ gives a map in the simple slice category, and the functoriality of this operation is precisely the chain rule. In the restriction case, the construction of the ordinary simple fibration does not inherit a restriction structure; however, a restriction structure is inherited when we require pairs $(f,g)$ such that $\rst{g} = \rst{f} \times 1$. This new construction does not provide an ordinary fibration over $\ensuremath{\mathbb X}\xspace$, and there is no way to obtain an ordinary fibration. However, what it does provide is a \emph{latent fibration}, which is, of course, the main subject of this paper. Intriguingly, sections of these latent simple fibrations recapture the additional axioms of differential restriction categories because the equation $\rs{D[f]} = \rs{f} \times 1$ is then forced. There is an important addendum to this story: the backpropogation algorithm, which is widely used in machine learning, is based on computing the \emph{reverse} differential \cite{arxiv:RDC} as computationally this can be much more efficient. The reverse derivative takes a smooth map $f\colon {\ensuremath{\mathbb R}}\xspace^n \@ifnextchar^ {\t@@}{\t@@^{}} R^m$ and produces the map \[ R[f]\colon R^n \times R^m \@ifnextchar^ {\t@@}{\t@@^{}} R^n \] whose value at a pair $(x,v)$ is $J^T(f)(x)\cdot v$, the \emph{transpose} of the Jacobian of $f$, at $x$, in the direction $v$ (note its difference in type from the forward derivative $D[f]\colon R^n \times R^n \@ifnextchar^ {\t@@}{\t@@^{}} R^m$). Understanding the abstract properties of the reverse differential allows one to apply these machine learning techniques to different settings: for example, they have already been used to develop machine-learning algorithms for Boolean circuits \cite{learn-bool}. 
When we look at the fibrational structure of the reverse derivative operation, it can also be seen as giving a section to a fibration, but instead of being a section of the simple slice over $\ensuremath{\mathbb X}\xspace$, now it is a section of the \emph{fibrational dual\/} of the simple slice over $\ensuremath{\mathbb X}\xspace$. The fibrational dual can be defined for any ordinary fibration by taking the opposite category in each fibre. For the simple fibration, its dual is the category whose objects are pairs $(A,A')$ as before, but now a map from $(A,A')$ to $(B,B')$ consists of a pair of maps \[ f\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B, f^*\colon A \times B' \@ifnextchar^ {\t@@}{\t@@^{}} A'. \] Fibrational duals have also received renewed interest due to their role to the theory of lenses. Lenses were originally developed for database theory \cite{journal:lenses-rosebrugh-johnson} but are now used more widely in learning systems, and elsewhere \cite{hedges2017coherence,spivak2019generalized}. The research in this paper opens the door to the formal study of partial lenses. Thus, not only is it useful for the theory of reverse {\em restriction} differential categories to be able to form the fibrational dual of a latent fibration, but also, more generally, in the theory of partial lenses. However, in general, it is simply not the case that a latent fibration will have a fibrational dual! This is basically because the opposite of a restriction category is not generally a restriction category. Thus, taking the ``opposite in each fibre'' of a latent fibration will not necessarily produce a latent fibration. However, in the particular cases of interest, which use the simple latent fibration \ref{simple-latent-fibration} and the codomain latent fibration \ref{codomain-latent-fibration}, it is clear that it {\em is\/} possible to define a suitable fibrational dual. 
Thus, one of the goals of this paper is to develop the theory of latent fibrations sufficiently so that the circumstances under which it is possible to define the fibrational dual of a latent fibration is fully understood. Of course, aside from the aforementioned motivations, latent fibrations are of intrinsic mathematical interest in their own right. The definition of a latent fibration (Definition \ref{newdefn}) involves a subtle change of the normal notion of Cartesian map to what, in order to clearly distinguish the notion, we call a {\em prone map\/}. Latent fibrations first appeared in \cite{msc:nester-calgary} and were defined in a more complicated -- albeit equivalent -- manner: see Definition \ref{olddefn} in Appendix \ref{Appendix-A}. It is reasonable to wonder whether these definitions are ad hoc. A theoretically convincing argument that the notion has a solid basis can be found in Appendix \ref{Appendix-B} where it is shown that the notion of latent fibration corresponds precisely to the 2-categorical notion introduced by Street \cite{street_fibration} for the (carefully chosen) 2-category of restriction semi-functors and transformations. While many results about ordinary fibrations are true of latent fibrations, there are some subtle aspects of the theory. For example, while prone arrows always compose and isomorphisms are always prone, one might expect that partial isomorphisms should be prone as these generalize isomorphisms in a partial setting. However, they are not in general. This failure, in fact, leads to the investigation of important additional properties that a latent fibration can satisfy (see Section \ref{sec:types}). The first additional property we study is that of being an {\em admissible\/} latent fibration (see Section \ref{sec:admissible}): in admissible latent fibrations restriction idempotents have prone liftings which are restriction idempotents. 
An important consequence of being admissible is that one can split the idempotents to obtain an $r$-split latent fibration: these then can be linked to fibrations of partial map categories and ${\sf M}$-categories (see Section \ref{sec:fibrations-of-partial}). The next additional property we consider is that of being {\em separated\/} (see Section \ref{sec:separated}), which is the requirement that the projection functor separates restriction idempotents. This turns out to be equivalent to asking that all restriction idempotents in the total category be prone. When both these conditions hold the latent fibration is a {\em hyperfibration} (see Section \ref{sec:connected}), and this is in turn equivalent to asking that the projection functor of the latent fibration be a hyperconnection (\cite[pg. 39]{journal:rcats-enriched}). We provide separating examples for each of these additional requirements, and develop their properties: in particular, we show that latent hyperfibrations have fibrational duals. After developing the basic theory, we turn to obtaining an explicit description of latent fibrations as categories of partial maps (as ${\sf M}$-categories), Section \ref{sec:fibrations-of-partial}. Indeed, we completely characterize the $r$-split latent fibrations of partial map categories, showing that they are equivalent to giving a fibration between the total categories which is ``$M$-plentiful'' -- this is a requirement, that $M$-maps lift in an appropriate manner. Finally, in the last section, Section \ref{sec:dual}, we describe how to define the fibrational dual of a latent hyperfibration. The authors are grateful for Bob Rosebrugh's many contributions to the category theory research community, through his interesting and enlightening talks and papers, his many years of service with TAC, and, on a personal level, his discussions with each of us. 
We hope that the present paper may serve as a continuation of his work on lenses and fibrations (e.g., \cite{journal:lenses-rosebrugh-johnson}) into settings which involve partial maps. \section{Restriction Categories} In this section we give a brief introduction to restriction categories concentrating on some of the less well-known aspects that are relevant to this paper. For further details see \cite{journal:rcats1, cockett_lack_2007}. \subsection{Restriction categories} A {\bf restriction category} (see \cite{journal:rcats1} for details) is a category equipped with a {\bf restriction} combinator which given a map $f\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$, returns an endomorphism on the domain $\rst{f}\colon A \@ifnextchar^ {\t@@}{\t@@^{}} A$ which satisfies just four identities\footnote{Composition is written in diagrammatic order in this paper.}: \[ \mbox{[R.1]}~\rst{f}f = f ~~~~ \mbox{[R.2]}~\rst{f}~\rst{g} = \rst{g}~\rst{f} ~~~~\mbox{[R.3]}~\rst{f}~\rst{g} = \rst{\rst{f} g} ~~~~ \mbox{[R.4]}~f \rst{g} = \rst{fg}f \] The prototypical restriction category is the category of sets and partial maps, ${\sf Par}$. The restriction of a partial map in ${\sf Par}$ is the partial identity on the domain which is defined precisely when the partial map is defined. In any restriction category $\rst{f}$ is always an idempotent and any idempotent $e = ee$ with $e=\rst{e}$ is called a {\bf restriction idempotent}. It is not the case that every idempotent need be a restriction idempotent. The restriction idempotents on an object $A$ form a meet semi-lattice, with the meet given by composition, which we denote by ${\cal O}(A)$: the elements of ${\cal O}(A)$ may be regarded as distinguished predicates on the object $A$. 
Restriction categories are always full subcategories of partial map categories (see \cite[Proposition 3.3]{journal:rcats1}) and this means that parallel maps in a restriction category can be partially ordered: the partial order is defined using the restriction by $f \leq g$ if and only if $\rst{f}g = f$ and is called the {\bf restriction order}, and in fact gives an enrichment. A map $f\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$ in a restriction category $\ensuremath{\mathbb X}\xspace$ is said to be {\bf total} in case $\rst{f} = 1_A$. Total maps compose and include identities and, thus, form a (non-full) subcategory denoted ${\sf Total}(\ensuremath{\mathbb X}\xspace)$. Any category can be endowed with a trivial restriction which takes each map to the identity on its domain: thus, every category occurs as the total maps of some restriction category. A map $s\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$ in a restriction category is a {\bf partial isomorphism} if there is a map $s^{(-1)}\colon B \@ifnextchar^ {\t@@}{\t@@^{}} A$ -- the {\bf partial inverse} of $s$ -- with $s s^{(-1)} = \rst{s}$ and $s^{(-1)}s = \rst{s^{(-1)}}$. The partial inverse of a map is unique. Partial isomorphism include all the restriction idempotents and are closed to composition. A restriction category in which all the maps are partial isomorphisms is called an {\bf inverse category\/}. Inverse categories are to restriction categories what groupoids are to ordinary categories. \subsection{\texorpdfstring{${\sf M}$}{M}-categories and \texorpdfstring{$r$}{r}-split restriction categories} A restriction category is {\bf $r$-split} if all its restriction idempotents split. Given an arbitrary restriction category, $\ensuremath{\mathbb X}\xspace$, one may always split its restriction idempotents to obtain ${\sf Split}_r(\ensuremath{\mathbb X}\xspace)$, an $r$-split restriction category. 
The 2-category of $r$-split restriction categories, restriction functors, and total natural transformations is 2-equivalent to the 2-category of ${\sf M}$-categories \cite{journal:rcats1}. ${\sf M}$-categories are categories with a system of monics which is closed to composition and pullbacks along any map. Functors between ${\sf M}$-categories must not only preserve the ${\sf M}$-maps but also the pullbacks along ${\sf M}$-maps. Natural transformations between ${\sf M}$-functors are natural transformations which, in addition, are Cartesian (or tight) for transformations between ${\sf M}$-maps -- that is the naturality squares for ${\sf M}$-maps are pullbacks. The 2-equivalence is given on the one hand, by moving from an ${\sf M}$-category $(\ensuremath{\mathbb X}\xspace,{\sf M}_\ensuremath{\mathbb X}\xspace)$ to its partial map category ${\sf Par}(\ensuremath{\mathbb X}\xspace,{\sf M}_\ensuremath{\mathbb X}\xspace)$ and on the other hand by moving to the ${\sf M}$-category consisting of the total map category of the $r$-split restriction category, ${\sf Total}(\ensuremath{\mathbb E}\xspace)$, with the restriction monics, ${\sf Monic}(\ensuremath{\mathbb E}\xspace)$, $({\sf Total}(\ensuremath{\mathbb E}\xspace),{\sf Monic}(\ensuremath{\mathbb E}\xspace))$. The restriction monics can be variously described as partial isomorphisms which are total, restriction sections, or, more interestingly, as left adjoints with respect to the partial order enrichment. \subsection{Restriction (semi)functors and transformations} There are various sorts of morphisms between restriction categories which can be considered: the most basic is that of a {\bf restriction functor}, $F\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace$, which is a functor between the categories which in addition preserves the restriction structure, that is $F(\rst{f}) = \rst{F(f)}$. 
Restriction functors so defined preserve total maps, restriction idempotents, and partial isomorphisms. As we shall see, the re-indexing or substitution functors for a latent fibration generally only satisfy the conditions for the slightly weaker notion of a restriction \emph{semi}functor; thus, it will be important to briefly review these and the related notion of a transformation between restriction semifunctors. \begin{definition} Let $\ensuremath{\mathbb X}\xspace$ and $\ensuremath{\mathbb Y}\xspace$ be restriction categories. \begin{itemize} \item A \textbf{restriction semifunctor} $F$ from $\ensuremath{\mathbb X}\xspace$ to $\ensuremath{\mathbb Y}\xspace$ is a semifunctor from $\ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace$ (that is, a map on objects and arrows which preserves domains, codomains, and composition, but not necessarily identities) which preserves restrictions. Note that for a restriction semifunctor, while $F(1_X)$ is not the identity, it is still a restriction idempotent since \[ \rs{F(1_X)} = F(\rs{1_X}) = F(1_X). \] \item If $F,G\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace$ are restriction semifunctors, a \textbf{restriction transformation} $\alpha\colon F \Rightarrow G$ is a natural transformation from $F$ to $G$ such that for each $X \in \ensuremath{\mathbb X}\xspace$, $\rs{\alpha_X} = F(1_X)$. \end{itemize} \end{definition} Restriction functors and transformations organize themselves into a 2-category. Similarly, restriction semifunctors and their transformations organize themselves into a 2-category, which we will denote ${\sf SRest}$. \subsection{Precise diagrams} In a restriction category, we shall call a commuting diagram {\em precise} in case the restriction of the overall map that the diagram describes is equal to the restriction of all the maps leaving the start node. 
Precise commuting triangles play a key role in the definition of latent fibrations. \begin{definition}\label{defn:precise} In a restriction category, a commuting triangle \[ \xymatrix{ A \ar[dr]^g \ar[d]_k \\ B \ar[r]_f & C} \] is {\bf precise} in case $\rst{g}= \rst{k}$. We refer to $k$ as the {\bf left factor} (and $f$ the right factor) of the triangle. \end{definition} The following are some useful observations on precise triangles: \begin{lemma} \label{Jaws} In any restriction category: \begin{enumerate}[(i)] \item A commuting triangle \[ \xymatrix{ A \ar[dr]^g \ar[d]_k \\ B \ar[r]_f & C} \] is precise if and only if $k\rst{f} = k$. \item A commuting triangle with right factor a restriction idempotent \[ \xymatrix{ A \ar[dr]^g \ar[d]_k \\ B \ar[r]_{e = \rst{e}} & C} \] is precise if and only if $g=k$. \item A commuting triangle with right factor a partial isomorphism \[ \xymatrix{ A \ar[dr]^g \ar[d]_k \\ B \ar[r]_{\alpha} & C} \] is precise if and only if $k = g \alpha^{(-1)}$. \item The commuting triangle \[ \xymatrix{A \ar[d]_{\rst{f}} \ar[dr]^f \\ A \ar[r]_f& B} \] is precise. \end{enumerate} \end{lemma} \begin{proof}~ \begin{enumerate}[{\em (i)}] \item We have the calculations: \begin{description} \item[($\Rightarrow$)] If $\rst{g} = \rst{k}$ then as $g=kf$ we have $k\rst{f} = \rst{kf} k = \rst{g}k = \rst{k}k = k$. \item[($\Leftarrow$)] If $k\rst{f} = k$ then $\rst{g} = \rst{k f} = \rst{k\rst{f}} = \rst{k}$. \end{description} \item We shall use {\em (i)} above: when the triangle is precise we have $g = ke = k \rst{e} = k$. Conversely, if $k=g$ then $k\rst{e} = ke = k$ so the triangle is precise. \item Again we use {\em (i)} above: when the triangle is precise we have $g \alpha^{(-1)} = k \alpha \alpha^{(-1)} = k \rst{\alpha} = k$. Conversely, if $k = g \alpha^{(-1)}$ we have \[ k \rst{\alpha} = \rst{k \alpha}k = \rst{g \alpha^{(-1)} \alpha}k = \rst{g \rst{\alpha^{(-1)}}}k = \rst{g \alpha^{(-1)}}k = \rst{k}k = k \] showing the triangle is precise. 
\item $\rst{f}f= f$ is precise with left factor $\rst{f}$ because $\rst{\rst{f}} = \rst{f}$. \end{enumerate} \end{proof} \subsection{Cartesian restriction categories} Cartesian restriction categories are described in \cite{cockett_lack_2007}. They are restriction categories with a restriction terminal object and restriction products. A {\bf restriction terminal object} in a restriction category, $\ensuremath{\mathbb X}\xspace$, is an object $1 \in \ensuremath{\mathbb X}\xspace$ such that for every object $X \in \ensuremath{\mathbb X}\xspace$ there is a unique total map $!_X\colon X \@ifnextchar^ {\t@@}{\t@@^{}} 1$ such that every map $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} 1$ has $f = \rst{f}!_X$. Given two objects $X,Y \in \ensuremath{\mathbb X}\xspace$ a {\bf restriction product} of $X$ and $Y$ is an object $X \times Y$ together with two projections $\pi_0\colon X \times Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ and $\pi_1\colon X \times Y \@ifnextchar^ {\t@@}{\t@@^{}} Y$ which are total and are such that given any other object $Z$ with two maps $a\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} X$ and $b\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} Y$ there is a unique map $\< a,b\>\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} X \times Y$ such that $\< a,b \>\pi_0 = \rst{b}a$ and $\< a,b\>\pi_1 = \rst{a}b$. \subsection{Latent pullbacks} The notion of a latent pullback in a restriction category was introduced in \cite[pg. 
459]{journal:guo-range-join}; here we give a slight modification of the original definition: \begin{definition} A commuting square in a restriction category \[ \xymatrix{A' \ar[d]_a \ar[r]^{f'} & B' \ar[d]^{b} \\ A \ar[r]_f & B} \] is a {\bf latent pullback} in case $f'\rst{b} = f'$ and $a \rst{f} = a$ (that is, it is precise) and given any $e$-commuting square (where $e=\rst{e}$ is a restriction idempotent) \[ \xymatrix{X \ar[d]_{x_1} \ar[r]^{x_0} \ar@{}[dr]|{=_e} & B' \ar[d]^{b} \\ A \ar[r]_f & B} \] that is $e x_1 f = e x_0 b$, where $e \leq \rst{x_1f}$ and $e \leq \rst{x_0b}$ there is a unique map $k\colon X \@ifnextchar^ {\t@@}{\t@@^{}} A'$ \[ \xymatrix{X \ar[drr]^{x_0}_{\leq} \ar[ddr]_{x_1}^{\geq} \ar[dr]|k \\ & A' \ar[d]_a \ar[r]_{f'} & B' \ar[d]^{b} \\ & A \ar[r]_f & B} \] such that $\rst{k} = e$, $kf' \leq x_0$, $ka \leq x_1$, and $k \rst{f'} = k \rst{a} = k$. \end{definition} The original definition asked that for any (ordinary) commuting square $x_0b = x_1f$ there was a unique $k$ with the same requirements above, except that $\rs{k} = \rs{x_1f} = \rs{x_0b}$ instead of $\rs{k} = \rs{e}$. It is readily seen that the two definitions are equivalent, as if $e$ is a restriction idempotent on $X$ such that $e \leq \rs{x_1f}$ and $e \leq \rs{x_0b}$, then $\rs{e} = \rs{ex_1f} = \rs{ex_0b}$. We recall some basic properties of latent pullbacks: \begin{lemma} \label{latent-pullbacks}~ \begin{enumerate}[(i)] \item The two commuting squares \[ \begin{matrix}\xymatrix{A' \ar[d]_a \ar[r]^{f'} & B' \ar[d]^{b} \\ A \ar[r]_f & B} \end{matrix} ~~~\mbox{and}~~~ \begin{matrix}\xymatrix{Z \ar[d]_z \ar[r]^{y} & B' \ar[d]^{b} \\ A \ar[r]_f & B} \end{matrix} \] are latent pullbacks if and only if there is a unique mediating partial isomorphism $\alpha\colon A' \@ifnextchar^ {\t@@}{\t@@^{}} Z$ with $\alpha z = a$ , $\alpha y = f'$, $\rst{\alpha} =\rst{a}= \rst{f'}$, and $\rst{\alpha^{(-1)}} = \rst{z} = \rst{y}$ and one of the squares is a latent pullback. 
\item If the two smaller squares below are latent pullbacks \[ \xymatrix{A' \ar[d]_a \ar[r]^{f'} & B' \ar[d]_b \ar[r]^{g'} & C' \ar[d]_c \\ A \ar[r]_f & B \ar[r]_g & C} \] then the outer square is a latent pullback. Furthermore, if the right square is a latent pullback and the perimeter is a latent pullback then the left square is a latent pullback. \item If $\alpha\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is a partial isomorphism and $f\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is any map, then \[ \xymatrix{ Z \ar[r]^{\rst{f\alpha^{(-1)}}} \ar[d]_{f\alpha^{(-1)}} & Z \ar[d]^{f} \\ X \ar[r]_{\alpha} & Y} \] is a latent pullback. \item If the latent pullback square \[ \xymatrix{A' \ar[d]_a \ar[r]^{f'} & B' \ar[d]^{b} \\ A \ar[r]_f & B} \] has $f$ a partial isomorphism then $f'$ is a partial isomorphism. \end{enumerate} \end{lemma} The last part of the lemma follows by combining parts (i) and (iii). \section{Latent Fibrations}\label{sec:latentFibrations} We begin with the definition of a latent fibration before looking at examples and developing some of their basic properties. \subsection{The definition} \begin{definition} \label{newdefn} Let $\ensuremath{\mathbb E}\xspace$ and $\ensuremath{\mathbb B}\xspace$ be restriction categories and ${\sf p} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ a restriction semifunctor. 
\begin{enumerate}[(i)] \item An arrow $f' \colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ in $\ensuremath{\mathbb E}\xspace$ is {\bf ${\sf p}$-prone} in case whenever we have $g \colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ in $\ensuremath{\mathbb E}\xspace$ and $h \colon {\sf p}(Y) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}(X')$ in $\ensuremath{\mathbb X}\xspace$ such that $h{\sf p}(f') = {\sf p}(g)$ is a precise triangle (Definition \ref{defn:precise}) then there is a unique {\bf lifting} of $h$ to $\widetilde{h}\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X'$ so that $\ensuremath{\mathsf{p}}(\widetilde{h}) = h$ and $\widetilde{h}f' = g$ is a precise triangle: \[ \xymatrix{Y \ar@{..>}[d]_{\widetilde{h}} \ar[dr]^{g} &~ \ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(Y) \ar[d]_h \ar[dr]^{\ensuremath{\mathsf{p}}(g)} & ~ \\ X' \ar[r]_{f'} & X && \ensuremath{\mathsf{p}}(X') \ar[r]_{\ensuremath{\mathsf{p}}(f')} & \ensuremath{\mathsf{p}}(X)} \] \item ${\sf p} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a {\bf latent fibration} if for each $X \in \ensuremath{\mathbb E}\xspace$ and each $f \colon A \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}(X)$ in $\ensuremath{\mathbb B}\xspace$ such that $f = f\ensuremath{\mathsf{p}}(1_X)$, there is a ${\sf p}$-prone map $f'\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ sitting over $f$ (that is, ${\sf p}(f') = f$). \end{enumerate} \end{definition} Note that if $\ensuremath{\mathsf{p}}$ is a restriction functor (so that $\ensuremath{\mathsf{p}}$ preserves identities) then the condition $f = f\ensuremath{\mathsf{p}}(1_X)$ is vacuous. However, if $\ensuremath{\mathsf{p}}$ is a genuine semifunctor, then that condition is necessary, as if there is an $f'\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ sitting over $f$, then since $f' = f'1_X$, $\ensuremath{\mathsf{p}}(f') = \ensuremath{\mathsf{p}}(f')\ensuremath{\mathsf{p}}(1_X)$, i.e., $f$ must satisfy $f = f\ensuremath{\mathsf{p}}(1_X)$. 
Most of our examples of latent fibrations will be restriction functors; however, there is at least one important example of a latent fibration which is a genuine semifunctor (the forgetful functor from the restriction idempotent splitting of $\ensuremath{\mathbb X}\xspace$ to itself: see Proposition \ref{prop:splitting_example}). More importantly, however, the main reason we have chosen to work with restriction semifunctors is that latent fibrations are precisely fibrations in the 2-category ${\sf SRest}$ of restriction categories, semifunctors, and restriction transformations: see Appendix \ref{Appendix-B}. \subsection{Examples of latent fibrations} It is clear that, for any restriction category $\ensuremath{\mathbb X}\xspace$, the identity functor is obviously a latent fibration. Furthermore, for any restriction categories $\ensuremath{\mathbb X}\xspace$ and $\ensuremath{\mathbb Y}\xspace$, the projection $\pi_1\colon \ensuremath{\mathbb X}\xspace \times \ensuremath{\mathbb Y}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace$ is a latent fibration. Moreovr, there are a variety of other examples of latent fibrations: some are immediately seen to be restriction versions of a normal fibration couterpart; others are particular, however, to restriction categories. We present an overview of some of the basic examples before going into a more detailed description of them. \begin{enumerate}[\bf ({3.2.}1)] \item For any restriction category $\ensuremath{\mathbb X}\xspace$, the identity functor $1_{\ensuremath{\mathbb X}\xspace}\colon \ensuremath{\mathbb X}\xspace\@ifnextchar^ {\t@@}{\t@@^{}}\ensuremath{\mathbb X}\xspace$ and projections $\pi_1: \ensuremath{\mathbb Y}\xspace \times \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ are a latent fibrations. 
\item If $\ensuremath{\mathbb X}\xspace$ is a Cartesian restriction category, there are two latent versions of the simple slice fibration: the ``lax'' simple slice $\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace)$ and the ``strict'' simple slice $\ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace]$ considered in the previous section. See Definition \ref{def:simpleSlice} and Proposition \ref{prop:simpleSlice}. \item If $\ensuremath{\mathbb X}\xspace$ is a restriction category with latent pullbacks, there are strict and lax versions of the codomain fibration: see Definition \ref{def:codomain} and Proposition \ref{prop:codomain}. \item For any functor $F\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Set}$ one may form the category of elements as a (normal) discrete fibration $\partial_F\colon {\sf Elt}(F) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$. If $\ensuremath{\mathbb X}\xspace$ is a restriction category then ${\sf Elt}(F)$ is also a restriction category and $\partial_F$ is a latent fibration: see Section \ref{discrete}. \item For any restriction category $\ensuremath{\mathbb X}\xspace$, there is a latent fibration of ``propositions'' (restriction idempotents) over $\ensuremath{\mathbb X}\xspace$: see Definition \ref{def:propositions} and Proposition \ref{prop:propositions}. \item For any restriction functor $F\colon \ensuremath{\mathbb A}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$, there is a latent fibration of ``assemblies'', denoted ${\sf Asm}(F)$: see Definition \ref{def:assemblies} and \ref{prop:assemblies}. 
\item For any restriction category $\ensuremath{\mathbb X}\xspace$, the forgetful functor from the restriction idempotent splitting of $\ensuremath{\mathbb X}\xspace$ to itself, $\split(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a latent fibration which is a genuine semifunctor: see Proposition \ref{prop:splitting_example}. \end{enumerate} \subsubsection{Identity and projection functors} Clearly identity functors and projections are latent fibrations. The prone map over a map for the identity function is just itself. For a projection $\pi_1: \ensuremath{\mathbb Y}\xspace \times \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ the prone map over a map $f: X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ at $Y$ is $1 \times f: Y \times X \@ifnextchar^ {\t@@}{\t@@^{}} Y \times X'$. \subsubsection{Simple latent fibrations} \label{simple-latent-fibration} We consider simple fibrations for Cartesian restriction categories: \begin{definition}\label{def:simpleSlice} If $\ensuremath{\mathbb X}\xspace$ is a Cartesian restriction category, the total category of the \textbf{lax simple fibration}, $\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace)$, is described as follows: \begin{description} \item[Objects:] Pairs of objects, $(\Sigma,X)$, of $\ensuremath{\mathbb X}\xspace$, where $\Sigma$ is called the ``context''; \item[Maps:] $(f,f')\colon (\Sigma,X) \@ifnextchar^ {\t@@}{\t@@^{}} (\Sigma',X')$ where $f\colon \Sigma \@ifnextchar^ {\t@@}{\t@@^{}} \Sigma'$ and $f'\colon \Sigma \times X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ are maps in $\ensuremath{\mathbb X}\xspace$ such that $\rst{\pi_0 f} \geq \rst{f'}$; \item[Composition:] Identities are $(1_\Sigma, \pi_1)\colon (\Sigma,X) \@ifnextchar^ {\t@@}{\t@@^{}} (\Sigma,X)$ and given $(f,f')\colon (\Sigma,X) \@ifnextchar^ {\t@@}{\t@@^{}} (\Sigma',X')$ and $(g,g')\colon (\Sigma',X') \@ifnextchar^ {\t@@}{\t@@^{}} (\Sigma'',X'')$ the composite is the 
map $(f,f')(g,g') := (fg,\<\pi_0f,f'\> g')$; \item[Restriction:] $\rst{(f,f')} := (\rst{f},\rst{f'}\pi_1)$. \end{description} The \textbf{strict simple fibration} is the subcategory $\ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace]$ determined by the maps $(f,f')$ for which $\rst{\pi_0f} = \rst{f'}$. \end{definition} The strict simple fibration was discussed in the introduction with respect to the differential of smooth partial maps as this differential may be seen as a section of the strict simple fibration. \begin{proposition}\label{prop:simpleSlice} If $\ensuremath{\mathbb X}\xspace$ is a Cartesian restriction category, then $\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace)$ and $\ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace]$ are Cartesian restriction categories, and the obvious projections to $\ensuremath{\mathbb X}\xspace$ are latent fibrations. \end{proposition} \begin{proof} For the identity to be well-defined we need $\rst{\pi_0 1} \geq \rst{\pi_1}$, which is obviously true as both sides are restrictions of total maps. To show composition is well defined consider $(f,f')(g,g') = (fg,\<\pi_0f,f'\>g')$: we must show $\rst{\pi_0fg} \geq \rst{\<\pi_0f,f'\>g'}$ which is so by: \[ \rst{\<\pi_0f,f'\>g'} \leq \rst{\<\pi_0f,f'\>\pi_0 g} = \rst{\rst{f'}\pi_0fg} = \rst{\rst{f'}\rst{\pi_0f}\pi_0fg} = \rst{\rst{\pi_0f}\pi_0fg} = \rst{\pi_0fg}. \] For the restriction product structure, set $(\Sigma,X) \times (\Sigma',X') := (\Sigma \times \Sigma',X \times X')$, \[ \pi_0:= (\pi_0, \pi_1\pi_0)\colon (\Sigma,X) \times (\Sigma',X') \@ifnextchar^ {\t@@}{\t@@^{}} (\Sigma,X), \pi_1:= (\pi_1, \pi_1\pi_1)\colon (\Sigma,X) \times (\Sigma',X') \@ifnextchar^ {\t@@}{\t@@^{}} (\Sigma',X') \] and \[ \< (f,f'),(g,g') \> := (\<f,g\>, \<\<\pi_0\pi_0f,\pi_1\pi_0\>f',\<\pi_0\pi_1g,\pi_1\pi_1\>g'\>). \] We leave the remaining details of checking this is a Cartesian restriction category to the reader. 
There is an obvious restriction functor \[ \pi\colon \ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace; \quad \begin{matrix} \xymatrix{ (\Sigma, X) \ar[d]_{(f,f')}\ar@{}[dr]|{\mapsto} & \Sigma \ar[d]^{f} \\ (\Sigma',X') & \Sigma' } \end{matrix} \] We set the prone arrow of $f\colon\Sigma \@ifnextchar^ {\t@@}{\t@@^{}} \Sigma'$ at $(\Sigma',X)$ to be $(f,\rst{\pi_0 f}\pi_1)\colon (\Sigma,X) \@ifnextchar^ {\t@@}{\t@@^{}} (\Sigma',X)$; the lifting property is given by \[ \xymatrix@C=3em{(\Gamma,Y) \ar@{..>}[d]_{\widetilde{h} := (h,g')} \ar[dr]^{(g,g')} & ~ \ar@{}[drr]|{\textstyle\mapsto}&& \Gamma \ar[dr]^g \ar[d]_{h} \\ (\Sigma,X) \ar[r]_{(f,\rst{\pi_0f}\pi_1)} & (\Sigma',X) && \Sigma \ar[r]_f & \Sigma'} \] The lifting $\widetilde{h}$ is well-defined as $\rst{g'} \leq \rst{\pi_0 g} = \rst{\pi_0 h}$ since the base triangle is precise. The top triangle commutes since \begin{eqnarray*} & & (h,g') (f,\rst{\pi_0f} \pi_1) \\ & = & (hf, \<\pi_0h, g'\>\rs{\pi_0 f} \pi_1) \\ & = & (g,\rs{\<\pi_0h, g'\>\pi_0f} \<\pi_0h,g'\>\pi_1) \\ & = & (g,\rs{\rs{g'}\pi_0 hf} \rs{\pi_0 h} g') \\ & = & (g,\rs{g'}\rs{\pi_0g}\rs{\pi_0h} g') \\ & = & (g,g') \mbox{ (since $\rs{g'} \leq \rs{\pi_0g} = \rs{\pi_0h}$)} \end{eqnarray*} The top triangle is precise since the bottom triangle is. The uniquess of $\widetilde{h}$ follows from a similar calculation to showing the top triangle commutes. Thus, the lax slice $\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace)$ is a latent fibration, and the result for the strict slice $\ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace]$ follows similarly. \end{proof} \subsubsection{Codomain latent fibrations} \label{codomain-latent-fibration} When $\ensuremath{\mathbb X}\xspace$ has latent pullbacks, restriction variants of the codomain fibration form another source of examples. 
In fact, just as in the ordinary case (for example, see \cite{book:Jacobs-Cat-Log}), a restriction category has latent pullbacks if and only if the underlying functor from the (strict) arrow category is a latent fibration. \begin{definition}\label{def:codomain} For any restriction category $\ensuremath{\mathbb X}\xspace$, the restriction category $\ensuremath{\mathbb X}\xspace^{\leadsto}$ is defined as follows: \begin{description} \item[Objects:] are maps $a\colon A' \@ifnextchar^ {\t@@}{\t@@^{}} A$ of $\ensuremath{\mathbb X}\xspace$; \item[Maps:] are pairs of maps $(f,f')\colon a \@ifnextchar^ {\t@@}{\t@@^{}} b$ such that $f'b \leq af$ and $f' \rst{b} = f'$: \[ \xymatrix{A' \ar@{}[dr]|{\geq} \ar[d]_{a} \ar[r]^{f'} & B' \ar[d]^b \\ A \ar[r]_f & B} \] we will refer to such lax squares as being {\bf semi-precise}; \item[Composition:] if $(f,f')\colon a \@ifnextchar^ {\t@@}{\t@@^{}} b$ and $(g,g')\colon b \@ifnextchar^ {\t@@}{\t@@^{}} c$ then the composite is $(fg,f'g')\colon a \@ifnextchar^ {\t@@}{\t@@^{}} c$; \item[Restriction:] the restriction of $(f,f')\colon a \@ifnextchar^ {\t@@}{\t@@^{}} b$ is $(\rst{f},\rst{f'})\colon a \@ifnextchar^ {\t@@}{\t@@^{}} a$. \end{description} The restriction category $\ensuremath{\mathbb X}\xspace^{\rightarrow}$ has the same definition except that the maps require the squares commute (and are still semi-precise). Thus there is an embedding $\ensuremath{\mathbb X}\xspace^{\rightarrow} \subseteq \ensuremath{\mathbb X}\xspace^{\leadsto}$ which is the identity on objects but, in general, a strict inclusion on maps. \end{definition} We define the functor $\partial^{\leadsto}$ by \[ \partial^{\leadsto} \colon\ensuremath{\mathbb X}\xspace^{\leadsto} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace; \quad\begin{matrix} \xymatrix{a \ar[d]_{(f,f')}\ar@{}[dr]|{\mapsto} & A \ar[d]^f \\ b & B} \end{matrix} \] it is clearly a restriction functor. The restriction functor $\partial^{\rightarrow}$ is defined similarly.
\begin{proposition}\label{prop:codomain} For any restriction category, $\ensuremath{\mathbb X}\xspace$, $\ensuremath{\mathbb X}\xspace^{\leadsto}$ and $\ensuremath{\mathbb X}\xspace^{\rightarrow}$ are restriction categories, and the restriction functors, $\partial\colon \ensuremath{\mathbb X}\xspace^{\leadsto} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ and $\partial\colon \ensuremath{\mathbb X}\xspace^{\rightarrow} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ are latent fibrations precisely when $\ensuremath{\mathbb X}\xspace$ has latent pullbacks. \end{proposition} \begin{proof} In $\ensuremath{\mathbb X}\xspace^{\leadsto}$, composites are well-defined as \[ afg \geq f'bg \geq f'g'c~~~\mbox{and}~~~ f'g' \rst{c} = f'(g' \rst{c}) = f'g' \] and restrictions are well-defined as \[ a \rst{f} = \rst{af} a \geq \rst{f'b} a = \rst{f'\rst{b}} a = \rst{f'} a ~~~\mbox{and}~~~ \rst{f'}~\rst{a} = \rst{f'\rst{b}} ~\rst{a} = \rst{f'b} ~\rst{a} = \rst{f'b}~\rst{af}~\rst{a} = \rst{f'b}~\rst{af} = \rst{f'b} = \rst{f'} \] Similarly composites and restrictions are well-defined in $\ensuremath{\mathbb X}\xspace^{\rightarrow}$. First we show that, when $\ensuremath{\mathbb X}\xspace$ has latent pullbacks, we have $\partial$-prone arrows, making it a latent fibration. Latent pullback gives a prone arrow in $\ensuremath{\mathbb X}\xspace^{\leadsto}$ by considering \[ \xymatrix{C' \ar[dd]_c \ar[drr]^{g'} \ar@{..>}[dr]_{\widetilde{w}} \\ & P\ar[dd]^{a'} \ar[r]_{f'} & A ' \ar[dd]^{a} \\ C \ar@{}[ur]|\geq \ar[rrd]^<<<<g|\hole \ar[dr]_w \\ & B \ar[r]_f & A} \] where the back face is a lax semi-precise square (so $cg \geq g'a$), the bottom triangle is precise (so $\rst{w} = \rst{g}$ and it commutes) and the square with apex $P$ is a latent pullback. The back face $\rst{g'a} = \rst{g'}$-commutes so there is a unique $\widetilde{w}$ with $\rst{\widetilde{w}} = \rst{g'}$, $\widetilde{w}f' \leq g'$, $\widetilde{w}a' \leq cw$. 
The top triangle is now clearly precise as $\widetilde{w}f' = g'$ since $\rst{\widetilde{w}f' }= \rst{\widetilde{w}}= \rst{g'}$. Finally, we must show the left square is semi-precise: \[ \widetilde{w} \rst{a'} = \rst{\widetilde{w} a'} \widetilde{w} = \rst{\widetilde{w} a' \rst{f}} \widetilde{w} = \rst{\widetilde{w} a' f} \widetilde{w} = \rst{\widetilde{w} f' a} \widetilde{w} = \rst{\widetilde{w} f' \rst{a}} \widetilde{w} = \rst{\widetilde{w} f'} \widetilde{w} = \rst{\widetilde{w} \rst{f'}} \widetilde{w} = \rst{\widetilde{w}} \widetilde{w} = \widetilde{w}. \] This shows that latent pullbacks are prone arrows; thus, if $\ensuremath{\mathbb X}\xspace$ has latent pullbacks $\partial\colon \ensuremath{\mathbb X}\xspace^{\leadsto} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a latent fibration. We need to show that $\ensuremath{\mathbb X}\xspace^{\rightarrow}$ also has prone arrows given by the latent pullback. For this we need to show that if the back square is an equality the front must also be an equality. This is given by the following calculation: \[ \rst{\widetilde{w} a'} = \rst{\widetilde{w}} = \rst{ g'} = \rst{g'a} = \rst{cg} = \rst{c\rst{g}} = \rst{c \rst{w}} = \rst{cw} \] Conversely if $\partial$ is a latent fibration then we have a prone arrow above any $B \@ifnextchar^ {\t@@}{\t@@^{}}^f A$ and this at least is a lax semi-precise square. However, by restricting $b$ by $\rst{f'}= \rst{f'a}$ we can turn it into a precise square. Next suppose we have an $e$-commuting square with apex $Z$ then modifying the top arrow to $ez_0$ gives a lax semi-precise square: \[ \xymatrix{ Z \ar[ddr]_{z_1}^{\geq} \ar[drr]^{ez_0} \ar@{..>}[dr]|{k} \\ & X\ar[d]^{f} \ar[r]_{\rst{fe}} & X \ar[d]^{f} \\ & Y \ar[r]_e & Y} \] This implies there is a $k\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} P$ with the top triangle precise and the left face a lax semi-precise square. However, $k$ clearly now provides the mediating map to make the modified prone square a latent pullback.
\end{proof} If $\ensuremath{\mathbb X}\xspace$ has latent pullbacks, it is not hard to see that the (strict/lax) simple latent fibration can be embedded into the (strict/lax) codomain latent fibration in a manner analogous to that for ordinary fibrations: \[ \xymatrix{\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace) \ar[d]_\pi\ar@{}[dr]|{\rightarrow} & \ensuremath{\mathbb X}\xspace^{\leadsto} \ar[d]^\partial \\ \ensuremath{\mathbb X}\xspace & \ensuremath{\mathbb X}\xspace \ar@{}[u]_{~~~~;}} \quad\xymatrix{(\Sigma,X) \ar[d]^{(f,f')}\ar@{}[drr]|{\mapsto} && \Sigma \times X \ar[d]_{\< \pi_0f,f'\>}\ar@{}[dr]|{\leq} \ar[r]^{\pi_0} & \Sigma \ar[d]^f \\ (\Sigma',X') && \Sigma' \times X' \ar[r]_{\pi_0} & \Sigma'} \] \subsubsection{Discrete latent fibrations} \label{discrete} Let $\ensuremath{\mathbb X}\xspace$ be a restriction category and $F\colon \ensuremath{\mathbb X}\xspace^{\rm op} \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Set}$ any functor (which ignores the restriction structure), then we may form the category of elements of $F$, ${\sf Elt}(F)$: \begin{description} \item[Objects:] $(X,x)$ where $X$ is an object of $\ensuremath{\mathbb X}\xspace$ and $x$ is an element of $F(X)$; \item[Maps:] $f\colon (X,x) \@ifnextchar^ {\t@@}{\t@@^{}} (Y,y)$ where $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb X}\xspace$ and $x = F(f)(y)$. \end{description} The functor $\partial_F\colon {\sf Elt}(F) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ has $\partial_F(X,x) = X$ and $\partial_F(f) = f$. The restriction in ${\sf Elt}(F)$ is just the restriction in $\ensuremath{\mathbb X}\xspace$. We must check that this is well-defined, that is, that if $f\colon (X,x) \@ifnextchar^ {\t@@}{\t@@^{}} (Y,y)$ is a map then $\rst{f}\colon (X,x) \@ifnextchar^ {\t@@}{\t@@^{}} (X,x)$ is a map of ${\sf Elt}(F)$. For this we need $F(\rst{f})(x) = x$ which follows as $F(\rst{f})(x) = F(\rst{f})(F(f)(y)) = F(\rst{f}f)(y) = F(f)(y) = x$.
The restriction identities are then immediate. This is a latent fibration as one can easily check that the prone map above $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ at $(Y,y)$ is the usual Cartesian map, namely, $f\colon (X,F(f)(y)) \@ifnextchar^ {\t@@}{\t@@^{}} (Y,y)$. \subsubsection{Latent fibrations of propositions} \label{propositions} One way in which restriction idempotents can be used to construct a latent fibration is as follows: \begin{definition}\label{def:propositions} Let $\ensuremath{\mathbb X}\xspace$ be a restriction category, and define the restriction category $\mathcal{O}(\ensuremath{\mathbb X}\xspace)$ by: \begin{description} \item[Objects:] pairs $(X,e)$ where $X$ is an object of $\ensuremath{\mathbb X}\xspace$, and $e \in {\cal O}(X)$ is a restriction idempotent on $X$. \item[Maps:] $f\colon (X,e) \@ifnextchar^ {\t@@}{\t@@^{}} (X',e')$ are maps $f \colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ of $\ensuremath{\mathbb X}\xspace$ such that $e \leq \rst{fe'}$, or equivalently $e = \rst{efe'}$. \item[Composition:] is composition in $\ensuremath{\mathbb X}\xspace$. This is well-defined since if \[(X,e) \@ifnextchar^ {\t@@}{\t@@^{}}^f (X',e') \@ifnextchar^ {\t@@}{\t@@^{}}^{f'} (X'',e'') \] are maps of $\mathcal{O}(\ensuremath{\mathbb X}\xspace)$ given by $f \colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ and $f' \colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X''$ in $\ensuremath{\mathbb X}\xspace$, then we have \[ \rst{eff'e''} = \rst{\rst{efe'}ff'e''} = \rst{e\rst{fe'}ff'e''} = \rst{efe'f'e''} = \rst{ef\rst{e'f'e''}} = \rst{efe'} = e \] so that $ff'$ gives a map $(X,e) \@ifnextchar^ {\t@@}{\t@@^{}} (X'',e'')$ in $\mathcal{O}(\ensuremath{\mathbb X}\xspace)$. \item[Identities:] as in $\ensuremath{\mathbb X}\xspace$. That is, $1_{(X,e)} = 1_X$: this is well-defined as $e = \rst{e1_Xe}$. 
\item[Restriction:] also as in $\ensuremath{\mathbb X}\xspace$, with $\rst{f} \colon (X,e) \@ifnextchar^ {\t@@}{\t@@^{}} (X,e)$ for $f \colon (X,e) \@ifnextchar^ {\t@@}{\t@@^{}} (X',e')$ given by $\rst{f} \colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$. This is well-defined as \[ e = \rst{efe'} = \rst{e\rst{f}fe'} = \rst{ee\rst{f}fe'} = \rst{e\rst{f}efe'} = \rst{e\rst{f}\,\rst{efe'}} = \rst{e\rst{f}e} \] \end{description} \end{definition} \begin{proposition}\label{prop:propositions} The canonical map $\mathcal{O} \colon \mathcal{O}(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a latent fibration. \end{proposition} We refer to $\mathcal{O}$ as the {\bf latent fibration of propositions}. \begin{proof} Associativity of composition, the restriction combinator axioms, and the requirements on identity maps all hold in $\mathcal{O}(\ensuremath{\mathbb X}\xspace)$ immediately since they hold in $\ensuremath{\mathbb X}\xspace$. There is an obvious restriction functor \[ \mathcal{O} \colon \mathcal{O}(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace;\quad \begin{matrix} \xymatrix{ (X,e) \ar[d]_f\ar@{}[dr]|{\mapsto} & X \ar[d]_f \\ (Y,d) & Y} \end{matrix} \] Suppose $f \colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ in $\ensuremath{\mathbb X}\xspace$ and let $(X,e)$ be an object of ${\cal O}(\ensuremath{\mathbb X}\xspace)$. Then $(X',\rst{fe})$ is also an object of ${\cal O}(\ensuremath{\mathbb X}\xspace)$, and $f\colon(X',\rst{fe}) \@ifnextchar^ {\t@@}{\t@@^{}} (X,e)$ is a map in ${\cal O}(\ensuremath{\mathbb X}\xspace)$ since $\rst{fe} = \rst{\rst{fe}fe}$. We shall show that this map is prone over $f$. 
To that end, suppose that $g \colon (Y,e') \@ifnextchar^ {\t@@}{\t@@^{}} (X,e)$ and $h \colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X'$ are maps in ${\cal O}(\ensuremath{\mathbb X}\xspace)$ and $\ensuremath{\mathbb X}\xspace$ respectively such that \[\xymatrix{Y \ar[rd]^g \ar[d]_h & \\ X' \ar[r]_f & X }\] is precise in $\ensuremath{\mathbb X}\xspace$, then $h\colon (Y,e') \@ifnextchar^ {\t@@}{\t@@^{}} (X',\rst{fe})$ is a map in ${\cal O}(\ensuremath{\mathbb X}\xspace)$ since $\rst{h \rst{fe}} = \rst{hfe} = \rst{ge} \geq e'$. Furthermore this gives a precise triangle in ${\cal O}(\ensuremath{\mathbb X}\xspace)$ showing $f\colon (X',\rst{fe}) \@ifnextchar^ {\t@@}{\t@@^{}} (X,e)$ is prone. \end{proof} \subsubsection{Assembly categories} \label{assemblies} Another example comes from \cite{msc:nester-calgary} and is constructed from a category of assemblies, ${\sf Asm}(F)$, associated to a restriction functor, $F\colon \ensuremath{\mathbb A}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$, from any restriction category, $\ensuremath{\mathbb A}\xspace$, called the category of {\bf realizers}, into a Cartesian restriction category, $\ensuremath{\mathbb X}\xspace$, the base. \begin{definition}\label{def:assemblies} The category of assemblies, ${\sf Asm}(F)$, is defined as follows: \begin{description} \item[Objects:] $\varphi \in {\cal O}(F(A) \times X)$ for all objects $A \in \ensuremath{\mathbb A}\xspace$ and $X \in \ensuremath{\mathbb X}\xspace$.
\item[Maps:] $f\colon \varphi \@ifnextchar^ {\t@@}{\t@@^{}} \varphi'$, where $\varphi \in {\cal O}(F(A) \times X)$ and $\varphi' \in {\cal O}(F(A') \times X')$, are maps $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ for which there is a ``tracking'' map $\gamma\colon A \@ifnextchar^ {\t@@}{\t@@^{}} A'$ in $\ensuremath{\mathbb A}\xspace$ satisfying: \begin{enumerate}[{\bf [Tk.1]}] \item $\varphi(F(\gamma) \times f) = \varphi(F(\gamma) \times f)\varphi'$ \item $\rst{\varphi(1 \times f)} = \rst{\varphi(F(\gamma) \times f)}$ \end{enumerate} \item[Restriction:] As in $\ensuremath{\mathbb X}\xspace$. \end{description} \end{definition} The proof that ${\sf Asm}(F)$ is a restriction category -- and, indeed, when $\ensuremath{\mathbb A}\xspace$ is a Cartesian restriction category and $F$ preserves this Cartesian structure, then ${\sf Asm}(F)$ is a Cartesian restriction category -- may be found in \cite[Prop. 5.2 and 5.3]{msc:nester-calgary}. If $\ensuremath{\mathbb X}\xspace$ is a Cartesian restriction category and $F \colon \ensuremath{\mathbb A}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a restriction functor, the forgetful functor ${\sf p} \colon {\sf Asm}(F) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a latent fibration. Specifically, if $\varphi \in {\cal O}(F(A) \times X)$, $\psi \in {\cal O}(F(B) \times Y)$, and $f \colon \varphi \@ifnextchar^ {\t@@}{\t@@^{}} \psi$ is a map in ${\sf Asm}(F)$, then ${\sf p}(f)$ is $f \colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$, the underlying map in $\ensuremath{\mathbb X}\xspace$. Clearly ${\sf p}$ is a Cartesian restriction functor. \begin{proposition}\label{prop:assemblies} ${\sf p} \colon {\sf Asm}(F) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ as defined above is a latent fibration. \end{proposition} \begin{proof} Suppose $f \colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is a map in $\ensuremath{\mathbb X}\xspace$, and let $\psi \in {\cal O}(F(B) \times Y)$ be an assembly.
Then $\rst{(1 \times f)\psi} \in {\cal O}(F(B) \times X)$ is also an assembly, and $f$ can be viewed as a map $\rst{(1 \times f)\psi} \@ifnextchar^ {\t@@}{\t@@^{}} \psi$ in ${\sf Asm}(F)$ which is readily seen to be prone. \end{proof} \subsubsection{Idempotent splitting} \label{splitting} We end these examples with a latent fibration which is a genuine semifunctor. \begin{proposition}\label{prop:splitting_example} For any restriction category $\ensuremath{\mathbb X}\xspace$, the forgetful functor from the restriction idempotent splitting of $\ensuremath{\mathbb X}\xspace$ back to $\ensuremath{\mathbb X}\xspace$, $U: \split(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$, is a latent fibration. \end{proposition} \begin{proof} Recall that in $\split(\ensuremath{\mathbb X}\xspace)$: \begin{itemize} \item an object is a pair $(X,e)$ with $e$ a restriction idempotent on $X$; \item a map $f\colon (X,e) \@ifnextchar^ {\t@@}{\t@@^{}} (X',e')$ is a map $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ such that $efe' = f$; \item composition and restriction are as in $\ensuremath{\mathbb X}\xspace$; \item the identity of $(X,e)$ is $e$ itself. \end{itemize} Thus in general the forgetful functor $U\colon \split(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a genuine semifunctor (that is, it preserves composition and restriction but not necessarily identities). To see that it is a latent fibration, suppose $(X,e)$ is an object of $\split(\ensuremath{\mathbb X}\xspace)$, and $f\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} U(X,e)$ in $\ensuremath{\mathbb X}\xspace$ is such that $f = fU(1_{(X,e)}) = fe$. We want to show that $f\colon (X',\rs{fe}) \@ifnextchar^ {\t@@}{\t@@^{}} (X,e)$ is a prone lift of $f$.
Indeed, $f$ is well-defined since \[ \rs{fe}f = f\rs{e}e = fe = f \] If we have a precise factorization $hf=g$, then $h$ is the unique lift: \[ \xymatrix{(X'',e') \ar@{..>}[d]_{h} \ar[dr]^{g} & ~ \ar@{}[drr]|{\textstyle\mapsto}&&X'' \ar[d]_h \ar[dr]^{g} & ~ \\ (X',\rs{fe}) \ar[r]_{f} & (X,e) && X' \ar[r]_{f} & X} \] It is well-defined since \[ e''h\rs{fe} = \rs{e''hfe}h = \rs{e''ge}h = \rs{g}h = \rs{h}h = h, \] and is clearly unique. \end{proof} \subsection{Basic theory of latent fibrations and prone arrows} A simple observation is that a latent fibration reduces to an ordinary fibration when it is between total map categories. To see this note that (i) a restriction semifunctor between such categories must be an ordinary functor, and (ii) the requirement of preciseness is automatically satisfied by total maps and so the condition of having enough prone maps reduces to the definition of a Cartesian arrow in an ordinary fibration. \begin{lemma} \label{latent-fibration-total} A latent fibration between restriction categories in which all maps are total is a fibration. 
\end{lemma} Just as for ordinary fibrations we can define morphisms of latent fibrations: \begin{definition} A morphism of latent fibrations $F = (F_1,F_0)\colon (\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} (\ensuremath{\mathsf{p}}'\colon \ensuremath{\mathbb E}\xspace' \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace')$ is a pair of restriction semifunctors, respectively, between the ``total'' categories, $F_1\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb E}\xspace'$, and the bases $F_0\colon \ensuremath{\mathbb B}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace'$, making \[ \xymatrix{ \ensuremath{\mathbb E}\xspace \ar[r]^{F_1} \ar[d]_{\ensuremath{\mathsf{p}}} & \ensuremath{\mathbb E}\xspace' \ar[d]^{\ensuremath{\mathsf{p}}'} \\ \ensuremath{\mathbb B}\xspace \ar[r]_{F_0} & \ensuremath{\mathbb B}\xspace'} \] commute, and such that $F_1$ preserves prone maps. \end{definition} Transformations of fibrations are ``pillows'' of restriction transformations $(\alpha,\beta)\colon (F_1,F_0) \@ifnextchar^ {\t@@}{\t@@^{}} (G_1,G_0)$ with $\alpha \ensuremath{\mathsf{p}}' = \ensuremath{\mathsf{p}} \beta$. Many results for Cartesian arrows for an ordinary functor have a restriction analogue for prone arrows for a restriction functor. For example: \begin{lemma} \label{composites-prones} For any restriction semifunctor, ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$, composites of ${\sf p}$-prone arrows in $\ensuremath{\mathbb E}\xspace$ are ${\sf p}$-prone and identity maps are $\ensuremath{\mathsf{p}}$-prone. \end{lemma} \begin{proof} Suppose $f_1$ and $f_2$ are prone in $\ensuremath{\mathbb E}\xspace$ and $k {\sf p}(f_1f_2) = {\sf p}(g)$ is a precise triangle.
Then $(k {\sf p}(f_1)) {\sf p}(f_2)$ is also precise as: \[ k {\sf p}(f_1) \rst{{\sf p}(f_2)} = \rst{k {\sf p}(f_1f_2)} k {\sf p}(f_1) = \rst{k \rst{{\sf p}(f_1f_2)}} k {\sf p}(f_1) = \rst{k} k {\sf p}(f_1) = k {\sf p}(f_1) \] Now as $f_2$ is prone we have a unique $\widetilde{k {\sf p}(f_1)}$ sitting above making $\widetilde{k {\sf p}(f_1)} f_2$ precise. But then as $k {\sf p}(f_1) = {\sf p}(\widetilde{k {\sf p}(f_1)})$ is precise this gives a unique $\widetilde{k}$ with $\widetilde{k} f_1 = \widetilde{k {\sf p}(f_1)}$ precise. This certainly means $\widetilde{k} (f_1 f_2)= \widetilde{k {\sf p}(f_1)} f_2 = g$ but also this is precise as $\rst{\widetilde{k}} = \rst{\widetilde{k {\sf p}(f_1)}}$ as $\widetilde{k} f_1 = \widetilde{k {\sf p}(f_1)}$ precisely commutes, and $\rst{\widetilde{k {\sf p}(f_1)}} = \rst{g}$ as $\widetilde{k {\sf p}(f_1)} f_2 =g$ precisely commutes, so $\rst{\widetilde{k}} = \rst{g}$ showing $\widetilde{k} (f_1f_2)= g$ precisely commutes. Finally $\widetilde{k}$ is unique as supposing $h$ was an alternate then $(hf_1) f_2 = g$ precisely commutes with ${\sf p}(hf_1) = k{\sf p}(f_1)$ making $hf_1 = \widetilde{k{\sf p}(f_1)}$ but then by similar reasoning $h= \widetilde{k}$. Identity maps are clearly always prone when $\ensuremath{\mathsf{p}}$ is a restriction functor, but this is not completely immediate if $\ensuremath{\mathsf{p}}$ is only a semifunctor. However, it still works in this case as if we have \[ \xymatrix{Y \ar@{..>}[d]^{} \ar[dr]^{g} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(Y) \ar[d]_h \ar[dr]^{\ensuremath{\mathsf{p}}(g)} & ~ \\ X \ar[r]_{1_X} & X && \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(1_X)} & \ensuremath{\mathsf{p}}(X)} \] with the base triangle precise, then since $\ensuremath{\mathsf{p}}(1_X)$ is a restriction idempotent, by Lemma \ref{Jaws}.ii, $h = \ensuremath{\mathsf{p}}(g)$. Thus $g$ is the unique fill-in for the triangle in $\ensuremath{\mathbb E}\xspace$.
\end{proof} As we shall see, however, restriction idempotents are \emph{not} always prone: see Section \ref{sec:types} for details, and Proposition \ref{prop:separated_equivalences} for a characterization of which semifunctors have all restriction idempotents prone. In the total case, it is well-known that any two Cartesian arrows over the same arrow (with a common codomain) have a unique mediating isomorphism. For prone arrows we now show that the analogous situation induces a mediating \emph{partial} isomorphism. \begin{definition} In a restriction category, a {\bf mediating} map between two maps $f$ and $f'$ with a common codomain, is a partial isomorphism, $\alpha$, such that $\alpha f' = f$ and $\alpha^{(-1)}f = f'$ are both precise. \end{definition} \begin{lemma} \label{mediating_maps} If ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a restriction semifunctor, then: \begin{enumerate}[(i)] \item If $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ and $f'\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} Y$ are ${\sf p}$-prone maps of $\ensuremath{\mathbb E}\xspace$ with ${\sf p}(f) = {\sf p}(f')$ then there is a unique mediating partial isomorphism $\alpha \colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ (so that ${\sf p}(\alpha) = \rst{{\sf p}(f)}$, $\alpha f' = f$, and $\rst{\alpha} = \rst{f}$, $\alpha^{(-1)} f = f'$, and $\rst{\alpha^{(-1)}} = \rst{f'}$). \item If $\alpha$ is a mediating partial isomorphism in $\ensuremath{\mathbb E}\xspace$ between $f$ and $f'$ (so that $\alpha f' = f$, and $\rst{\alpha} = \rst{f}$, $\alpha^{(-1)} f = f'$, and $\rst{\alpha^{(-1)}} = \rst{f'}$ as above) then, if either $f$ or $f'$ is ${\sf p}$-prone, then both $f$ and $f'$ are ${\sf p}$-prone. \item If $e,e'\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ are both prone restriction idempotents in $\ensuremath{\mathbb E}\xspace$ such that $\ensuremath{\mathsf{p}}(e) = \ensuremath{\mathsf{p}}(e')$, then $e = e'$. 
\end{enumerate} \end{lemma} \begin{proof}~ \begin{enumerate}[{\em (i)}] \item Let $\alpha,\alpha'$ be the liftings for the precise triangle $\rst{{\sf p}(f)}{\sf p}(f) = {\sf p}(f)$ (recalling ${\sf p}(f)={\sf p}(f')$): \[ \xymatrix{ X \ar@{.>}[d]_\alpha \ar[rd]^f \\ X' \ar[r]_{f'} & Y } \hspace{30pt} \xymatrix{ X' \ar@{.>}[d]_{\alpha'} \ar[rd]^{f'} \\ X \ar[r]_f & Y } \] The fact that they are precise triangles implies immediately that $\rst{\alpha} = \rst{f}$ and $\rst{\alpha'} = \rst{f'}$. The composite of these precise triangles sits over the same precise triangle, $\rst{\ensuremath{\mathsf{p}}(f)}\ensuremath{\mathsf{p}}(f) = {\sf p}(f)$, and so, using Lemma \ref{Jaws}.iv, $\alpha\alpha' = \rst{f}$ and $\alpha'\alpha = \rst{f'}$ making them partial inverses. \item Suppose $f$ and $f'$ are mediated by $\alpha$, then the precise triangles on $f$ are in bijective correspondence to those on $f'$, as a precise $kf=g$ is carried to a precise $(k\alpha) f' = g$ from which the result is immediate. \item By (i), there is a mediating partial isomorphism $\alpha\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ between $e$ and $e'$. In particular, $\alpha e = e'$ is precise. But then by Lemma \ref{Jaws}.ii, this means $\alpha = e'$, and similarly $\alpha^{(-1)} = e$. But then $\alpha$ is a restriction idempotent, so its partial inverse is itself, so we have $e = \alpha^{(-1)} = \alpha = e'$. \end{enumerate} \end{proof} As noted above, restriction idempotents are not always prone. 
However, isomorphisms, and, more generally, restriction retractions are: \begin{lemma} \label{restriction-retraction} If ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a restriction semifunctor, then: \begin{enumerate}[(i)] \item Isomorphisms are always $\ensuremath{\mathsf{p}}$-prone and, if $f$ is total and $\ensuremath{\mathsf{p}}$-prone with $\ensuremath{\mathsf{p}}(f)$ an isomorphism and $\ensuremath{\mathsf{p}}$ a restriction functor, then $f$ is itself an isomorphism. \item All restriction retractions in $\ensuremath{\mathbb E}\xspace$ are ${\sf p}$-prone. \item If $f = r f'$ is ${\sf p}$-prone in $\ensuremath{\mathbb E}\xspace$ and $r$ is a restriction retraction, then $f'$ is $\ensuremath{\mathsf{p}}$-prone. \item If $\ensuremath{\mathsf{p}}$ is a restriction functor, and $f$ is a ${\sf p}$-prone map such that $\rst{f}$ splits, then $\ensuremath{\mathsf{p}}(f)$ is a restriction retraction if and only if $f$ is a restriction retraction. \end{enumerate} \end{lemma} \begin{proof}~ \begin{enumerate}[{\em (i)}] \item That isomorphisms are prone is immediate (and follows for example from part {\em (ii)}, see below). 
Suppose $f\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E$ is total, prone, and $\ensuremath{\mathsf{p}}(f)$ is an isomorphism with $\ensuremath{\mathsf{p}}$ a restriction functor, then $\ensuremath{\mathsf{p}}(f)^{-1}$ lifts: \[ \xymatrix{E \ar@{=}[dr] \ar@{..>}[d]_{\widetilde{\ensuremath{\mathsf{p}}(f)^{-1}}} & ~\ar@{}[drr]|{\textstyle\mapsto}&&& \ensuremath{\mathsf{p}}(E) \ar@{=}[dr] \ar[d]_{\ensuremath{\mathsf{p}}(f)^{-1}} \\ E' \ar[r]_{f'} & E &&&\ensuremath{\mathsf{p}}(E') \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(E)} \] and then we have \[ \xymatrix{E' \ar[dr]^{f} \ar@{..>}[d]_{f~\widetilde{\ensuremath{\mathsf{p}}(f)^{-1}}} & ~ \ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(E') \ar[dr]^{\ensuremath{\mathsf{p}}(f)} \ar@{=}[d] \\ E' \ar[r]_{f} & E && \ensuremath{\mathsf{p}}(E') \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(E)} \] showing $f~\widetilde{\ensuremath{\mathsf{p}}(f)^{-1}}$ is a lifting of the identity map. However, an alternate lifting is the identity map so $f~\widetilde{\ensuremath{\mathsf{p}}(f)^{-1}} = 1_{E'}$, showing $f$ is an isomorphism. This is the same argument for why Cartesian arrows above isomorphisms are isomorphisms in an ordinary fibration. \item A restriction retraction, $r$, is a partial isomorphism whose inverse is a section $m$, thus $rm = \rst{r}$ and $mr = 1$. Suppose ${\sf p}(g) = h {\sf p}(r)$ then, by Lemma \ref{Jaws}.iii, $h = {\sf p}(g) {\sf p}(m) = {\sf p}(gm)$ so that $gm$ is a possible lifting and will necessarily be so provided $gmr = g$ but this is the case as $gmr = g1 = g$. \item Suppose the restriction retraction $r\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X'$ has section $m\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} Y$, so $rm = \rs{r}$ and $mr = 1_{X'}$. Suppose that there is a $g\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} X$ so that $h\ensuremath{\mathsf{p}}(f') = \ensuremath{\mathsf{p}}(g)$ is precise.
Then we claim that there is a lifting $\tilde{h}$ over $h\ensuremath{\mathsf{p}}(m)$ via the prone-ness of $f$: \[ \xymatrix{Z \ar[dr]^g \ar@{..>}[d]_{\widetilde{h} } & ~ \ar@{}[drr]|{\textstyle~~\mapsto}&&&\ensuremath{\mathsf{p}}(Z) \ar[dr]^{\ensuremath{\mathsf{p}}(g)} \ar[d]_{h \ensuremath{\mathsf{p}}(m)} \\ Y \ar[r]_{f} & X &&& \ensuremath{\mathsf{p}}(Y) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(X)} \] For this, we need to check that the base triangle is precise. (It obviously commutes by definition of $f$). For preciseness, note that since $h\ensuremath{\mathsf{p}}(f') = \ensuremath{\mathsf{p}}(g)$ is precise, \[ h = h \rs{\ensuremath{\mathsf{p}}(f')} = h \rs{\ensuremath{\mathsf{p}}(1_{X'}f')} = h \rs{\ensuremath{\mathsf{p}}(1_{X'})\ensuremath{\mathsf{p}}(f')} \leq h \rs{\ensuremath{\mathsf{p}}(1_{X'})}, \] but we always have the opposite inequality, so $h = h \rs{\ensuremath{\mathsf{p}}(1_{X'})}$. Thus the base triangle above is precise since \[ \rs{h\ensuremath{\mathsf{p}}(m)} = \rs{h\ensuremath{\mathsf{p}}(\rs{m})} = \rs{h\ensuremath{\mathsf{p}}(1_{X'})} = \rs{h} = \rs{\ensuremath{\mathsf{p}}(g)}. \] We now claim that $\tilde{h}r\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} X'$ is the unique lift for $h$, demonstrating the prone-ness of $f'$. The triangle commutes since $ \tilde{h}rf' = \tilde{h}f = g$, is precise since \[ \tilde{h} r\rs{f'} = \rs{\tilde{h}rf'} \tilde{h}r = \rs{\tilde{h}f}\tilde{h}r = \rs{\tilde{h}}\tilde{h}r = \tilde{h}r, \] and is over $h$ since \[ \ensuremath{\mathsf{p}}(\tilde{h}r) = h\ensuremath{\mathsf{p}}(m)\ensuremath{\mathsf{p}}(r) = h \ensuremath{\mathsf{p}}(mr) = h\ensuremath{\mathsf{p}}(1_{X'}) = h \] with the final equality proven above. Finally, suppose we have some other $k\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} X'$ so that $kf' = g, \rs{k} = \rs{g}$, and $\ensuremath{\mathsf{p}}(k) = h$. 
Then $km$ is a precise lifting of $h\ensuremath{\mathsf{p}}(m)$ as \[ \ensuremath{\mathsf{p}}(km) = \ensuremath{\mathsf{p}}(k)\ensuremath{\mathsf{p}}(m) = h\ensuremath{\mathsf{p}}(m), \ \ kmf = kmrf' = kf' = g, \] and \[ km\rs{f} = \rs{kmf}km = \rs{g}km = \rs{k}km = km. \] Thus $km = \tilde{h}$, so $kmr = \tilde{h}r$, so $k = \tilde{h}r$. Thus $f'$ is indeed $\ensuremath{\mathsf{p}}$-prone. \item If $f$ is a restriction retraction then $\ensuremath{\mathsf{p}}(f)$ is certainly a restriction retraction since $\ensuremath{\mathsf{p}}$ is a restriction functor. For the converse, suppose $f\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ is $\ensuremath{\mathsf{p}}$-prone with $\ensuremath{\mathsf{p}}(f)$ a restriction retraction, so that there is an $m\colon \ensuremath{\mathsf{p}}(X) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(Y)$ with $m\ensuremath{\mathsf{p}}(f) = 1_{\ensuremath{\mathsf{p}}(X)}$ and $\rst{\ensuremath{\mathsf{p}}(f)} = \ensuremath{\mathsf{p}}(f)m$. Then $f$ is a retraction with $\widetilde{m}$ its section: \[ \xymatrix{X \ar@{=}[dr] \ar@{..>}[d]_{\widetilde{m}} & ~ \ar@{}[drr]|{\textstyle\mapsto}&&\ensuremath{\mathsf{p}}(X) \ar@{=}[dr] \ar[d]_{m} & ~ \\ Y \ar[r]_f & X && \ensuremath{\mathsf{p}}(Y) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(X)} \] so this means, by splitting $\rst{f}$, that $f$ can be factorized into $rf'$ where $r\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Y'$ is the coequalizer of $\rst{f}$ and $1_Y$ and lies over the equalizer of $\ensuremath{\mathsf{p}}(f)$ and $1_{\ensuremath{\mathsf{p}}(X)}$ which means $f'$ is prone over an isomorphism. However, $f'$ is also total as using the fact that $r$ is epic we have $r\rst{f'} = \rst{rf'}r =\rst{f}r =r = r1_{Y'}$. It follows that $\widetilde{\ensuremath{\mathsf{p}}(f)^{-1}}f' = 1_{X}$ and so $f'$ is an isomorphism from part (i). This means, in turn, that $f=rf'$ is a restriction retraction. 
\end{enumerate} \end{proof} In the case when $\ensuremath{\mathsf{p}}$ is a restriction functor and all restriction idempotents in $\ensuremath{\mathbb E}\xspace$ split, Cartesian arrows give prone arrows: \begin{lemma}\label{lemma:cart_are_prone} Suppose that $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a restriction functor, and all restriction idempotents in $\ensuremath{\mathbb E}\xspace$ split. If $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is a total map in $\ensuremath{\mathbb E}\xspace$ which is Cartesian for the functor ${\sf Total}(\ensuremath{\mathsf{p}})\colon {\sf Total}(\ensuremath{\mathbb E}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Total}(\ensuremath{\mathbb B}\xspace)$, then $f$ is $\ensuremath{\mathsf{p}}$-prone. \end{lemma} \begin{proof} Suppose we have \[ \xymatrix{Z \ar[dr]^{g} &~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(Z) \ar[d]_h \ar[dr]^{\ensuremath{\mathsf{p}}(g)} & ~ \\ X \ar[r]_{f} & Y && \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] with the triangle in $\ensuremath{\mathbb B}\xspace$ precise, so $\rs{h} = \rs{\ensuremath{\mathsf{p}}(g)}$. Let $m\colon Z' \@ifnextchar^ {\t@@}{\t@@^{}} Z$, $r\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} Z'$ be a splitting of $\rs{g}$, so $rm = \rs{g}$ and $mr = 1$. Then in particular $mg\colon Z' \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is total. Moreover, the pair $(\ensuremath{\mathsf{p}}(m),\ensuremath{\mathsf{p}}(r))$ gives a splitting of $\rs{\ensuremath{\mathsf{p}}(g)}$ and hence of $h$, so $\ensuremath{\mathsf{p}}(m)h$ is also total.
Thus, we have diagrams in ${\sf Total}(\ensuremath{\mathbb E}\xspace)$ and ${\sf Total}(\ensuremath{\mathbb B}\xspace)$, and hence get a unique total $h'\colon Z' \@ifnextchar^ {\t@@}{\t@@^{}} X$: \[ \xymatrix{Z' \ar@{..>}[d]_{h'} \ar[dr]^{mg} & ~ \ar@{}[drr]|{\textstyle~~\mapsto}&&&\ensuremath{\mathsf{p}}(Z) \ar[d]_{\ensuremath{\mathsf{p}}(m)h} \ar[dr]^{\ensuremath{\mathsf{p}}(m)\ensuremath{\mathsf{p}}(g)} & ~ \\ X \ar[r]_{f} & Y &&& \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] We now claim that the composite \[ Z \@ifnextchar^ {\t@@}{\t@@^{}}^{r} Z' \@ifnextchar^ {\t@@}{\t@@^{}}^{h'} X \] gives the required unique arrow in the first triangle. Indeed, we have \[ rh'f = rmg = \rs{g}g = g, \] and since $h'$ is total, \[ \rs{rh'} = \rs{r} = \rs{g}, \] and \[ \ensuremath{\mathsf{p}}(rh') = \ensuremath{\mathsf{p}}(r)\ensuremath{\mathsf{p}}(m)h = \ensuremath{\mathsf{p}}(rm)h = \ensuremath{\mathsf{p}}(\rs{g})h = \rs{\ensuremath{\mathsf{p}}(g)}h = \rs{h}h = h, \] so $rh'$ has all the required properties. Moreover, if there is some other $k\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} X$ with these properties, then $mk$ is total (as $\rs{mk} = \rs{m\rs{g}} = \rs{mg} = 1$) and has the same other properties as $h'$, so $h' = mk$. Thus, $rh' = rmk = \rs{g}k = \rs{k}k = k$, so $rh'$ is unique. \end{proof} It follows from the general theory of fibrations of 2-categories that latent fibrations behave well with respect to composition and pullback. However, it is still helpful to see how this behaviour works out concretely. 
\begin{lemma}\label{lemma:prone_with_comp_functors} If $\ensuremath{\mathsf{q}}\colon \ensuremath{\mathbb F}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb E}\xspace$ and $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ are restriction semifunctors, $f'$ is $\ensuremath{\mathsf{p}}$-prone over $f$, and $f''$ is $\ensuremath{\mathsf{q}}$-prone over $f'$, then $f''$ is $\ensuremath{\mathsf{q}}\ensuremath{\mathsf{p}}$-prone over $f$. \end{lemma} \begin{proof} This is given by a straightforward two step lifting: \[ \xymatrix{ F \ar[dr]^{g} \ar@{..>}[d]_{\widetilde{\widetilde{h}}} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{q}}(F) \ar[dr]^{\ensuremath{\mathsf{q}}(g)} \ar[d]_{\widetilde{h}} & ~ \ar@{}[drr]|{\textstyle\mapsto} && \ensuremath{\mathsf{p}}(\ensuremath{\mathsf{q}}(F)) \ar[dr]^{\ensuremath{\mathsf{p}}(\ensuremath{\mathsf{q}}(g))} \ar[d]_{h} \\ B \ar[r]_{f''} & F && \ensuremath{\mathsf{q}}(B) \ar[r]_{f'} & \ensuremath{\mathsf{q}}(F') && \ensuremath{\mathsf{p}}(\ensuremath{\mathsf{q}}(B)) \ar[r]_f & \ensuremath{\mathsf{p}}(\ensuremath{\mathsf{q}}(F'))} \] \end{proof} \begin{corollary} Latent fibrations are closed under composition. \end{corollary} Unfortunately, it is not the case that splitting the idempotents of a latent fibration $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$, to obtain $\split(\ensuremath{\mathsf{p}})\colon \split(\ensuremath{\mathbb E}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \split(\ensuremath{\mathbb B}\xspace)$, will yield in general a latent fibration. 
However, it is the case that splitting the idempotents of the total category will yield a latent fibration over the same base, $U \ensuremath{\mathsf{p}}\colon\split(\ensuremath{\mathbb E}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$, as we may precompose with the latent fibration $U\colon \split(\ensuremath{\mathbb E}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb E}\xspace$ of subsection \ref{splitting}. Shortly we shall see that $\split(\ensuremath{\mathsf{p}})$ is a latent fibration when $\ensuremath{\mathsf{p}}$ is an \emph{admissible} latent fibration (see Proposition \ref{splitting-latent-fibration}). Pullbacks in the category of restriction categories and restriction functors are described in \cite{journal:rcats1}, and work similarly for restriction semifunctors: \begin{definition} If $p\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ and $F\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ are restriction semifunctors, then their pullback is \[\xymatrix{ \ensuremath{\mathbb W}\xspace \ar[r]^{p_1} \ar[d]_{p_0} & \ensuremath{\mathbb E}\xspace \ar[d]^{\ensuremath{\mathsf{p}}} \\ \ensuremath{\mathbb X}\xspace \ar[r]_F & \ensuremath{\mathbb B}\xspace }\] in which the category $\ensuremath{\mathbb W}\xspace$ is defined by \begin{description} \item {\bf objects} are pairs $(X,E)$ where $X$ and $E$ are objects of $\ensuremath{\mathbb X}\xspace$ and $\ensuremath{\mathbb E}\xspace$ respectively which satisfy $F(X) = {\sf p}(E)$. \item {\bf maps} of type $(X,E) \@ifnextchar^ {\t@@}{\t@@^{}} (X',E')$ are pairs $(f,g)$ where $f$ and $g$ are maps of $\ensuremath{\mathbb X}\xspace$ and $\ensuremath{\mathbb E}\xspace$ respectively which satisfy $F(f) = \ensuremath{\mathsf{p}}(g)$. \item {\bf composition} and {\bf identities} are defined pointwise. 
\item {\bf restriction} is given by $\rst{(f,g)} = (\rst{f},\rst{g})$, which is well defined since, if $F(f) = \ensuremath{\mathsf{p}}(g)$, then $F(\rst{f}) = \rst{F(f)} = \rst{\ensuremath{\mathsf{p}}(g)} = \ensuremath{\mathsf{p}}(\rst{g})$. \end{description} and the pullback maps $p_0,p_1$ are the first and second projections. \end{definition} \begin{lemma}\label{lemma:pullback_prone} If $p\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ and $F\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ are restriction semifunctors and $W$ is their pullback as defined above, then $(f,g)\colon (X,E) \@ifnextchar^ {\t@@}{\t@@^{}} (X',E')$ in $\ensuremath{\mathbb W}\xspace$ is $\ensuremath{\mathsf{p}}_0$-prone if and only if $g$ is $\ensuremath{\mathsf{p}}$-prone. \end{lemma} \begin{proof} Suppose we have \[ \xymatrix@C=3em{ (X'',E'') \ar[rd]^-{(f',g')} & ~\ar@{}[drr]|{\textstyle\mapsto}&& X'' \ar[d]_h \ar[rd]^{f'} & \\ (X,E) \ar[r]_-{(f,g)}& (X',E') && X \ar[r]_f & X'} \] then since $g$ is $p$-prone and $F(f) = \ensuremath{\mathsf{p}}(g), F(f') = \ensuremath{\mathsf{p}}(g')$, etc., we also have \[ \xymatrix@C=3em{ E'' \ar[rd]^-{g'} \ar[d]_{\widetilde{h}} & ~\ar@{}[drr]|{\textstyle\mapsto} && F(X'') \ar[d]_{F(h)} \ar[rd]^{F(f')} & \\ E \ar[r]_-{g}& X' && F(X) \ar[r]_{F(f)} & F(X')} \] so $(h, \tilde{h})$ uniquely fills in the first triangle. Thus, $(f,g)$ is $\ensuremath{\mathsf{p}}_0$-prone; a similar proof shows the converse. \end{proof} \begin{corollary}\label{prop:pullback_latent} The pullback of a latent fibration along any restriction semifunctor is a latent fibration. \end{corollary} Recall that in an ordinary fibration, any map in the total category can be factored as a vertical map followed by a Cartesian map, and this factorization is unique up to a unique vertical isomorphism. 
A similar result holds for latent fibrations, with vertical replaced by \emph{sub}vertical: \begin{definition} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a restriction semifunctor, then a map $v\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb E}\xspace$ is said to be \textbf{subvertical} if $\ensuremath{\mathsf{p}}(v)$ is a restriction idempotent. \end{definition} \begin{proposition}\label{prop:factorization} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a latent fibration, then for any map $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb E}\xspace$, there is a subvertical map $v\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ and a prone map $c\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} Y$ such that \[ \xymatrix{ X \ar[d]_{v} \ar[dr]^{f} & \\ X' \ar[r]_{c} & Y } \] is a precise triangle, and, moreover, $\rs{\ensuremath{\mathsf{p}}(c)} = \ensuremath{\mathsf{p}}(v)$. Such a factorization is unique up to unique subvertical partial isomorphism. \end{proposition} \begin{proof} For existence, let $c\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} Y$ be a prone arrow over $\ensuremath{\mathsf{p}}(f)\colon \ensuremath{\mathsf{p}}(X) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(Y)$, and let $v$ be the induced unique map \[ \xymatrix{X \ar@{..>}[d]_{v} \ar[dr]^{f} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(X) \ar[d]_{\rs{\ensuremath{\mathsf{p}}(f)}} \ar[dr]^{\ensuremath{\mathsf{p}}(f)} & ~ \\ X' \ar[r]_{c} & Y && \ensuremath{\mathsf{p}}(X') \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] This satisfies all the required conditions; uniqueness follows from using Lemma \ref{mediating_maps}.i. 
\end{proof} \subsection{Substitution semifunctors} Analogous to the fiber of an object in a fibration, is the notion of a \emph{strand} of an object in a latent fibration. \begin{definition} Given a restriction functor $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$, the {\bf strand} of $B \in \ensuremath{\mathbb B}\xspace$, written ${\sf p}^{(-1)}(B)$, is the category whose objects are objects $E$ of $\ensuremath{\mathbb E}\xspace$ such that $\ensuremath{\mathsf{p}}(E) = B$, and whose maps are the $f$ of $\ensuremath{\mathbb E}\xspace$ such that $\ensuremath{\mathsf{p}}(f) \leq 1_B$ (i.e. $\ensuremath{\mathsf{p}}(f) \in {\cal O}(B)$); that is, each $f$ is subvertical. \end{definition} A latent fibration may be \emph{cloven} in the same sense as for ordinary fibrations: \begin{definition} A latent fibration {\bf has a cleavage} (or is {\bf cloven}) in case there is a chosen prone arrow $f^*_E \colon f^*(E) \@ifnextchar^ {\t@@}{\t@@^{}} E$ over each $f \colon A \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(E)$ for each $E \in \ensuremath{\mathbb E}\xspace$. \end{definition} We now construct, for a cloven latent fibration, the analogue of a reindexing or substitution functor. In general, the substitution functors we obtain between the strands of a latent fibration will only be restriction semifunctors. Let ${\sf p} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ be a cloven latent fibration, and let $u \colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$ be a map in $\ensuremath{\mathbb B}\xspace$. 
We define the \emph{substitution semifunctor} $u^* \colon {\sf p}^{(-1)}(B) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}^{(-1)}(A)$, from the strand above $B$ to the strand above $A$ as follows: on arrows $f \colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in ${\sf p}^{(-1)}(B)$, $u^*(f)$ is the arrow $u^*(X) \@ifnextchar^ {\t@@}{\t@@^{}} u^*(Y)$ in ${\sf p}^{(-1)}(A)$ given by the lifting \[ \text{in $\ensuremath{\mathbb E}\xspace$}\hspace{10pt} \xymatrix{ u^*(X) \ar@{.>}[dd]_{u^*(f)= \widetilde{\rst{u\ensuremath{\mathsf{p}}(f)}}} \ar[rd]^{u^*_X} \\ & X \ar[rd]^f \\ u^*(Y) \ar[rr]_{u^*_Y} && Y } \hspace{30pt} \text{in $\ensuremath{\mathbb X}\xspace$}\hspace{10pt} \xymatrix{ A \ar[dd]_{\rst{u\ensuremath{\mathsf{p}}(f)}} \ar[rd]^u && \\ & B \ar[rd]^{{\sf p}(f)} \\ A \ar[rr]_u && B } \] where $u^*(X)$ is the domain of the prone map above $u$ with codomain $X$. Furthermore, if $u \leq v$ then there is for each $X \in \ensuremath{\mathsf{p}}^{(-1)}(B)$ a map $(u \leq v)^{*}_X\colon u^{*}(X) \@ifnextchar^ {\t@@}{\t@@^{}} v^{*}(X)$ given by: \[ \xymatrix{ u^{*}(X) \ar@{..>}[d]_{(u \leq v)^{*}_X} \ar[dr]^{u^{*}} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(u^{*}(X))=A \ar[d]_{\rst{u}} \ar[dr]^-u \\ v^{*}(X) \ar[r]_{v^{*}} & X && \ensuremath{\mathsf{p}}(v^{*}(X))=A \ar[r]_-{v} & \ensuremath{\mathsf{p}}(X)=B} \] We then have: \begin{proposition} \label{pseudo-functor} If ${\sf p} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a latent fibration with a cleavage and $u \colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$ is a map in $\ensuremath{\mathbb X}\xspace$, then $u^* \colon {\sf p}^{(-1)}(B) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}^{(-1)}(A)$ as defined above is a restriction semifunctor. 
Furthermore, the assignment $(\_)^*\colon \ensuremath{\mathbb B}\xspace^{\rm op} \@ifnextchar^ {\t@@}{\t@@^{}} {\sf SRest}$ is a pseudo 2-functor where we regard $\ensuremath{\mathbb B}\xspace$ as a 2-category whose 2-cells are given by the restriction ordering of maps. \end{proposition} Recall that a pseudo 2-functor $P\colon \ensuremath{\mathbb B}\xspace^{\rm op} \@ifnextchar^ {\t@@}{\t@@^{}} {\sf SRest}$ associates to each map (1-cell) $f\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$ in $\ensuremath{\mathbb B}\xspace$ a functor (1-cell) $P(f)\colon P(B) \@ifnextchar^ {\t@@}{\t@@^{}} P(A)$ in ${\sf SRest}$ such that there are (2-cell) natural isomorphisms $\alpha_{f,g}\colon P(g) P(f) \@ifnextchar^ {\t@@}{\t@@^{}} P(fg)$ and $\alpha_X\colon 1_{P(X)} \@ifnextchar^ {\t@@}{\t@@^{}} P(1_X)$ satisfying: \[ \xymatrix@C=3em{P(f) \ar@{=}[dr] \ar[r]^-{P(f) \alpha_Y} &P(f)P(1_Y) \ar[d]^{\alpha_{f,1_x}} \\ & P(f)} ~~~~~ \xymatrix@C=3em{P(f) \ar@{=}[dr] \ar[r]^-{\alpha_X P(f)} &P(1_X)P(f) \ar[d]^{\alpha_{f,1_x}} \\ & P(f)} ~~~~~ \xymatrix@C=3em{P(h) P(g) P(f) \ar[d]_{P(h) \alpha_{f,g}} \ar[r]^-{\alpha_{g,h} P(f)} & P(gh) P(f) \ar[d]^{\alpha_{f,gh}} \\ P(h) P(fg) \ar[r]_-{\alpha_{h,fg}} & P(fgh)} \] Similarly if $(f \leq g)\colon f \@ifnextchar^ {\t@@}{\t@@^{}} g$ is a 2-cell in $\ensuremath{\mathbb B}\xspace$ then $P(f \leq g)\colon P(f) \@ifnextchar^ {\t@@}{\t@@^{}} P(g)$ must be a 2-cell (or transformation) in ${\sf SRest}$. This assignment must preserve the horizontal (1-cell) and vertical (2-cell) composition. Preserving the 2-cell composition is the requirement that $P(f \leq g)P(g \leq h) = P(f \leq h)$ while preserving the 1-cell composition means \[ \xymatrix{P(g);P(f) \ar[d]_{P(g\leq h);P(f\leq k)} \ar[r]^-{\alpha_{f,g}} & P(fg) \ar[d]^{P(fg \leq hk)} \\ P(h);P(k) \ar[r]_-{\alpha_{h,k}} & P(hk)} \] where the semicolon emphasizes that we are using the horizontal composition in ${\sf SRest}$. 
Thus, for $P\colon \ensuremath{\mathbb B}\xspace^{\rm op} \@ifnextchar^ {\t@@}{\t@@^{}} {\sf SRest}$ the inequalities (which are covariant) become transformations between semifunctors whose composites must be preserved. \medskip \begin{proof} We must first show that $u^{*}$ is a semifunctor, that is $u^*(\rst{f}) = \rst{u^*(f)}$ and $u^*(fg) = u^*(f)u^*(g)$. First, suppose $f \colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is a map in ${\sf p}^{(-1)}(B)$. Then $u^*(\rst{f})$ is defined as the lifting of $\rst{u \ensuremath{\mathsf{p}}(\rst{f})}$. However, $\rst{u^*_X(\rst{f})}$ also makes the triangle precise and $\ensuremath{\mathsf{p}}( \rst{u^*_X(\rst{f})}) = \rst{\ensuremath{\mathsf{p}}(u^*_X(\rst{f}))} = \rst{u\ensuremath{\mathsf{p}}(\rst{f})}$ so $u^*(\rst{f})= \rst{u^*(\rst{f})}$ and, thus $u^*$ preserves the restriction. Next, suppose $f \colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ and $g \colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Z$ are maps of ${\sf p}^{-1}(B)$. Then $u^*(fg)$ is defined as the lifting of $\rst{u\ensuremath{\mathsf{p}}(fg)} =\rst{u\ensuremath{\mathsf{p}}(f)\ensuremath{\mathsf{p}}(g)}$ while $u^*(f)u^*(g)$ is the lifting of $\ensuremath{\mathsf{p}}(u^*(f)u^*(g)) = \ensuremath{\mathsf{p}}(u^*(f)) \ensuremath{\mathsf{p}}(u^*(g)) = \rst{u\ensuremath{\mathsf{p}}(f)}~\rst{u\ensuremath{\mathsf{p}}(g)} = \rst{\rst{u\ensuremath{\mathsf{p}}(f)}u \ensuremath{\mathsf{p}}(g)} = \rst{u\rst{p(f)}\ensuremath{\mathsf{p}}(g)} = \rst{u\ensuremath{\mathsf{p}}(f)\ensuremath{\mathsf{p}}(g)}$ so that they are equal. Thus $u^*$ preserves composition, and is therefore a restriction semifunctor. 
To show that $(\_)^*$ is a pseudo functor we use the fact that composites of prone maps are unique up to a unique mediating partial isomorphism (combine Lemma \ref{composites-prones} with Lemma \ref{mediating_maps}) and the unit data is provided by the fact that the identity map $1_X$ is prone so there is a mediating map $X \@ifnextchar^ {\t@@}{\t@@^{}} 1_{\ensuremath{\mathsf{p}}(X)}^*(X)$. We must show $\rst{(u\leq v)^{*}_X} = u^*(1_X)$ and that the transformation is natural; that is, $u^*(f) (u\leq v)^{*}_Y = (u\leq v)^{*}_X v^*(f)$ (so $(u \leq v)^*_f$ is the identity). The first requirement is immediate from the preciseness of the triangle defining $(u \leq v)^{*}_X$ as $u^{*}(1_X) = \rst{u^*_X} = \rst{(u \leq v)^{*}_X}$. Suppose for the second that $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in the strand over $B$ then $\ensuremath{\mathsf{p}}(u^*(f) (u\leq v)^{*}_Y) = \rst{u\ensuremath{\mathsf{p}}(f)}~\rst{u} = \rst{u\ensuremath{\mathsf{p}}(f)}$ and $\ensuremath{\mathsf{p}}((u\leq v)_X v^*(f)) = \rst{u} ~\rst{vp(f)} = \rst{\rst{u}v\ensuremath{\mathsf{p}}(f)} = \rst{u\ensuremath{\mathsf{p}}(f)}$ so both arrows sit above $\rst{u\ensuremath{\mathsf{p}}(f)} \in \ensuremath{\mathbb B}\xspace$. As all the components are precise liftings their composites are as well; thus, they are equal. This shows that the transformation is natural as required. The assignment is immediately functorial with respect to the restriction preorder as the components of the transformations lift restriction idempotents which compose. The coherence properties with the associator are similarly straightforward to prove. \end{proof} Recall that in an $r$-split latent fibration $\ensuremath{\mathsf{p}} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$, whenever $f$ is total we know that there is a total prone map above it (see Lemma \ref{totals-in-flush-fibration}). 
If the cleavage is such that, for every $X$, $f^*_X$ is total whenever $f$ is total, then substitution semifunctors along total maps become restriction functors. \begin{proposition} If ${\sf p} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a latent fibration with a cleavage which preserves total maps then $f^* \colon {\sf p}^{(-1)}(B) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}^{(-1)}(A)$ is a restriction functor whenever $f$ is total. \end{proposition} \begin{proof} We have already shown that $f^*$ is a restriction semifunctor. Since the cleavage of ${\sf p}$ preserves total maps, we have that $f^*_X \colon f^*(X) \@ifnextchar^ {\t@@}{\t@@^{}} X$ is total, when $f$ is. This gives \begin{align*} & f^*(1_X) = f^*(\rst{1_X}) = \rst{f^*(1_X)} = \rst{f^*(1_X)\rst{f^*_X}} \\ & = \rst{f^*(1_X)f^*_X} = \rst{f^*_X} = 1_{f^*(X)} \end{align*} and so $f^*$ is a restriction functor. \end{proof} \section{Types of Latent Fibrations}\label{sec:types} In the previous section we described some properties of latent fibrations. However, it is important to note two things that do \emph{not} hold in an arbitrary latent fibration $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$. Both involve the behaviour of restriction idempotents. The first is that a restriction idempotent in the base need not lift as one might expect: that is, given a restriction idempotent $e\colon A \@ifnextchar^ {\t@@}{\t@@^{}} A$ in $\ensuremath{\mathbb B}\xspace$ and an object $X$ over $A$ in $\ensuremath{\mathbb E}\xspace$, there need not be a prone restriction idempotent $e'\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ over $e$. For an example of this, consider the latent fibration of propositions of a restriction category $\ensuremath{\mathbb X}\xspace$ (Example \ref{def:propositions}).
Suppose $e\colon A \@ifnextchar^ {\t@@}{\t@@^{}} A$ is a restriction idempotent in $\ensuremath{\mathbb X}\xspace$, and $(A,e')$ is an object in $\O(\ensuremath{\mathbb X}\xspace)$ over $A$. Since the projection in this case simply sends a map to itself, a restriction idempotent over $e$ must be $e$ itself; thus, we would need $e$ to be a map from $(A,e')$ to $(A,e')$ in $\O(\ensuremath{\mathbb X}\xspace)$. But this would mean that $e = e'e$, which requires $e \leq e'$. Thus, for any restriction idempotent $e'$ which is not $\geq e$, this is not possible. Of course, being a latent fibration, there is \emph{a} lift of $e$: as per Proposition \ref{prop:propositions}, it is the map \[ (A,ee') \@ifnextchar^ {\t@@}{\t@@^{}}^{e} (A,e'), \] it is just that this map is not a restriction idempotent in $\O(\ensuremath{\mathbb X}\xspace)$ (in particular, it is not an endomorphism!) Thus, in this case, there isn't even a restriction idempotent over $e$ (let alone a prone restriction idempotent). As we shall see, however, the ability to lift restriction idempotents to prone restriction idempotents is useful, and is true for most latent fibrations. Thus, in the next section we consider such latent fibrations; we term these \emph{admissible}. The second issue is the behaviour of restriction idempotents in the total category of a latent fibration. In particular, while identities are always prone, restriction idempotents (i.e., ``partial identities'') need not be.
To see the problem in general, suppose $e\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ is a restriction idempotent in $\ensuremath{\mathbb E}\xspace$, and suppose we have $g\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ in $\ensuremath{\mathbb E}\xspace$ and $h\colon B \@ifnextchar^ {\t@@}{\t@@^{}} A$ in $\ensuremath{\mathbb B}\xspace$ such that $h\ensuremath{\mathsf{p}}(e) = \ensuremath{\mathsf{p}}(g)$: \[ \xymatrix{Y \ar@{..>}[d] \ar[dr]^{g} & ~\ar@{}[drr]|{\textstyle\mapsto}&&A \ar[dr]^{\ensuremath{\mathsf{p}}(g)} \ar[d]_h \\ X \ar[r]_{e} & X && A \ar[r]_{\ensuremath{\mathsf{p}}(e)} & A} \] If $e$ were the identity, we could obviously choose $g$ as the unique lift. However, if $e$ is partial, there is no obvious lift that will make the triangle in $\ensuremath{\mathbb E}\xspace$ commute. However, note that we do have some control given that the commutativity of the bottom triangle tells us something about how defined $g$ is relative to $e$. Thus, if the restriction idempotents in $\ensuremath{\mathbb E}\xspace$ were closely related to those in $\ensuremath{\mathbb B}\xspace$, then we could hope to have a lift. In fact, this is the case in the ``strict'' versions of the simple and codomain fibrations. In these examples, one can check that restriction idempotents are prone (while they are not generally in the ``lax'' versions). Thus, restriction idempotents being prone is clearly an important condition that is true in some latent fibrations but not all. We shall see that this condition is equivalent to $\ensuremath{\mathsf{p}}$ being monic on restriction idempotents, or \emph{separated}. Thus, in Section \ref{sec:separated} we consider the theory and examples of separated latent fibrations. Many examples (including the motivating example of the strict simple fibration) are both admissible and separated. The combination of these conditions has some very useful consequences (for example, see Proposition \ref{prop:hyper_consequences}). 
Moreover, we will see that the combination of the admissible and separated conditions is equivalent to the restriction semifunctor $\ensuremath{\mathsf{p}}$ being \emph{hyperconnected}; that is, $\ensuremath{\mathsf{p}}$ is a bijection on restriction idempotents. Thus, in the next few sections, we consider the theory and examples of admissible, separated, and hyperconnected latent fibrations. \subsection{Admissible latent fibrations}\label{sec:admissible} \begin{definition} A restriction semifunctor ${\sf p} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is {\bf admissible} if for every $X \in \ensuremath{\mathbb E}\xspace$ and every restriction idempotent $e$ on ${\sf p}(X)$ in the base such that $e\ensuremath{\mathsf{p}}(1_X) = e$, there is a prone restriction idempotent $e^{*}$ on $X$ over $e$, that is with ${\sf p}(e^{*}) = e$. \end{definition} \begin{example} Most latent fibrations are admissible: \begin{enumerate}[(i)] \item The identity $1_\ensuremath{\mathbb X}\xspace\colon \ensuremath{\mathbb X}\xspace\@ifnextchar^ {\t@@}{\t@@^{}}\ensuremath{\mathbb X}\xspace$ is admissible. \item The lax and strict simple fibrations are admissible, as the prone lifting given in Proposition \ref{prop:simpleSlice} is a restriction idempotent. \item The lax and strict codomain fibrations are admissible; for proof, see the result below. \item The product latent fibration is admissible; the prone lifting $(1,e)$ of a restriction idempotent $e$ is again a restriction idempotent. \item The assemblies fibration is admissible; given a restriction idempotent $e\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ and an assembly $\phi$ over $X$, it is easy to check that $e$ (with identity tracking map) is prone in the assemblies category. \item The splitting fibration $\split(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is admissible.
If we are given an object $(X,e')$ in $\split(\ensuremath{\mathbb X}\xspace)$ and a restriction idempotent $e\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ such that $e = e\ensuremath{\mathsf{p}}(1_{(X,e')}) = ee'$, then \[ (X,e') \@ifnextchar^ {\t@@}{\t@@^{}}^{e} (X,e') \] is a well-defined map in $\split(\ensuremath{\mathbb X}\xspace)$, and it is straightforward to check that it is prone (since this fibration is separated, this also follows from Proposition \ref{prop:separated_equivalences}). \item As noted in the introduction to this section, the latent fibration of propositions is \textbf{not} generally admissible. Nor is a discrete fibration in general admissible: the prone arrow above a restriction idempotent is not necessarily an endomorphism unless the point is fixed by the function induced by the idempotent. \end{enumerate} \end{example} \begin{proposition} For any restriction category $\ensuremath{\mathbb X}\xspace$ with latent pullbacks, the latent fibrations $\ensuremath{\mathbb X}\xspace^{\leadsto}$ and $\ensuremath{\mathbb X}\xspace^{\rightarrow}$ are admissible. \end{proposition} \begin{proof} Given a restriction idempotent $e\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Y$ and a map $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $X$, we need to define a prone lifting of $e$ to $\ensuremath{\mathbb X}\xspace^{\leadsto}$ which is also a restriction idempotent. We claim that the pair $(\rs{fe}, e)$ does this. Indeed, consider the diagram \[ \xymatrix{C' \ar[dd]_c \ar[drr]^{g'} \ar@{..>}[dr]_{\widetilde{w}} \\ & X \ar[dd]^{f} \ar[r]_{\rst{fe}} & X \ar[dd]^{f} \\ C \ar@{}[ur]|\geq \ar[rrd]^<<<<g|\hole \ar[dr]_w \\ & Y\ar[r]_e & Y} \] Since $g = we$ is precise, by Lemma \ref{Jaws}.iii, $g = w$. Similarly, for the top triangle to be precise, we must have $\widetilde{w} = g'$. It remains to check that $\widetilde{w} = g'$ satisfies the required conditions. 
Indeed, it gives a lax square since the outer square is lax by assumption: \[ g'f \leq cg = cwe = cw, \] and the top triangle commutes since \[ g'\rs{fe} = \rs{g'fe}g' = \rs{chee} g' = \rs{che}g' = \rs{g'f} = g'\rs{f} = g' \] with the last equality holding since the outer square is assumed precise. Thus $(\rs{fe},e)$ is prone, and so $\ensuremath{\mathbb X}\xspace^{\leadsto}$ is admissible; a similar proof shows that $\ensuremath{\mathbb X}\xspace^{\rightarrow}$ is admissible. \end{proof} The definition of admissible only asks for the existence of a prone restriction idempotent over a restriction idempotent in the base. However, by Lemma \ref{mediating_maps}.iii, such a restriction idempotent is unique; thus, one in fact gets a section: \begin{lemma} \label{reflection-fibration} If ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is admissible, then the semilattice map \[ {\sf p}|_{{\cal O}(X)}\colon {\cal O}(X) \@ifnextchar^ {\t@@}{\t@@^{}} \{e \in \O(\ensuremath{\mathsf{p}}(X))\colon e \leq \ensuremath{\mathsf{p}}(1_X)\} \] has a section $(\_)^*$, where $e^*$ is the (unique) prone restriction idempotent above $e$ at $X$. Furthermore, for any $d \in \O(X)$, $d \leq {\sf p}(d)^{*}$, so that the prone arrows are a reflective subposet of ${\cal O}(X)$. \end{lemma} \begin{proof} Note that $(e_1 e_2)^{*} = e_1^{*} e_2^{*}$, as both are prone over $e_1e_2$, and $\ensuremath{\mathsf{p}}(1_X)^{*} = 1_X$, as $1_X$ is trivially prone. Thus, the section is also a semilattice morphism. Moreover, ${\sf p}(d) {\sf p}(d) = {\sf p}(d)$ is a trivial precise triangle with $d {\sf p}(d)^{*} = d$ precise above it: but this implies $d \leq {\sf p}(d)^{*}$ as required.
\end{proof} Another useful property of admissible latent fibrations is: \begin{lemma} \label{restriction-monics-in-fibrations} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is admissible and $m\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E$ is prone over a restriction monic, then $m$ is a partial isomorphism. \end{lemma} \begin{proof} If $m\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E$ is over a restriction monic, then there is a map $r\colon \ensuremath{\mathsf{p}}(E) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(E')$ such that $r\ensuremath{\mathsf{p}}(m) = \rs{r}$ and $\ensuremath{\mathsf{p}}(m)r = 1_{\ensuremath{\mathsf{p}}(E')}$. By admissibility, there is a prone restriction idempotent $\rs{r}^*\colon E \@ifnextchar^ {\t@@}{\t@@^{}} E$ over $\rs{r}$. Let $r'$ be defined as the unique lift \[ \xymatrix{ E \ar@{..>}[d]_{r'} \ar[dr]^{\rs{r}^*} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(E) \ar[d]_{r} \ar[dr]^{\rs{r}} & ~ \\ E' \ar[r]_m & E &&\ensuremath{\mathsf{p}}(E') \ar[r]_{\ensuremath{\mathsf{p}}(m)} & \ensuremath{\mathsf{p}}(E) } \] Thus, $r'm = \rs{r}^* = \rs{\rs{r}^*} = \rs{r'}$. Thus, $r'$ satisfies one half of being a partial inverse to $m$; we also need $mr' = \rs{m}$. For this, first, since $\rs{r}^*$ is itself prone, we have some unique $k\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E$ such that \[ \xymatrix{ E' \ar@{..>}[d]_{k} \ar[dr]^{m} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(E') \ar[d]_{\ensuremath{\mathsf{p}}(m)} \ar[dr]^{\ensuremath{\mathsf{p}}(m)} & ~ \\ E \ar[r]_{\rs{r}^*} & E && \ensuremath{\mathsf{p}}(E) \ar[r]_{\rs{r}} & \ensuremath{\mathsf{p}}(E) } \] However, by Lemma \ref{Jaws}.ii, $k = m$. Thus, in particular, since the top triangle is precise, we have $m\rs{r}^* = m$, so $mr'm = m\rs{r}^* = m$. 
Thus we have the triangle \[ \xymatrix{ E' \ar[d]_{mr'} \ar[dr]^{m} & ~ \ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(E') \ar[d]_{1} \ar[dr]^{\ensuremath{\mathsf{p}}(m)} & ~ \\ E' \ar[r]_{m} & E && \ensuremath{\mathsf{p}}(E') \ar[r]_{\ensuremath{\mathsf{p}}(m)} & \ensuremath{\mathsf{p}}(E) } \] But $\rs{m}$ also fits precisely into the top triangle in place of $mr'$ (and is over $1$ since $\ensuremath{\mathsf{p}}(m)$ is total), so since $m$ is prone, $mr' = \rs{m}$. Thus $m$ is a partial isomorphism with partial inverse $r'$. \end{proof} \begin{lemma} \label{nearly_connected} If ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is an admissible latent fibration then any mediating partial isomorphism $\alpha$ between two ${\sf p}$-prone maps $f'$ and $f$ over the same map such that $\rst{f} = \rst{{\sf p}(f)}^*$ is itself prone . \end{lemma} \begin{proof} Consider maps $f$ and $f'$ which are both ${\sf p}$-prone above the same map, ${\sf p}(f) = {\sf p}(f')$. Let $\alpha$ be the mediating map between $f$ and $f'$ so in particular ${\sf p}(\alpha) = \rst{{\sf p}(f)}$. Now consider $g\colon C \@ifnextchar^ {\t@@}{\t@@^{}} B$ with ${\sf p}(g) = h \rst{{\sf p}(f)}$. By Lemma \ref{Jaws}.ii, it follows that $h = {\sf p}(g)$. 
We wish to show that there is a lifting $\widetilde{h}$ so that $\widetilde{h}\alpha = g$ is precise, however, the best we can do is to lift $h$, using the fact that $f$ is prone, making $\widetilde{h}f' = gf$: \[ \xymatrix{C \ar[dr]_g \ar@{..>}[r]^{\widetilde{h}} & B' \ar[d]^{\alpha} \ar[dr]^{f'} & ~\ar@{}[drr]|{\textstyle\mapsto}&&{\sf p}(C) \ar[dr]_{{\sf p}(g)} \ar[r]^{h} & {\sf p}(B) \ar[d]|{{\sf p}(\alpha) =\rst{{\sf p}(f)}} \ar[dr]^{{\sf p}(f)} \\ & B \ar[r]_f & A &&& {\sf p}(B) \ar[r]_{{\sf p}(f)} & {\sf p}(A) } \] Now we observe that $\widetilde{h} \alpha$ sits over ${\sf p}(g)$ as ${\sf p}(\widetilde{h} \alpha) = {\sf p}(\widetilde{h}){\sf p}(\alpha) = h \rst{{\sf p}(f)} = {\sf p}(g)$; furthermore, $\widetilde{h} \alpha f = \widetilde{h} f' = g f$ and $\widetilde{h} \alpha \rst{f} = \widetilde{h} \alpha$ so the triangle $(\widetilde{h}\alpha) f = (\widetilde{h} f')$ is precise. However, as $g\rst{f} = g \rst{{\sf p}(f)}^*$ as ${\sf p}(g) ={\sf p}(g) \rst{{\sf p}(f)}$, the triangle $g f = (\widetilde{h} f')$ is precise. So we may conclude that $g = \widetilde{h}\alpha$ and, as $\widetilde{h}\rst{\alpha} = \widetilde{h}\rst{f'}$, it is precise. The triangle is, furthermore, unique as $\alpha$ is a partial isomorphism, and thus $\alpha$ is prone. \end{proof} \begin{proposition} Admissible latent fibrations are closed under composition and pullback. \end{proposition} \begin{proof} This follows from Lemma \ref{lemma:prone_with_comp_functors} and Lemma \ref{lemma:pullback_prone}. \end{proof} Perhaps the most useful property of admissible latent fibrations is that they can be $r$-split; we prove this in Proposition \ref{splitting-latent-fibration}. \subsection{Separated latent fibrations}\label{sec:separated} We now consider a different property a restriction semifunctor can have. 
\begin{definition} A restriction semifunctor $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is said to be \textbf{separated} if for any object $X \in \ensuremath{\mathbb E}\xspace$ and restriction idempotents $e,e'$ on $X$, if $\ensuremath{\mathsf{p}}(e) = \ensuremath{\mathsf{p}}(e')$ then $e = e'$. \end{definition} \begin{example}\label{separated_examples} \begin{enumerate}[(i)] \item The identity fibration $1_\ensuremath{\mathbb X}\xspace\colon \ensuremath{\mathbb X}\xspace\@ifnextchar^ {\t@@}{\t@@^{}}\ensuremath{\mathbb X}\xspace$ is separated. \item The strict simple fibration $\ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace]$ is separated: since the restriction of a map in the total category is entirely determined by its restriction in the base, the functor $\partial\colon \ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace]\@ifnextchar^ {\t@@}{\t@@^{}}\ensuremath{\mathbb X}\xspace$ is separated. \item Conversely, in general the lax simple fibration $\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace)$ is not separated, as the inequality allowed in the definition of an arrow permits multiple possible restriction idempotents over a fixed one in the base. Thus, this is an example of a latent fibration which is admissible but not separated. \item For any restriction category $\ensuremath{\mathbb X}\xspace$ with latent pullbacks, $\ensuremath{\mathbb X}\xspace^{\rightarrow} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is separated: if \[ \xymatrix{ X \ar[r]^{e'} \ar[d]_{f} & X \ar[d]^{f} \\ Y \ar[r]_{e} & Y} \] is a restriction idempotent in $\ensuremath{\mathbb X}\xspace^{\rightarrow}$, then since the square must strictly commute and be precise, we have \[ e' = \rs{e'} = \rs{e'\rs{f}} = \rs{e'f} = \rs{fe}. 
\] Thus, the restriction idempotent $(e',e)$ is entirely determined by $e$, and so the functor $\partial\colon \ensuremath{\mathbb X}\xspace^{\rightarrow} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is separated. \item By Lemma \ref{latent-pullbacks}.ii, one may be tempted to think that $\ensuremath{\mathbb X}\xspace^{\leadsto}$ is separated; however, for this to be the case partially inverting the top and bottom arrows of a latent pullback would have to produce a semi-precise square: this is not the case in general. \item The propositions fibration is clearly separated, as in this case $\ensuremath{\mathsf{p}}(e) = e$. Thus, this is an example of a latent fibration which is separated but not admissible. \item For assemblies, notice that all restriction idempotents $e \in \ensuremath{\mathbb X}\xspace$ are tracked by identity realizers and so every restriction idempotent is tracked. This means that the restriction idempotents in ${\sf Asm}(F)$ are the same as those in $\ensuremath{\mathbb X}\xspace$ making $\ensuremath{\mathsf{p}}$ separated. \item The splitting fibration $\split(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is clearly separated, as in this case $\ensuremath{\mathsf{p}}(e) = e$. Similarly, a discrete fibration is always separated. \end{enumerate} \end{example} An immediate property of a separated semifunctor is the following: \begin{lemma}\label{lemma:separated_reflects_total} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is separated, then $\ensuremath{\mathsf{p}}(f)$ total implies $f$ is total.
\end{lemma} \begin{proof} If we have such an $f$, then we also have \[ 1_{\ensuremath{\mathsf{p}}(X)} = \rs{\ensuremath{\mathsf{p}}(f)} = \ensuremath{\mathsf{p}}(\rs{f}) = \ensuremath{\mathsf{p}}(1_X)\ensuremath{\mathsf{p}}(\rs{f}) \leq \ensuremath{\mathsf{p}}(1_X), \] but we always have the opposite inequality, so $\ensuremath{\mathsf{p}}(1_X) = 1_{\ensuremath{\mathsf{p}}(X)}$. Then $\ensuremath{\mathsf{p}}(\rs{f}) = 1_{\ensuremath{\mathsf{p}}(X)} = \ensuremath{\mathsf{p}}(1_X)$, so since $\ensuremath{\mathsf{p}}$ is separated, $\rs{f} = 1_X$. Thus $f$ is total. \end{proof} More importantly, however, being separated is precisely the right condition to assure that all restriction idempotents (and, more generally, partial isomorphisms) are prone. \begin{proposition}\label{prop:separated_equivalences} For any restriction semifunctor $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$, the following are equivalent: \begin{enumerate}[(i)] \item $\ensuremath{\mathsf{p}}$ is separated; \item all partial isomorphisms in $\ensuremath{\mathbb E}\xspace$ are $\ensuremath{\mathsf{p}}$-prone; \item all restriction idempotents in $\ensuremath{\mathbb E}\xspace$ are $\ensuremath{\mathsf{p}}$-prone. \end{enumerate} \end{proposition} \begin{proof} For (i) $\Rightarrow$ (ii), suppose that $\ensuremath{\mathsf{p}}$ is separated.
Suppose we have a partial isomorphism $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ (with partial inverse $f^{(-1)}$) and a map $g\colon Z \@ifnextchar^ {\t@@}{\t@@^{}} Y$ such that $h\ensuremath{\mathsf{p}}(f) = \ensuremath{\mathsf{p}}(g)$ is precise: \[ \xymatrix{Z \ar@{..>}[d]^{} \ar[dr]^{g} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(Z) \ar[d]_h \ar[dr]^{\ensuremath{\mathsf{p}}(g)} & ~ \\ X \ar[r]_{f} & Y && \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] We want to show that $gf^{(-1)}$ precisely fits in the left triangle (it is the only possible choice by Lemma \ref{Jaws}.iii). First, note that \[ \ensuremath{\mathsf{p}}(\rs{gf^{(-1)}}) = \rs{h\ensuremath{\mathsf{p}}(f)\rs{\ensuremath{\mathsf{p}}(f^{(-1)})}} = \rs{h\ensuremath{\mathsf{p}}(f)} = \rs{\ensuremath{\mathsf{p}}(g)} = \ensuremath{\mathsf{p}}(\rs{g}) \] so since $\ensuremath{\mathsf{p}}$ is separated, $\rs{gf^{(-1)}} = \rs{g}$. Then the triangle commutes since \[ gf^{(-1)}f = g\rs{f^{(-1)}} = \rs{gf^{(-1)}}g = \rs{g}g, \] and is precise. Moreover, $gf^{(-1)}$ is over $h$ since \[ \ensuremath{\mathsf{p}}(gf^{(-1)}) = h\ensuremath{\mathsf{p}}(f)\ensuremath{\mathsf{p}}(f^{(-1)}) = h\rs{\ensuremath{\mathsf{p}}(f)} = h \] since $h\ensuremath{\mathsf{p}}(f) = \ensuremath{\mathsf{p}}(g)$ is precise. \\ (ii) $\Rightarrow$ (iii) is immediate, since restriction idempotents are partial isomorphisms.\\ For (iii) $\Rightarrow$ (i), if all restriction idempotents are prone, then $\ensuremath{\mathsf{p}}$ is separated by Lemma \ref{mediating_maps}.iii.
\end{proof} When a restriction semifunctor is separated, the prone condition is a bit simpler to check: \begin{lemma}\label{lemma:prone_for_separated} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is separated, then an arrow $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ in $\ensuremath{\mathbb E}\xspace$ is prone if and only if for any $g\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X'$ and any $h\colon p(Y) \@ifnextchar^ {\t@@}{\t@@^{}} p(X)$ such that $hp(f) = p(g)$ is precise, there is a unique $h'\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ such that $h'f = g$. \end{lemma} \begin{proof} The only change in the ordinary definition is the requirement that the triangle in $\ensuremath{\mathbb E}\xspace$ be precise. However, if $h'f = g$, then $\rs{p(h')} = \rs{h} = \rs{p(g)}$, so since $\ensuremath{\mathsf{p}}$ is separated, $\rs{h'} = \rs{g}$ is guaranteed. \end{proof} These functors also have a useful factorization property of prone arrows: \begin{lemma}\label{lemma:left_factor_separated} Suppose $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is separated. If \[ \xymatrix{X \ar[dr]^{f} \ar[d]_{f_1} & \\ X' \ar[r]_{f_2} & X''} \] is a precise triangle in $\ensuremath{\mathbb E}\xspace$ and $f$ and $f_2$ are prone, then $f_1$ is prone. \end{lemma} \begin{proof} Throughout this proof we will use the fact that when looking at whether an arrow is prone for a separated semifunctor, we don't need to consider precise-ness of the top triangle (see Lemma \ref{lemma:prone_for_separated}). Thus, we need to show that for $g\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X'$ with $h\colon p(Y) \@ifnextchar^ {\t@@}{\t@@^{}} p(X)$ such that $hp(f_1) = p(g)$ is precise, we have a unique fill-in $h'\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$. 
Consider this diagram, extended to the right by post-composing with $f_2$: \[ \xymatrix{Y \ar@{..>}[d]_{h'} \ar[dr]^{g} \ar@/^1pc/[drr]^{gf_2} & &\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(Y) \ar[dr]^{\ensuremath{\mathsf{p}}(g)} \ar[d]_{h} \ar@/^1pc/[drr]^{\ensuremath{\mathsf{p}}(gf_2)} & & \\ X \ar[r]_{f_1} & X' \ar[r]_{f_2} & X'' &&\ \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f_1)} & \ensuremath{\mathsf{p}}(X') \ar[r]_{\ensuremath{\mathsf{p}}(f_2)} & \ensuremath{\mathsf{p}}(X'')} \] Now, the outer triangle in the base is also precise since \[ \rs{\ensuremath{\mathsf{p}}(gf_2)} = \rs{h\ensuremath{\mathsf{p}}(f_1)\rs{\ensuremath{\mathsf{p}}(f_2)}} = \rs{h\ensuremath{\mathsf{p}}(f_1)} = \rs{\ensuremath{\mathsf{p}}(g)} = \rs{h} \] where we have used the fact that $f = f_1f_2$ is precise to give $f_1\rs{f_2} = f_1$. Thus, since $f = f_1f_2$ is prone, there is a unique $h'\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ such that $\rs{h'} = \rs{gf_2}$ and $h'f_1f_2 = gf_2$. We need to show that $h'$ makes the inner triangle commute; that is, we need $h'f_1 = g$. However, $\ensuremath{\mathsf{p}}(h'f_1) = h\ensuremath{\mathsf{p}}(f_1) = \ensuremath{\mathsf{p}}(g)$, so since we have $h'f_1f_2 = gf_2$ and $f_2$ is prone, $h'f_1 = g$. Uniqueness of $h'$ then follows by uniqueness of factorizations for $f_1f_2$. \end{proof} \begin{corollary}\label{cor:res_monic_prone} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is separated, then any restriction monic in $\ensuremath{\mathbb E}\xspace$ is prone. \end{corollary} \begin{proof} Suppose we have a restriction monic $m\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb E}\xspace$. Then by assumption there is a restriction retraction $r\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ so that $mr = 1$; thus, \[ \xymatrix{X \ar[dr]^{1} \ar[d]_{m} & \\ Y \ar[r]_{r} & X} \] is a precise triangle.
By Lemma \ref{restriction-retraction}, identities and restriction retractions are always prone, so by the previous result, $m$ is also prone. \end{proof} Separated latent fibrations have the useful property that one can (latently) pullback a prone arrow along a subvertical arrow: \begin{lemma}\label{lemma:pullback_prone_subvertical} Suppose $\ensuremath{\mathsf{p}} \colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a separated latent fibration. Then every cospan \[ B \@ifnextchar^ {\t@@}{\t@@^{}}^{c} C \@ifnextchar^ {\t@@left}{\t@@left^{}}^{v} A \] with $v$ subvertical and $c$ prone has a corresponding latent pullback: \[ \xymatrix{ U \ar[r]^{c'} \ar[r] \ar[d]_{w} & A \ar[d]^{v} \\ B \ar[r]_{c} \ar[r] & C } \] where $w$ is subvertical and $c'$ is prone. \end{lemma} \begin{proof} Since $v$ is subvertical, $p(A) = p(C)$, and so the composite \[ \ensuremath{\mathsf{p}}(B) \@ifnextchar^ {\t@@}{\t@@^{}}^{\ensuremath{\mathsf{p}}(c)} \ensuremath{\mathsf{p}}(A) \@ifnextchar^ {\t@@}{\t@@^{}}^{\ensuremath{\mathsf{p}}(v)} \ensuremath{\mathsf{p}}(C) \] is well-defined; we take $c'\colon U \@ifnextchar^ {\t@@}{\t@@^{}} A$ to be a prone lift of this map to $A$. Then let $w\colon U \@ifnextchar^ {\t@@}{\t@@^{}} B$ be the unique arrow from the universal property of $c$: \[ \xymatrix{ U \ar@{..>}[d]_{w} \ar[dr]^{c'v} & ~\ar@{}[drr]|{\textstyle\mapsto}&&& \ensuremath{\mathsf{p}}(U) \ar[dr]^{\ensuremath{\mathsf{p}}(c'v)} \ar[d]_{\rs{\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)}} \\ B \ar[r]_{c} & C &&&\ensuremath{\mathsf{p}}(B) \ar[r]_{\ensuremath{\mathsf{p}}(c)} & \ensuremath{\mathsf{p}}(C)} \] (Note that the triangle in $\ensuremath{\mathbb B}\xspace$ is precise since $\rs{\ensuremath{\mathsf{p}}(c'v)} = \rs{\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)\ensuremath{\mathsf{p}}(v)} = \rs{\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)}$). Thus, $w$ as above exists, and has the property that $\rs{w} = \rs{c'v}$. 
Moreover, since $\rs{\ensuremath{\mathsf{p}}(w)} = \rs{\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)} = \rs{\ensuremath{\mathsf{p}}(c')}$, $\rs{w} = \rs{c'}$ as $\ensuremath{\mathsf{p}}$ is separated. Thus, the square is a potential candidate to be a latent pullback. We will check that the original definition (see \cite[pg. 460]{journal:guo-range-join}) of latent pullback holds, as it involves one fewer piece of data. So, assume we have $a\colon X \@ifnextchar^ {\t@@}{\t@@^{}} A$ and $b\colon X \@ifnextchar^ {\t@@}{\t@@^{}} B$ so that $av = bc$. We need to find a unique $b'$ as indicated: \[ \xymatrix{X \ar@/^0.7pc/[drr]^{a}_{\leq} \ar@/^-0.7pc/[ddr]_{b}^{\geq} \ar@{-->}[dr]|{b'} \\ & U \ar[d]_w \ar[r]_{c'} & A \ar[d]^{v} \\ & B \ar[r]_c & C} \] with the additional properties that $\rs{b'} = \rs{bc} = \rs{av}$ and $b'\rs{w} = b'\rs{c'} = b'$. Let $b'$ be the unique arrow arising from the universal property of $c'$: \[ \xymatrix{ X \ar@{..>}[d]_{b'} \ar[dr]^{a\rs{v}} & ~\ar@{}[drr]|{\textstyle\mapsto}&&& \ensuremath{\mathsf{p}}(X) \ar[dr]^{\ensuremath{\mathsf{p}}(a\rs{v})} \ar[d]_{\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)} \\ U \ar[r]_{c'} & A &&& \ensuremath{\mathsf{p}}(U) \ar[r]_{\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)} & \ensuremath{\mathsf{p}}(C)} \] Note that $\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)$ is well-defined since $w$ is subvertical. 
The triangle in $\ensuremath{\mathbb B}\xspace$ commutes since \[ \ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v) = \ensuremath{\mathsf{p}}(b)\rs{\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)}\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v) = \ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v) = \ensuremath{\mathsf{p}}(a)\ensuremath{\mathsf{p}}(v)\ensuremath{\mathsf{p}}(v) = \ensuremath{\mathsf{p}}(a)\ensuremath{\mathsf{p}}(v) = \ensuremath{\mathsf{p}}(a\rs{v}) \] and is precise since \[ \rs{\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)} = \rs{\ensuremath{\mathsf{p}}(b)\rs{\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)}} = \rs{\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(c)\ensuremath{\mathsf{p}}(v)} = \rs{\ensuremath{\mathsf{p}}(a)\ensuremath{\mathsf{p}}(v)\ensuremath{\mathsf{p}}(v)} = \rs{\ensuremath{\mathsf{p}}(a)\ensuremath{\mathsf{p}}(v)}. \] Thus, such a $b'$ does exist, and has the property that $\rs{b'} = \rs{a\rs{v}} = \rs{av}$. It immediately satisfies many of the required properties: \[ b'c' = a\rs{v} \leq a, \] and \[ \rs{b'} = \rs{av} \] and \[ b'\rs{c'} = \rs{b'c'}b' = \rs{av}b' = \rs{b'}b' = b'. \] However, checking that $b'w \leq b$, that is, $\rs{b'w}b = b'w$, takes a bit more work. For this, we will use the cancellation property of the prone map $c$: if $\rs{b'w}bc = b'wc$, $\ensuremath{\mathsf{p}}(\rs{b'w}b) = \ensuremath{\mathsf{p}}(b'w)$, and the relevant triangle in the base is precise, then $\rs{b'w}b = b'w$ follows. For the first requirement, $b'wc = b'c'v = a\rs{v}v = av$ while $\rs{b'w}bc = \rs{b'w}av$. 
However, we also have \[ \ensuremath{\mathsf{p}}(\rs{b'w}) = \rs{\ensuremath{\mathsf{p}}(b')\ensuremath{\mathsf{p}}(w)} = \rs{\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)\ensuremath{\mathsf{p}}(w)} = \rs{\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)} = \rs{\ensuremath{\mathsf{p}}(a)\ensuremath{\mathsf{p}}(v)} \] by above; thus since $\ensuremath{\mathsf{p}}$ is separated we get $\rs{b'w}av = \rs{av}av = av$. For the second requirement, \[ \ensuremath{\mathsf{p}}(\rs{b'w}b) = \rs{\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)} \ensuremath{\mathsf{p}}(b) = \ensuremath{\mathsf{p}}(b)\rs{p(w)} = \ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)\ensuremath{\mathsf{p}}(w) = \ensuremath{\mathsf{p}}(b'w). \] For preciseness of the triangle in the base, \[ \rs{\ensuremath{\mathsf{p}}(b'w)} = \rs{\ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w)} = \rs{\ensuremath{\mathsf{p}}(a)\ensuremath{\mathsf{p}}(v)} \] by above. Thus, by proneness of $c$, $\rs{b'w}b = b'w$, and so $b'w \leq b$. Thus $b'$ satisfies all the required properties; it remains to show that it is unique. Thus, suppose we have \[ \xymatrix{X \ar@/^0.7pc/[drr]^{a}_{\leq} \ar@/^-0.7pc/[ddr]_{b}^{\geq} \ar[dr]|{d} \\ & U \ar[d]_w \ar[r]_{c'} & A \ar[d]^{v} \\ & B \ar[r]_c & C} \] with $\rs{d} = \rs{bc} = \rs{av}$ and $d = d\rs{c'} = d\rs{w}$. We want to show that $d = b'$; we will use the universal property of $c'$. Thus, we need to show that $dc' = b'c'$, $\ensuremath{\mathsf{p}}(d) = \ensuremath{\mathsf{p}}(b')$, and the relevant triangle in the base is precise. For the first requirement, since $dc' \leq a$, \[ dc' = \rs{dc'}a = \rs{d}a = \rs{av}a = a\rs{v} = b'c'. 
\] For the second requirement, since $d = d\rs{w}$, and $dw \leq b$, \[ \ensuremath{\mathsf{p}}(d) = \ensuremath{\mathsf{p}}(dw)\ensuremath{\mathsf{p}}(w) = \ensuremath{\mathsf{p}}(\rs{dw}b)\ensuremath{\mathsf{p}}(w) = \ensuremath{\mathsf{p}}(\rs{bc}b)\ensuremath{\mathsf{p}}(w) = \ensuremath{\mathsf{p}}(b\rs{c})\ensuremath{\mathsf{p}}(w) = \ensuremath{\mathsf{p}}(b)\ensuremath{\mathsf{p}}(w) = \ensuremath{\mathsf{p}}(b'), \] with the second-last equality since $w\rs{c} = w$. Finally, since $\rs{d} = \rs{av}$, the relevant triangle in $\ensuremath{\mathbb E}\xspace$ and hence in $\ensuremath{\mathbb B}\xspace$ is precise. Thus, proneness of $c'$ gives $d = b'$, and we have completed the proof that the square is a latent pullback. \end{proof} \begin{proposition} Separated latent fibrations are closed under composition and pullback. \end{proposition} \begin{proof} This follows from Lemma \ref{lemma:prone_with_comp_functors} and Lemma \ref{lemma:pullback_prone}. \end{proof} \subsection{Hyperconnected latent fibrations}\label{sec:connected} \begin{definition} A semifunctor $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is said to be \textbf{hyperconnected} (and $\ensuremath{\mathsf{p}}$ is said to be a \textbf{hyperconnection}) if for any $X \in \ensuremath{\mathbb E}\xspace$, the map \[ \ensuremath{\mathsf{p}}_{|\O(X)}\colon \O(X) \@ifnextchar^ {\t@@}{\t@@^{}} \{d \in \O(\ensuremath{\mathsf{p}}(X)): d\ensuremath{\mathsf{p}}(1_X) = d\} \] sending $e \in \O(X)$ to $\ensuremath{\mathsf{p}}(e)$ is an isomorphism. We say that a latent fibration $\ensuremath{\mathsf{p}}$ is a \textbf{hyperfibration} if $\ensuremath{\mathsf{p}}$ is a hyperconnection. \end{definition} This is a generalization of the definition of hyperconnection (see \cite[pg. 39]{journal:rcats-enriched}) to restriction \emph{semi}functors. 
Before looking at examples, it will be useful to know the following: \begin{proposition}\label{prop:hyperconnected_equivalence} A semifunctor $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is hyperconnected if and only if it is separated and admissible. \end{proposition} \begin{proof} If $\ensuremath{\mathsf{p}}$ is hyperconnected, then it is monic on restriction idempotents, and so is separated. Moreover, if $e\colon \ensuremath{\mathsf{p}}(X) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(X)$ is a restriction idempotent in $\ensuremath{\mathbb B}\xspace$, then there is a restriction idempotent $e'\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ over it (since $\ensuremath{\mathsf{p}}$ is hyperconnected), and this is prone by Proposition \ref{prop:separated_equivalences}. Conversely, if $\ensuremath{\mathsf{p}}$ is separated and admissible, then for any $X$, the map \[ \ensuremath{\mathsf{p}}_{|\O(X)}\colon \O(X) \@ifnextchar^ {\t@@}{\t@@^{}} \{d \in \O(\ensuremath{\mathsf{p}}(X)): d\ensuremath{\mathsf{p}}(1_X) = d\} \] is monic and has a section (by Lemma \ref{reflection-fibration}), and thus is an isomorphism. \end{proof} Thus, by the results of the previous two sections, we have the following examples (and non-examples) of latent hyperfibrations: \begin{example}\label{connected_examples} \begin{enumerate}[(i)] \item The identity fibration $1_\ensuremath{\mathbb X}\xspace\colon \ensuremath{\mathbb X}\xspace\@ifnextchar^ {\t@@}{\t@@^{}}\ensuremath{\mathbb X}\xspace$ is a hyperfibration. \item The strict simple fibration $\ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace]$ is a hyperfibration, while in general the lax simple fibration $\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace)$ is not.
\item For any restriction category $\ensuremath{\mathbb X}\xspace$ with latent pullbacks, $\ensuremath{\mathbb X}\xspace^{\rightarrow} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a hyperfibration, while in general $\ensuremath{\mathbb X}\xspace^{\leadsto}$ is not. \item The assemblies fibration is a hyperfibration. \item The splitting fibration $\split(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ is a hyperfibration. \item The propositions fibration and discrete fibrations are not a hyperfibration (in general) as they are not admissible. \end{enumerate} \end{example} Table \ref{examples-properties} presents an overview of the examples of latent fibrations we have considered with their properties. \begin{table} \begin{center} \begin{tabular}{|r|c|c|c||c|} \hline Restriction semifunctor & Restriction Functor & Admissible & Separated & Hyperfibration \\ \hline $\split(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & $\times$ & \checkmark & \checkmark & \checkmark \\ \hline $\ensuremath{\mathbb X}\xspace \times \ensuremath{\mathbb Y}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & \checkmark & $\times$ & $\times$ \\ \hline (lax) $\ensuremath{\mathbb X}\xspace(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & \checkmark & $\times$ & $\times$ \\ \hline (strict) $\ensuremath{\mathbb X}\xspace[\ensuremath{\mathbb X}\xspace] \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & \checkmark & \checkmark & \checkmark \\ \hline (lax) $\ensuremath{\mathbb X}\xspace^\leadsto \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & \checkmark & $\times$ & $\times$ \\ \hline (strict) $\ensuremath{\mathbb X}\xspace^\rightarrow \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & \checkmark & \checkmark & \checkmark \\ 
\hline $\O(\ensuremath{\mathbb X}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & $\times$ & \checkmark & $\times$ \\ \hline ${\sf Elt}(F) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & $\times$ & \checkmark & $\times$ \\ \hline ${\sf Asm}(F) \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$ & \checkmark & \checkmark & \checkmark & \checkmark \\ \hline \end{tabular} \end{center} \caption{Properties of latent fibrations\label{examples-properties}} \end{table} Hyperconnections are particularly well-behaved when looking at prone arrows. For a general restriction semifunctor, there can be prone maps over total maps or restriction monics which are themselves neither total nor restriction monic. However, for a hyperconnection this is no longer the case, as {\em all} prone maps above a total map are total, and, similarly, {\em all\/} maps prone above a restriction monic are restriction monics. In addition, any prone map above a restriction retraction is a restriction retraction: \begin{proposition}\label{prop:hyper_consequences} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a hyperconnection, then: \begin{enumerate}[(i)] \item $\ensuremath{\mathsf{p}}$ reflects total maps; \item if $\alpha$ in $\ensuremath{\mathbb E}\xspace$ is prone over a partial isomorphism, then $\alpha$ is itself a partial isomorphism; \item if $s$ in $\ensuremath{\mathbb E}\xspace$ is prone over a restriction monic then $s$ is itself a restriction monic; \item if $r$ in $\ensuremath{\mathbb E}\xspace$ is prone over a restriction retraction, then $r$ is itself a restriction retraction; \item if $\ensuremath{\mathsf{p}}$ is a latent hyperfibration, then the total maps and restriction monics of $\ensuremath{\mathsf{p}}$ each determine a subfibration of $\ensuremath{\mathsf{p}}$.
\end{enumerate} \end{proposition} \begin{proof} \begin{enumerate}[(i)] \item We saw in Lemma \ref{lemma:separated_reflects_total} that separated semifunctors reflect total maps. \item By Proposition \ref{prop:hyperconnected_equivalence}, $\ensuremath{\mathsf{p}}$ is admissible, so there is a prone restriction idempotent $e$ over $\widehat{\ensuremath{\mathsf{p}}(\alpha)} = \rs{\ensuremath{\mathsf{p}}(\alpha)^{(-1)}}$. Let $\alpha'\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ be the lift of $\ensuremath{\mathsf{p}}(\alpha)^{(-1)}$: \[ \xymatrix{X \ar[dr]^e \ar@{..>}[d]_{\alpha'} & ~\ar@{}[drr]|{\textstyle\mapsto}&&&\ensuremath{\mathsf{p}}(X) \ar[dr]^{\widehat{\ensuremath{\mathsf{p}}(\alpha)}} \ar[d]_{\ensuremath{\mathsf{p}}(\alpha)^{(-1)}} \\ Y \ar[r]_{\alpha} & X &&& \ensuremath{\mathsf{p}}(Y) \ar[r]_{\ensuremath{\mathsf{p}}(\alpha)} & \ensuremath{\mathsf{p}}(X)} \] Thus $\alpha' \alpha = e = \rs{e} = \rs{\alpha'}$ since the triangle is precise. We want to show that $\alpha'$ is the partial inverse of $\alpha$, and so we also need $\alpha \alpha' = \rs{\alpha}$. By definition, $e$ and $\alpha$ are prone, so by Lemma \ref{lemma:left_factor_separated}, $\alpha'$ is also prone, so by Lemma \ref{composites-prones}, $\alpha\alpha'$ is prone. Thus $\alpha \alpha'$ and $\rs{\alpha}$ are prone over the same map ($\ensuremath{\mathsf{p}}(\rs{\alpha})$), and so by Lemma \ref{mediating_maps}.i, there is a mediating partial isomorphism $\beta\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Y$ \[ \xymatrix{ Y \ar[r]^{\beta} \ar[dr]_{\rs{\alpha}} & Y \ar[d]^{\alpha \alpha'} \\ & Y} \] with $\rs{\beta} = \rs{\alpha}$ and $\rs{\beta^{(-1)}} = \rs{\alpha \alpha'}$. But then we have \[ \beta = \beta \beta^{(-1)}\beta = \beta \rs{\beta^{(-1)}} = \beta \alpha \alpha' = \rs{\alpha}. \] So $\beta$ is a restriction idempotent, and so its partial inverse $\beta^{(-1)}$ is itself, namely $\rs{\alpha}$. Thus $\alpha \alpha^{(-1)} = \beta^{(-1)} \rs{\alpha} = \rs{\alpha}$, as required. 
\item Suppose $s\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ is prone over a restriction monic $\ensuremath{\mathsf{p}}(s)$. Then in particular $s$ is prone over a partial isomorphism, so by (ii), it has a partial inverse $r$, so that $sr = \rs{s}$ and $rs = \rs{r}$. But since $\ensuremath{\mathsf{p}}(s)$ is a restriction monic, it is total, so by (i), $s$ is total, and thus $sr = \rs{s} = 1$. Thus $s$ is a restriction monic. \item Similar proof to (iii). \item Restricting the fibration to total maps gives a trivial latent fibration which is a fibration. Prone maps become Cartesian: so, in particular, restriction monics, being partial isomorphisms and therefore prone, are Cartesian. \end{enumerate} \end{proof} Given any admissible latent fibration ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ we may always extract a latent hyperfibration $\widehat{\ensuremath{\mathsf{p}}}\colon \widehat{\ensuremath{\mathbb E}\xspace} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ by restricting the restriction idempotents to the prone restriction idempotents and the maps to those which pullback prone restrictions to prone restrictions: that is, $f \in \widehat{\ensuremath{\mathbb E}\xspace}$ in case for every prone restriction idempotent, $e$ on the codomain of $f$, $\rst{fe}$ is a prone restriction idempotent (recall from Lemma \ref{reflection-fibration} that the prone restrictions for an admissible semifunctor form a (reflective) subsemilattice). We then have: \begin{proposition} If ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is an admissible latent fibration then \[ \xymatrix{~\widehat{\ensuremath{\mathbb E}\xspace}~ \ar[dr]_{\widehat{\ensuremath{\mathsf{p}}}} \ar@{>->}[rr] && \ensuremath{\mathbb E}\xspace \ar[dl]^{{\sf p}} \\ & \ensuremath{\mathbb B}\xspace} \] commutes, and $\widehat{\ensuremath{\mathsf{p}}}$ is a latent hyperfibration. 
\end{proposition} \begin{proof} $\widehat{\ensuremath{\mathsf{p}}}$ is clearly still admissible, and by definition, all restriction idempotents in it are prone. Thus, by Proposition \ref{prop:separated_equivalences} and Proposition \ref{prop:hyperconnected_equivalence}, $\widehat{\ensuremath{\mathsf{p}}}$ is a latent hyperfibration. \end{proof} \begin{proposition} Hyperconnected latent fibrations are closed under composition and pullback. \end{proposition} \begin{proof} Follows from Lemma \ref{lemma:prone_with_comp_functors} and Lemma \ref{lemma:pullback_prone}. \end{proof} Another key property of a latent hyperfibration (see Section \ref{sec:dual}) is that one can construct its fibrational dual. \section{\texorpdfstring{${\sf M}$}{M}-category Fibrations} \label{sec:fibrations-of-partial} The purpose of this section is to provide a series of results describing the relation between fibrations of ${\sf M}$-categories and latent fibrations. In particular, we completely characterize, as an ${\sf M}$-category, a certain type of latent fibration which we call $r$-split (see Definition \ref{defn:split}). This allows us to similarly characterize $r$-split admissible latent fibrations and $r$-split latent hyperfibrations. Another key result in this story is that \emph{admissible} latent fibrations are stable under the process of splitting restriction idempotents: this means that any admissible latent fibration can be embedded into an $r$-split latent fibration and hence into the partial map category of an ${\sf M}$-category. It is worth emphasizing that for this we need the additional property of being admissible. Thus, in general, one cannot split the restriction idempotents of a latent fibration to get a latent fibration. In this section we shall use restriction functors rather than semi-functors. In particular, we shall need functors which preserve the total maps in order to access the equivalence between $r$-split restriction categories and ${\sf M}$-categories. 
\subsection{\texorpdfstring{$r$}{r}-split latent fibrations} In this section we will focus on restriction functors between $r$-split restriction categories, and demand that the fibres of these functors behave well with respect to splitting. \begin{definition}\label{defn:split} Let $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ be a restriction functor. \begin{itemize} \item $\ensuremath{\mathsf{p}}$ is said to be \textbf{well-fibred} if for all $B$ in $\ensuremath{\mathbb B}\xspace$, whenever a restriction idempotent in the fibre\footnote{Note that we reserve the notation $\ensuremath{\mathsf{p}}^{-1}(B)$ for the classical fibre, containing only arrows that are mapped to $1_B$ by $\ensuremath{\mathsf{p}}$, whereas $\ensuremath{\mathsf{p}}^{(-1)}(B)$ is used for the strand containing all arrows that are mapped to a restriction idempotent on $B$.} $\ensuremath{\mathsf{p}}^{-1}(B)$ splits in $\ensuremath{\mathbb E}\xspace$, it also splits in $\ensuremath{\mathsf{p}}^{-1}(B)$. \item $\ensuremath{\mathsf{p}}$ is said to be {\bf $r$-split} in case the restriction categories $\ensuremath{\mathbb E}\xspace$ and $\ensuremath{\mathbb B}\xspace$ are $r$-split, and $\ensuremath{\mathsf{p}}$ is well-fibred. \end{itemize} \end{definition} One reason to ask that the projection functor be well-fibred is the following result: \begin{lemma} \label{totals-in-flush-fibration} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a restriction functor in which $\ensuremath{\mathbb E}\xspace$ is $r$-split, then every total map, $f\colon B \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(E)$, having a prone map $f'\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E$ above it whose idempotent $\rst{f'}$ splits in $\ensuremath{\mathsf{p}}^{-1}(B)$, has a prone map, which is {\em total}, above $f$ at $E$. 
\end{lemma} \begin{proof} Suppose $f\colon B \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(E)$ is total in $\ensuremath{\mathbb B}\xspace$ with an $f'\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E$ which is prone above it. Note that $\rst{f'} \in \ensuremath{\mathsf{p}}^{-1}(B)$ as $\ensuremath{\mathsf{p}}(\rst{f'}) =\rst{\ensuremath{\mathsf{p}}(f')} = \rst{f} = 1_B$. Thus, since $\ensuremath{\mathsf{p}}$ is well-fibred, there is a splitting $m\colon E'' \@ifnextchar^ {\t@@}{\t@@^{}} E'$, $r\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E''$ of $\rs{f'}$ with both $m$ and $r$ in $\ensuremath{\mathsf{p}}^{-1}(B)$ also. Let $f'' = mf'$; we claim that $f''$ is the required total prone map over $f$. First, since $m \in \ensuremath{\mathsf{p}}^{-1}(B)$, \[ \ensuremath{\mathsf{p}}(f'') = \ensuremath{\mathsf{p}}(m)\ensuremath{\mathsf{p}}(f') = 1_B f = f, \] so $f''$ is over $f$. Next, \[ \rs{f''} = \rs{mf'} = \rs{m\rs{f'}} = \rs{mrm} = \rs{m} = 1_{E''}, \] so $f''$ is total. Finally, we have $f' = \rs{f'}f' = rmf' = rf''$, so since $f'$ is prone and $r$ is a restriction retraction, $f''$ is itself prone by Lemma \ref{restriction-retraction}.iii. \end{proof} When $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is an $r$-split latent fibration the total maps form a fibration because, as observed previously, prone maps in a category in which all maps are total, are just Cartesian maps. Thus, we have the important observation: \begin{corollary} \label{totals-in-latent-fibration} The total maps of an $r$-split latent fibration form a fibration. \end{corollary} \subsection{Latent fibrations and \texorpdfstring{${\sf M}$}{M}-categories} Next, it will be helpful to understand precise triangles and prone maps in a partial map category.
\begin{lemma}\label{lemma:precise_in_partialMapCat} Suppose that \[ X \@ifnextchar^ {\t@@left}{\t@@left^{}}^{m} X' \@ifnextchar^ {\t@@}{\t@@^{}}^{f} Y, \ X \@ifnextchar^ {\t@@left}{\t@@left^{}}^{m'} X'' \@ifnextchar^ {\t@@}{\t@@^{}}^{f'} Z, \ Y \@ifnextchar^ {\t@@left}{\t@@left^{}}^{n} Y' \@ifnextchar^ {\t@@}{\t@@^{}}^g Z \] are representatives of maps in a partial map category ${\sf Par}(\ensuremath{\mathbb X}\xspace,{\sf M})$. Then the triangle \[ \xymatrix{X \ar[dr]^{(m',f')} \ar[d]_{(m,f)} & \\ Y \ar[r]_{(n,g)} & Z } \] precisely commutes in ${\sf Par}(\ensuremath{\mathbb X}\xspace,{\sf M})$ if and only if there is a map $k\colon X'' \@ifnextchar^ {\t@@}{\t@@^{}} Y'$ such that $kg = f'$ and $(m,f) \cong (m',kn)$. \end{lemma} \begin{proof} First, suppose that the triangle precisely commutes. Then $\rs{(m,f)} \cong \rs{(m',f')}$, so there is an isomorphism $\alpha\colon X'' \@ifnextchar^ {\t@@}{\t@@^{}} X'$ such that $\alpha m = m'$. Let $(am,bg)$ be a representative of $(m,f) \circ (n,g)$: \[ \xymatrix{ & & P \ar[dl]_{a} \ar[dr]^b & & \\ & X' \ar[dl]_m \ar[dr]^f & & Y' \ar[dl]_n \ar[dr]^g & \\ X & & Y & & Z} \] then since $(m,f) \circ (n,g) \cong (m',f')$, there is an isomorphism $\beta\colon X'' \@ifnextchar^ {\t@@}{\t@@^{}} P$ such that $\beta am = m'$ and $\beta bg = f'$. Let $k = \beta b$, so we have $kg = f'$. Moreover, we claim that $\alpha$ witnesses $(m,f) \cong (m',kn)$. Indeed, we have $\alpha m = m'$. We also need $\alpha f = kn = \beta bn = \beta a f$, so it suffices to show $\alpha = \beta a$. But $\alpha m = m' = \beta a m$, so since $m$ is monic, $\alpha = \beta a$, as required. Conversely, suppose that we have such a $k$, and an $\alpha\colon X'' \@ifnextchar^ {\t@@}{\t@@^{}} X'$ that witnesses $(m,f) \cong (m',kn)$. Then by definition $\alpha$ witnesses $\rs{(m,f)} \cong \rs{(m',f')}$.
Moreover, \[ \xymatrix{ X'' \ar[r]^{k} \ar[d]_{\alpha} & Y' \ar[d]^{n} \\ X' \ar[r]_{f} & Y } \] commutes by assumption, and it is straightforward to check that it is a pullback since $\alpha$ is an isomorphism and $n$ is monic. Thus \[ \xymatrix{ & & X'' \ar[dl]_{\alpha} \ar[dr]^k & & \\ & X' \ar[dl]_m \ar[dr]^f & & Y' \ar[dl]_n \ar[dr]^g & \\ X & & Y & & Z} \] is a representative of $(m,f) \circ (n,g)$, and equals $(m',f')$ by assumption on $\alpha$ and $k$. \end{proof} The following result allows us to build prone maps in a partial map category from Cartesian maps in the original category: \begin{lemma}\label{lemma:prone_in_partialMapCat} Suppose $\ensuremath{\mathsf{p}}\colon (\ensuremath{\mathbb E}\xspace,\ensuremath{\mathsf{M}}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\ensuremath{\mathbb B}\xspace,\ensuremath{\mathsf{M}}_{\ensuremath{\mathbb B}\xspace})$ is an $\ensuremath{\mathsf{M}}$-category functor. If $f\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is $\ensuremath{\mathsf{p}}$-Cartesian in $\ensuremath{\mathbb E}\xspace$, then for any $m\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ in $\ensuremath{\mathsf{M}}_{\ensuremath{\mathbb E}\xspace}$, (the equivalence class of) $(m,f)$ is ${\sf Par}(\ensuremath{\mathsf{p}})$-prone in the restriction category ${\sf Par}(\ensuremath{\mathbb E}\xspace,\ensuremath{\mathsf{M}}_{\ensuremath{\mathbb E}\xspace})$. 
\end{lemma} \begin{proof} Suppose $Z \@ifnextchar^ {\t@@left}{\t@@left^{}}^{n} Z' \@ifnextchar^ {\t@@}{\t@@^{}}^{g} Y$ is a map from $Z$ to $Y$: \[ \xymatrix{Z \ar[dr]^{(n,g)} & \\ X \ar[r]_{(m,f)} & Y } \] Then by the previous lemma, to have a precise triangle with $\ensuremath{\mathsf{p}}(m,f)\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in the base means that there is an $h\colon \ensuremath{\mathsf{p}}(Z') \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(X')$ with $h\ensuremath{\mathsf{p}}(f) = \ensuremath{\mathsf{p}}(g)$, with the triangle of the form \[ \xymatrix@C=4em{\ensuremath{\mathsf{p}}(Z) \ar[dr]^{(\ensuremath{\mathsf{p}}(n),\ensuremath{\mathsf{p}}(g))} \ar[d]_{(\ensuremath{\mathsf{p}}(n),h\ensuremath{\mathsf{p}}(m))} & \\ \ensuremath{\mathsf{p}}(X) \ar[r]_{(\ensuremath{\mathsf{p}}(m),\ensuremath{\mathsf{p}}(f))} & \ensuremath{\mathsf{p}}(Y) } \] Then since $f$ is $\ensuremath{\mathsf{p}}$-Cartesian, there is a unique $\tilde{h}\colon Z' \@ifnextchar^ {\t@@}{\t@@^{}} X'$ in $\ensuremath{\mathbb E}\xspace$ such that $\tilde{h}f = g$ and $\ensuremath{\mathsf{p}}(\tilde{h}) = h$. Then by the previous lemma, $(n,\tilde{h}m)$ is a precise fill-in for the first triangle: \[ \xymatrix{Z \ar[dr]^{(n,g)} \ar[d]_{(n,\tilde{h}m)} & \\ X \ar[r]_{(m,f)} & Y } \] and is over $(\ensuremath{\mathsf{p}}(n),h\ensuremath{\mathsf{p}}(m))$ by definition. It remains to show it is unique. If we have another precise fill-in, by the previous lemma we can take it to be of the form $(n,km)$ for some $k\colon Z' \@ifnextchar^ {\t@@}{\t@@^{}} X'$ such that $kf = g$. Since it must be over $(\ensuremath{\mathsf{p}}(n),h\ensuremath{\mathsf{p}}(m))$, we must have some isomorphism $\alpha\colon \ensuremath{\mathsf{p}}(Z') \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(Z')$ such that $\ensuremath{\mathsf{p}}(n) = \alpha \ensuremath{\mathsf{p}}(n)$ and $\ensuremath{\mathsf{p}}(k)\ensuremath{\mathsf{p}}(m) = \alpha h \ensuremath{\mathsf{p}}(m)$.
But since $\ensuremath{\mathsf{p}}(n)$ is monic, the first equality means $\alpha = 1$; then since $\ensuremath{\mathsf{p}}(m)$ is monic the second equality gives $\ensuremath{\mathsf{p}}(k) = h$. Thus we have $kf = g$ and $\ensuremath{\mathsf{p}}(k) = h$, so since $\tilde{h}$ was unique with these properties, $k = \tilde{h}$. So $(n,\tilde{h}m)$ is unique. \end{proof} Thus, having an $\ensuremath{\mathsf{M}}$-category functor $\ensuremath{\mathsf{p}}\colon (\ensuremath{\mathbb E}\xspace,\ensuremath{\mathsf{M}}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\ensuremath{\mathbb B}\xspace,\ensuremath{\mathsf{M}}_{\ensuremath{\mathbb B}\xspace})$ being an ordinary fibration is close to ${\sf Par}({\sf p})\colon {\sf Par}(\ensuremath{\mathbb E}\xspace,{\sf M}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Par}(\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$ being a latent fibration. However, something is missing: if we are given a span \[ \xymatrix{ & Y' \ar[dl]_m \ar[dr]^{f} & \\ Y & & \ensuremath{\mathsf{p}}(X)} \] in ${\sf Par}(\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$, then by the above result, we can lift $f$ to a Cartesian arrow; however, there is no reason why there should be an $\ensuremath{\mathsf{M}}_{\ensuremath{\mathbb E}\xspace}$ monic over $m$ in $\ensuremath{\mathbb E}\xspace$. Thus, we make the following definitions. \begin{definition} Let $\ensuremath{\mathsf{p}}\colon (\ensuremath{\mathbb E}\xspace,{\sf M}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$ be an ${\sf M}$-category functor.
\begin{itemize} \item $\ensuremath{\mathsf{p}}$ is said to be {\bf ${\sf M}$-plentiful} in case for every object $X \in \ensuremath{\mathbb E}\xspace$ and ${\sf M}_{\ensuremath{\mathbb B}\xspace}$-map $m\colon \ensuremath{\mathsf{p}}(X) \@ifnextchar^ {\t@@}{\t@@^{}} Y$ there is an ${\sf M}_\ensuremath{\mathbb E}\xspace$-map $n\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ in $\ensuremath{\mathbb E}\xspace$ with ${\sf p}(n) = m$. \item $\ensuremath{\mathsf{p}}$ is said to be an \textbf{${\sf M}$-fibration} if it is a fibration (in the ordinary sense) and it is ${\sf M}$-plentiful. \end{itemize} \end{definition} This is enough to get an ($r$-split) latent fibration: \begin{lemma} If $\ensuremath{\mathsf{p}}\colon (\ensuremath{\mathbb E}\xspace,{\sf M}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$ is an ${\sf M}$-fibration then ${\sf Par}({\sf p})\colon {\sf Par}(\ensuremath{\mathbb E}\xspace,{\sf M}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Par}(\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$ is an $r$-split latent fibration. \end{lemma} \begin{proof} Let $(m,f)\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(X)$ be a map in ${\sf Par}(\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$, i.e., a span: \[ Y \@ifnextchar^ {\t@@left}{\t@@left^{}}^{m} Y' \@ifnextchar^ {\t@@}{\t@@^{}}^f \ensuremath{\mathsf{p}}(X) \] We lift this to \[ Y_0 \@ifnextchar^ {\t@@left}{\t@@left^{}}^n Y_1' \@ifnextchar^ {\t@@}{\t@@^{}}^{f^*} X \] where $f^*$ is a Cartesian arrow above $f$ and $n$ is any ${\sf M}_{\ensuremath{\mathbb E}\xspace}$-map above $m$, the existence of which is guaranteed by the plentiful requirement. Then by Lemma \ref{lemma:prone_in_partialMapCat}, $(n,f^*)$ is prone in ${\sf Par}(\ensuremath{\mathbb E}\xspace,{\sf M}_{\ensuremath{\mathbb E}\xspace})$.
We also need to show that ${\sf Par}({\sf p})$ is well-fibred. An idempotent is a span $(m,m)$ where $m\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ is in ${\sf M}_\ensuremath{\mathbb E}\xspace$: its splitting (as an equalizer with the identity) is $(m,1_{X'})$. Now $(m,m)$ could have $(\ensuremath{\mathsf{p}}(m),\ensuremath{\mathsf{p}}(m)) = (1_X,1_X)$ and yet $\ensuremath{\mathsf{p}}(X') \neq \ensuremath{\mathsf{p}}(X)$ although, as both are limits of the trivial equalizer, they must be isomorphic, say by $\gamma\colon \ensuremath{\mathsf{p}}(X') \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(X)$. But that means there is a Cartesian arrow above $\gamma^{-1}$, $(\gamma^{-1})^{*}\colon X_0 \@ifnextchar^ {\t@@}{\t@@^{}} X'$ which is an isomorphism and has $\ensuremath{\mathsf{p}}(X_0) =\ensuremath{\mathsf{p}}(X)$. This means that $(\gamma^{-1})^{*}m\colon X_0 \@ifnextchar^ {\t@@}{\t@@^{}} X$ is also a splitting and it is now in the fiber over $\ensuremath{\mathsf{p}}(X)$. \end{proof} Conversely we have: \begin{lemma}\label{lemma:split_to_MFibration} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is an $r$-split latent fibration, then the induced functor between the total map categories, with their restriction monics, ${\sf Total}(\ensuremath{\mathsf{p}})\colon ({\sf Total}(\ensuremath{\mathbb E}\xspace),{\sf Monic}(\ensuremath{\mathbb E}\xspace)) \@ifnextchar^ {\t@@}{\t@@^{}} ({\sf Total}(\ensuremath{\mathbb B}\xspace),{\sf Monic}(\ensuremath{\mathbb B}\xspace))$, is an ${\sf M}$-fibration. \end{lemma} \begin{proof} The fact that $\ensuremath{\mathsf{p}}$ is a latent fibration ensures that ${\sf Total}(\ensuremath{\mathsf{p}})$ is a fibration as discussed in Corollary \ref{totals-in-latent-fibration}. It remains, therefore, to show that ${\sf Total}(\ensuremath{\mathsf{p}})$ is ${\sf M}$-plentiful.
Toward this end let $m\colon \ensuremath{\mathsf{p}}(E) \@ifnextchar^ {\t@@}{\t@@^{}} X$ be a restriction monic in $\ensuremath{\mathbb B}\xspace$, so that by definition there is a restriction retraction $r\colon X \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathsf{p}}(E)$ such that $mr = 1_{\ensuremath{\mathsf{p}}(E)}$ and $rm = \rst{r}$. Let $r'\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} E$ be a prone arrow over it. Then since $\ensuremath{\mathbb E}\xspace$ is $r$-split, we can use Lemma \ref{restriction-retraction}.iv to get that $r'$ is itself a restriction retraction. Thus, there is a restriction monic $m'\colon E \@ifnextchar^ {\t@@}{\t@@^{}} X'$ which is its partial inverse. Since restriction functors preserve partial inverses, we have \[ \ensuremath{\mathsf{p}}(m') = \ensuremath{\mathsf{p}}(r'^{(-1)}) =\ensuremath{\mathsf{p}}(r')^{(-1)} = r^{(-1)} = m, \] showing that $\ensuremath{\mathsf{p}}$ is ${\sf M}$-plentiful. \end{proof} As a consequence we now have: \begin{theorem} $r$-split latent fibrations, in the equivalence between $r$-split restriction categories and ${\sf M}$-categories, correspond precisely to ${\sf M}$-fibrations. \end{theorem} This gives us a further source of examples of latent fibrations. Any category with finite limits $\ensuremath{\mathbb X}\xspace$ is an ${\sf M}$-category with respect to all the monics. The arrow category gives the standard fibration over $\ensuremath{\mathbb X}\xspace$, $\partial\colon \ensuremath{\mathbb X}\xspace^{\rightarrow} \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb X}\xspace$. Then moving to ${\sf Par}(\partial)$ always gives a latent fibration as above any monic $m\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb X}\xspace$ and any object $x\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ in $\ensuremath{\mathbb X}\xspace^\rightarrow$ lies the map (which is a square): \[ \xymatrix{X' \ar[d]_x \ar@{=}[r] & X' \ar[d]^{xm} \\ X \ar[r]_m & Y} \] which is clearly monic. 
Note when $m$ is an isomorphism this is an isomorphism in $\ensuremath{\mathbb X}\xspace^\rightarrow$ and thus this $\partial$ is ${\sf M}$-conservative. This means the functor ${\sf Par}(\partial)$ is ${\sf M}$-plentiful and the ${\sf Par}(\partial)$ is a latent fibration -- and in a few moments we will see, because pullbacks of monics along any map are monic, that this is an \emph{admissible} latent fibration. Furthermore, it is not hard to see that this is actually the full subfibration of ${\sf Par}(\ensuremath{\mathbb X}\xspace,{\sf Monic})^{\leadsto} \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Par}(\ensuremath{\mathbb X}\xspace,{\sf Monic})$ determined by the objects $a\colon A' \@ifnextchar^ {\t@@}{\t@@^{}} A \in \ensuremath{\mathbb E}\xspace$ such that $a$ is total. This means, in particular, it is not hyperconnected. It is worth noting that this is not the only choice of ${\sf M}$-maps possible for the total category $\ensuremath{\mathbb X}\xspace^\rightarrow$, as we could have chosen squares which are pullbacks. These are clearly monics in $\ensuremath{\mathbb X}\xspace^\rightarrow$ and still give an ${\sf M}$-plentiful fibration using the codomain map as the above square for demonstrating ${\sf M}$-plenitude is a pullback. It is not hard then to see that passing to the partial map categories for this fibration gives a subfibration of the strict codomain fibration, ${\sf Par}(\ensuremath{\mathbb X}\xspace,{\sf Monic})^{\rightarrow} \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Par}(\ensuremath{\mathbb X}\xspace,{\sf Monic})$ determined by objects $a$ which are total maps. \subsection{Admissible, separated, and hyperconnected \texorpdfstring{${\sf M}$}{M}-fibrations} In this section we characterize the admissible, separated, and hyperconnected latent fibrations between partial map categories. 
\begin{definition} An ${\sf M}$-fibration $\ensuremath{\mathsf{q}}\colon ({\ensuremath{\mathbb T}}\xspace,{\sf M}_{{\ensuremath{\mathbb T}}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\S,{\sf M}_{\S})$ is an \textbf{admissible ${\sf M}$-fibration} if Cartesian maps over ${\sf M}$-maps are themselves ${\sf M}$-maps. \end{definition} We can immediately observe: \begin{lemma} If ${\sf p}\colon (\ensuremath{\mathbb E}\xspace,{\sf M}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$ is an admissible ${\sf M}$-fibration then ${\sf Par}({\sf p})\colon {\sf Par}(\ensuremath{\mathbb E}\xspace,{\sf M}_{\ensuremath{\mathbb E}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Par}(\ensuremath{\mathbb B}\xspace,{\sf M}_{\ensuremath{\mathbb B}\xspace})$ is an admissible $r$-split latent fibration. \end{lemma} \begin{proof} We must show that there is a prone restriction idempotent above every restriction idempotent. A restriction idempotent as a partial map is a span $(m,m)$. Thus, if we let $m^*$ be a Cartesian map above $m$, by assumption $m^*$ is in ${\sf M}_{\ensuremath{\mathbb E}\xspace}$, and by Lemma \ref{lemma:prone_in_partialMapCat}, $(m^*,m^*)$ is prone over $(m,m)$. \end{proof} Conversely, we also have: \begin{lemma} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is an admissible $r$-split latent fibration, then ${\sf Total}(\ensuremath{\mathsf{p}})\colon ({\sf Total}(\ensuremath{\mathbb E}\xspace),{\sf Monic}(\ensuremath{\mathbb E}\xspace)) \@ifnextchar^ {\t@@}{\t@@^{}} ({\sf Total}(\ensuremath{\mathbb B}\xspace),{\sf Monic}(\ensuremath{\mathbb B}\xspace))$ is an admissible ${\sf M}$-fibration. \end{lemma} \begin{proof} We already know by Lemma \ref{lemma:split_to_MFibration} that ${\sf Total}(\ensuremath{\mathsf{p}})$ is an ${\sf M}$-fibration; thus, it remains to show the ${\sf M}$-admissible property. 
Suppose that $m\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ is a Cartesian map in ${\sf Total}(\ensuremath{\mathbb E}\xspace)$ such that $\ensuremath{\mathsf{p}}(m)$ is a restriction monic in $\ensuremath{\mathbb B}\xspace$. By Lemma \ref{lemma:cart_are_prone}, $m$ is prone in $\ensuremath{\mathbb E}\xspace$. Then by Lemma \ref{restriction-monics-in-fibrations}, $m$ is a partial isomorphism, so there is an $r\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X'$ so that $rm = \rs{r}$ and $mr = \rs{m} = 1$. That is, $m$ is a restriction monic. \end{proof} \begin{theorem} Admissible $r$-split latent fibrations, in the equivalence between $r$-split restriction categories and ${\sf M}$-categories, correspond precisely to admissible ${\sf M}$-fibrations. \end{theorem} We next turn to characterizing separated latent fibrations between partial map categories. \begin{definition} An ${\sf M}$-fibration $\ensuremath{\mathsf{q}}\colon ({\ensuremath{\mathbb T}}\xspace,{\sf M}_{{\ensuremath{\mathbb T}}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\S,{\sf M}_{\S})$ is a \textbf{separated ${\sf M}$-fibration} if all ${\sf M}_{{\ensuremath{\mathbb T}}\xspace}$-maps are Cartesian. \end{definition} \begin{lemma} If ${\sf q}\colon ({\ensuremath{\mathbb T}}\xspace,{\sf M}_{{\ensuremath{\mathbb T}}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} (\S,{\sf M}_{\S})$ is a separated ${\sf M}$-fibration then ${\sf Par}({\sf q})\colon {\sf Par}({\ensuremath{\mathbb T}}\xspace,{\sf M}_{{\ensuremath{\mathbb T}}\xspace}) \@ifnextchar^ {\t@@}{\t@@^{}} {\sf Par}(\S,{\sf M}_{\S})$ is a separated $r$-split latent fibration. \end{lemma} \begin{proof} By Proposition \ref{prop:separated_equivalences}, it suffices to show that restriction idempotents are prone, so suppose that $(m,m)$ is a restriction idempotent in ${\sf Par}({\ensuremath{\mathbb T}}\xspace,{\sf M}_{{\ensuremath{\mathbb T}}\xspace})$. Then by assumption $m$ is Cartesian, so by Lemma \ref{lemma:prone_in_partialMapCat}, $(m,m)$ is prone.
\end{proof} \begin{lemma} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a separated $r$-split latent fibration, then ${\sf Total}(\ensuremath{\mathsf{p}})\colon ({\sf Total}(\ensuremath{\mathbb E}\xspace),{\sf Monic}(\ensuremath{\mathbb E}\xspace)) \@ifnextchar^ {\t@@}{\t@@^{}} ({\sf Total}(\ensuremath{\mathbb B}\xspace),{\sf Monic}(\ensuremath{\mathbb B}\xspace))$ is a separated ${\sf M}$-fibration. \end{lemma} \begin{proof} Suppose $m\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is a restriction monic in $\ensuremath{\mathbb E}\xspace$. Then by Corollary \ref{cor:res_monic_prone}, $m$ is prone, and by definition it is total, hence it is Cartesian in ${\sf Total}(\ensuremath{\mathbb E}\xspace)$. \end{proof} \begin{theorem} Separated $r$-split latent fibrations, in the equivalence between $r$-split restriction categories and ${\sf M}$-categories, correspond precisely to separated ${\sf M}$-fibrations. \end{theorem} By proposition \ref{prop:hyperconnected_equivalence}, we now also have a characterization of $r$-split latent hyperfibrations: \begin{corollary} $r$-split latent hyperfibrations, in the equivalence between $r$-split restriction categories and ${\sf M}$-categories, correspond precisely to ${\sf M}$-fibrations which are admissible and separated. \end{corollary} \subsection{\texorpdfstring{$r$}{r}-splitting a latent fibration} Next, we turn to the question of what happens when we split the restriction idempotents of a latent fibration. Unfortunately, in general this will not again be a latent fibration. To see the issue, suppose we have a latent fibration $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ and a map $f\colon (Y,e_0) \@ifnextchar^ {\t@@}{\t@@^{}} (\ensuremath{\mathsf{p}}(X),\ensuremath{\mathsf{p}}(e))$ in $\split(\ensuremath{\mathbb B}\xspace)$. 
As a mere map in $\ensuremath{\mathbb B}\xspace$, we can lift it to a $\ensuremath{\mathsf{p}}$-prone map $f^*\colon X' \@ifnextchar^ {\t@@}{\t@@^{}} X$ in $\ensuremath{\mathbb E}\xspace$. However, we need it to be a map in $\split(\ensuremath{\mathbb E}\xspace)$, and in particular we need a restriction idempotent $e'$ over $e$ to serve as the domain for $f^*$ as a map in $\split(\ensuremath{\mathbb E}\xspace)$. As we have seen earlier (specifically, in the introduction to section \ref{sec:types}), this need not always exist. Moreover, we will need this idempotent $e'$ to itself be prone, so that when we compose it with $f^*$ the result will again be prone. Of course, asking for a prone restriction idempotent in $\ensuremath{\mathbb E}\xspace$ over a restriction idempotent in $\ensuremath{\mathbb B}\xspace$ is precisely what we demand of an admissible latent fibration, and this property turns out to be sufficient. \begin{proposition} \label{splitting-latent-fibration} If ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is an admissible latent fibration then $\split(\ensuremath{\mathsf{p}})\colon \split(\ensuremath{\mathbb E}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \split(\ensuremath{\mathbb B}\xspace)$ is an admissible latent fibration. \end{proposition} \begin{proof} It is easy to see that splitting idempotents always results in a well-fibred restriction functor: suppose $e = \rst{e}$ is an object of $\split(\ensuremath{\mathbb B}\xspace)$ and $e_0\colon e_1 \@ifnextchar^ {\t@@}{\t@@^{}} e_1$ is in $\ensuremath{\mathsf{p}}^{-1}(e)$, so $\ensuremath{\mathsf{p}}(e_1) = e$ and $\ensuremath{\mathsf{p}}(e_0) = e$, then a splitting of the map $e_0$ is given by $e_1 \@ifnextchar^ {\t@@}{\t@@^{}}^{e_0} e_0 \@ifnextchar^ {\t@@}{\t@@^{}}^{e_0} e_1$ and this is all in $\ensuremath{\mathsf{p}}^{-1}(e)$. 
Now suppose $f\colon e_0 \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}(e)$ is a map in $\split(\ensuremath{\mathbb B}\xspace)$, with $e$ a restriction idempotent in $\ensuremath{\mathbb E}\xspace$; we must show that there is a prone lifting to some map in $\split(\ensuremath{\mathbb E}\xspace)$. As a mere map, $f\colon B \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}(E)$ in $\ensuremath{\mathbb B}\xspace$ has a prone arrow over it $f'\colon E' \@ifnextchar^ {\t@@}{\t@@^{}} E$ but also $e_0$ has a, prone, restriction idempotent $e_0^{*}$ on $E'$ over it (as ${\sf p}$ is an admissible latent fibration): we claim that $e_0^{*} f' e\colon e_0^{*} \@ifnextchar^ {\t@@}{\t@@^{}} e$ is prone over $f\colon e_0 \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}(e)$: it is certainly over $f$. Suppose now that $g\colon e_1 \@ifnextchar^ {\t@@}{\t@@^{}} e$ in $\ensuremath{\mathbb E}\xspace$ and ${\sf p}(g) = h f$ is precise with $h\colon {\sf p}(e_1) \@ifnextchar^ {\t@@}{\t@@^{}} e_0$ then there is a lifting of $h$, $\widetilde{h}$, in $\ensuremath{\mathbb E}\xspace$ to give a precise triangle $\widetilde{h} (e_0^{*} f') = g$. The preciseness means $\rst{\widetilde{h}} = \rst{g} \leq e_1$ so it starts at the idempotent $e_1$ as desired. It also ends at $e_0^{*}$ as $h e_0= h$ so $\widetilde{h} e_0^{*} = \widetilde{h}$. Finally we have $\widetilde{h}e_0^{*} f' e = \widetilde{h} f' e = g e = g$. Thus $e_0^{*} f' e\colon e_0^{*} \@ifnextchar^ {\t@@}{\t@@^{}} e$ is prone over $f\colon e_0 \@ifnextchar^ {\t@@}{\t@@^{}} {\sf p}(e)$ and $\split({\sf p})\colon \split(\ensuremath{\mathbb E}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \split(\ensuremath{\mathbb B}\xspace)$ is a latent fibration.
Finally we must show that above each restriction idempotent $e$ on ${\sf p}(e')$, in $\split(\ensuremath{\mathbb B}\xspace)$, which means $e \leq {\sf p}(e')$ in $\ensuremath{\mathbb B}\xspace$, there is a prone restriction idempotent on $e'$: it is of course $e^{*}e'$, which is prone by the argument above, and less than or equal to $e'$. \end{proof} The property of being separated (thus, hyperconnected) is also preserved under this process: \begin{corollary} If ${\sf p}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a latent hyperfibration then $\split({\sf p})\colon \split(\ensuremath{\mathbb E}\xspace) \@ifnextchar^ {\t@@}{\t@@^{}} \split(\ensuremath{\mathbb B}\xspace)$ is a latent hyperfibration. \end{corollary} \begin{proof} Partial isomorphisms in $\split(\ensuremath{\mathbb E}\xspace)$ are partial isomorphisms in $\ensuremath{\mathbb E}\xspace$ so are prone there and this easily means they are prone in $\split(\ensuremath{\mathbb E}\xspace)$. The result thus follows from Proposition \ref{prop:separated_equivalences} and Proposition \ref{prop:hyperconnected_equivalence}. \end{proof} \section{The Dual of a Latent Hyperfibration}\label{sec:dual} Latent hyperfibrations have the interesting property that one can form their fibred dual. This process is, for example, important in framing the semantics of reversible differential programming in which a fibrewise ability to take the dual is required. For an ordinary fibration, one can construct its fibred dual by using a span construction on the total category of the fibration (for example, see \cite[pg. 898]{journal:benabou_dual}, \cite[Section 1.10.11]{book:Jacobs-Cat-Log}, and \cite{arxiv:simple-dual}). In this section, we see how to generalize this construction to any latent hyperfibration. We begin by investigating certain conditions under which we can form a restriction category of spans. 
\subsection{Hyper-opens and spans} Let $\ensuremath{\mathbb X}\xspace$ be a restriction category and ${\cal H}$ and ${\cal Q}$ be two {\bf downclosed systems} of maps, that is classes of maps which are downclosed, closed to composition, and contain the partial isomorphisms, which commute in the sense that for each $h\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B \in {\cal H}$ and $q\colon C \@ifnextchar^ {\t@@}{\t@@^{}} B \in {\cal Q}$ there is a latent pullback \[ \xymatrix{& D \ar[dr]^{h'} \ar[dl]_{q'} \\ A \ar[dr]_{h} & & C \ar[dl]^{q} \\ & B} \] such that $h' \in {\cal H}$ and $q' \in {\cal Q}$. Note that if there is one latent pullback with this property then {\em all\/} latent pullbacks for the cospan $(h,q)$ will have this property as the classes are certainly closed to mediation. We shall be particularly interested in the situation in which the maps in ${\cal H}$ are {\bf hyper-open} in the sense that, for each $(h\colon X\@ifnextchar^ {\t@@}{\t@@^{}} Y) \in {\cal H}$ there is an isomorphism $\exists_h$ which is inverse to the pullback $h^*$ in the sense that the following are inverse: \[ \exists_h\colon \rst{h}/{\cal O}(X) \@ifnextchar^ {\t@@}{\t@@^{}} \exists_h(1_X)/{\cal O}(Y); e \mapsto \exists_h(e) ~~~~~~h^{*}\colon \exists_h(1_X)/{\cal O}(Y) \@ifnextchar^ {\t@@}{\t@@^{}} \rst{h}/{\cal O}(X)\colon e \mapsto \rst{he} \] Note that any hyper-open map is automatically open (for a map to be open one only requires the existence of the adjoint, where hyper-open requires the adjoint to be an isomorphism; see \cite{journal:ranges1}) and so in particular any hyper-open map $h\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ has an associated map $\widehat{h}\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Y$ which satisfies the properties of a range combinator. 
Given such a commuting pair of classes of maps we can form the span category ${\sf Span}_\ensuremath{\mathbb X}\xspace({\cal H},{\cal Q})$ with the following data: \begin{description} \item[Objects: ] Those of $\ensuremath{\mathbb X}\xspace$; \item[Maps: ] An arrow $A \@ifnextchar^ {\t@@}{\t@@^{}} B$ consists of an equivalence class of spans \[ \xymatrix{ & A' \ar[dl]_{h} \ar[dr]^{q} \\ A && B} \] such that $h \in {\cal H}$, $q \in {\cal Q}$ with $\rst{h} = \rst{q}$ under the equivalence relation that $(h,q) \sim (h',q')$ when there is a mediating partial isomorphism $\alpha \in {\cal H}$ such that: \[ \xymatrix@C=4em@R=4em{ & A' \ar[dl]_{h} \ar[drr]_{q} \ar[r]^{\alpha} & A'' \ar[dll]^{h'}|<<<<<<<<<\hole \ar[dr]^{q'} \\ A & & & B } \] where $\alpha h' = h$ and $\alpha q' = q$, $\alpha^{-1} h = h'$ and $\alpha^{-1}q = q'$, and $\rst{\alpha^{-1}} = \rst{q'}$ and $\rst{\alpha} = \rst{q}$. \item[Identities: ] The identity span $A = A = A$; \item[Composition: ] By latent pullback. \end{description} ${\sf Span}_\ensuremath{\mathbb X}\xspace({\cal H},{\cal Q})$ is clearly a category but, in general, is not a restriction category. However, if ${\cal H}$ is a hyper-open class of maps then it is a restriction category with $\rst{(h,q)}= (\widehat{h},\widehat{h})$. \begin{proposition}\label{prop:span_res_cat} If ${\cal H}$ and ${\cal Q}$ are two commuting classes of maps with ${\cal H}$ a class of hyper-opens then ${\sf Span}_\ensuremath{\mathbb X}\xspace({\cal H},{\cal Q})$ is a restriction category. \end{proposition} \begin{proof} We will use $\circ$ to denote composition in ${\sf Span}_\ensuremath{\mathbb X}\xspace({\cal H},{\cal Q})$. For [R.1], let \[ \xymatrix{ & S \ar[dl]_{h} \ar[dr]^{q} \\ X && Y} \] be a representative of an arrow in ${\sf Span}_\ensuremath{\mathbb X}\xspace({\cal H},{\cal Q})$.
Then since $\hat{h}$ is a restriction idempotent (and hence a partial isomorphism), by Lemma \ref{latent-pullbacks}.iii, the composite $\rs{(h,q)} \circ (h,q)$ is given by \[ \xymatrix{ & & S \ar[dl]_{h\hat{h}} \ar[dr]^{\rs{h\hat{h}}} & & \\ & X \ar[dl]_{\hat{h}} \ar[dr]^{\hat{h}} & & S \ar[dl]_{h} \ar[dr]^{q} & \\ X & & X & & Y } \] But using the range properties of open maps, we have \[ (h\hat{h}\hat{h},\rs{h\hat{h}}q) = (h,\rs{h}q) = (h,\rs{q}q) = (h,q) \] as required. [R.2] and [R.3] follow similarly. For [R.4], let $(h_2^*h_1,q_1^*q_2)$ denote the composite $(h_1,q_1) \circ (h_2,q_2)$: \[ \xymatrix{ & & P \ar[dl]_{h_2^*} \ar[dr]^{q_1^*} & & \\ & S \ar[dl]_{h_1} \ar[dr]^{q_1} & & T \ar[dl]_{h_2} \ar[dr]^{q_2} & \\ X & & Y & & Z } \] (where the diamond is a latent pullback). Then $\rs{(h_1,q_1) \circ (h_2,q_2)} = (\widehat{h_2^*h_1},\widehat{h_2^*h_1})$, so as for [R.1], \[ \rs{(h_1,q_1) \circ (h_2,q_2)} \circ (h_1,q_1) = (h_1\widehat{h_2^*h_1}, \rs{h_1\widehat{h_2^*h_1}} q_1) = (\rs{h_1\widehat{h_2^*h_1}}h_1, \rs{h_1\widehat{h_2^*h_1}} q_1). \] On the other hand, $(h_1,q_1) \circ \rs{(h_2,q_2)}$ is given by \[ \xymatrix{ & & S \ar[dl]_{\rs{q\widehat{h_2}}} \ar[dr]^{q_1 \widehat{h_2}} & & \\ & S \ar[dl]_{h_1} \ar[dr]^{q_1} & & Y \ar[dl]_{\widehat{h_2}} \ar[dr]^{\widehat{h_2}} & \\ X & & Y & & Z } \] that is, \[ (h_1,q_1) \circ \rs{h_2,q_2} = (\rs{q_1\widehat{h_2}}h_1,q_1\widehat{h_2}) = (\rs{q_1\widehat{h_2}}h_1, \rs{q_1\widehat{h_2}}q_1) \] Thus, comparing the two expressions, it suffices to prove that \[ \rs{h_1\widehat{h_2^*h_1}} = \rs{q_1\widehat{h_2}}. \] Note that these are both restriction idempotents on $S$. To prove they are equal, since $h_2^*$ is hyper-open, pulling back along $h_2^*$ is an isomorphism; thus, it suffices to instead prove that \[ \rs{h_2^* \rs{h_1\widehat{h_2^*h_1}}} = \rs{h_2^*\rs{q_1\widehat{h_2}}}. 
\] Indeed, \[ \rs{h_2^* \rs{h_1\widehat{h_2^*h_1}}} = \rs{h_2^*h_1\widehat{h_2^*h_1}} = \rs{h_2^*h_1} \] while \[ \rs{h_2^*\rs{q_1\widehat{h_2}}} = \rs{h_2^* q_1 \widehat{h_2}} = \rs{q_1^*h_2 \widehat{h_2}} = \rs{q_1^*h_2} = \rs{h_2^*q_1} = \rs{h_2^*h_1} \] since $\rs{q_1} = \rs{h_1}$. \end{proof} \subsection{The fibrational dual} Our main construction of this section is the following: \begin{proposition} Suppose $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a latent hyperfibration. Then there is a restriction category $\ensuremath{\mathbb E}\xspace^*$ whose objects are those of $\ensuremath{\mathbb E}\xspace$ and whose maps are equivalence classes of spans \[ \xymatrix{ & S \ar[dl]_{v} \ar[dr]^{h} & \\ X & & Y} \] where $v$ is subvertical, $h$ is prone, and $\rs{v} = \rs{h}$; the equivalence is up to subvertical partial isomorphism and as in Proposition \ref{prop:span_res_cat}, the restriction of $[(v,h)]$ is $[(\hat{v},\hat{v})]$. Moreover, there is a restriction functor $\ensuremath{\mathsf{p}}^*\colon \ensuremath{\mathbb E}\xspace^* \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ which is defined on objects as $\ensuremath{\mathsf{p}}^*(A) = \ensuremath{\mathsf{p}}(A)$ and on arrows as $\ensuremath{\mathsf{p}}^*(v,h) = \ensuremath{\mathsf{p}}(h)$. \end{proposition} \begin{proof} Using Lemma \ref{lemma:pullback_prone_subvertical}, the prones and subverticals in a latent hyperfibration form a commuting pair of classes of maps; moreover, the subverticals are hyper-open and the classes are downclosed systems of maps. Thus Proposition \ref{prop:span_res_cat} tells us we can form a restriction category from the spans. We will use $\circ$ to denote composition in $\ensuremath{\mathbb E}\xspace^*$. We need to show that $\ensuremath{\mathsf{p}}^*$ is a well-defined restriction functor. 
To start, first note that for a subvertical partial isomorphism $\alpha$, $\alpha \alpha^{(-1)} = \rs{\alpha}$ gives $\ensuremath{\mathsf{p}}(\alpha)\ensuremath{\mathsf{p}}(\alpha^{(-1)}) = \rs{\ensuremath{\mathsf{p}}(\alpha)} = \ensuremath{\mathsf{p}}(\alpha)$, so that $\ensuremath{\mathsf{p}}(\alpha) \leq \ensuremath{\mathsf{p}}(\alpha^{(-1)})$. The reverse inequality follows similarly, and so $\ensuremath{\mathsf{p}}(\alpha) = \ensuremath{\mathsf{p}}(\alpha^{(-1)})$. We will use this to check $\ensuremath{\mathsf{p}}^*$ is well-defined. Indeed, suppose that $(v_1,h_1)$ and $(v_2,h_2)$ represent the same map in $\ensuremath{\mathbb E}\xspace^*$, so that, in particular, there is a subvertical partial isomorphism $\alpha$ so that $h_1 = \alpha h_2$ and $\rs{\alpha^{(-1)}}h_2 = h_2$. Then \[ \ensuremath{\mathsf{p}}(h_1) = \ensuremath{\mathsf{p}}(\alpha)\ensuremath{\mathsf{p}}(h_2) = \ensuremath{\mathsf{p}}(\alpha^{(-1)})\ensuremath{\mathsf{p}}(h_2) = \ensuremath{\mathsf{p}}(\rs{\alpha^{(-1)}})\ensuremath{\mathsf{p}}(h_2) = \ensuremath{\mathsf{p}}(\rs{\alpha^{(-1)}} h_2) = \ensuremath{\mathsf{p}}(h_2), \] so $\ensuremath{\mathsf{p}}^*$ is well-defined. For preservation of composition, suppose that we form the composite of $(v_1,h_1)$ and $(v_2,h_2)$: \[ \xymatrix{ & & S \ar[dl]_{v_2^*} \ar[dr]^{h_1^*} & & \\ & S \ar[dl]_{v_1} \ar[dr]^{h_1} & & T \ar[dl]_{v_2} \ar[dr]^{h_2} & \\ X & & Y & & Z } \] As in the previous lemma, we can choose $h_1^*$ to be the prone arrow over $\ensuremath{\mathsf{p}}(h_1)\ensuremath{\mathsf{p}}(v_2)$ so $\ensuremath{\mathsf{p}}(h_1^*) = \ensuremath{\mathsf{p}}(h_1)\ensuremath{\mathsf{p}}(v_2)$. 
Then we have \[ \ensuremath{\mathsf{p}}(h_1^*h_2) = \ensuremath{\mathsf{p}}(h_1^*)\ensuremath{\mathsf{p}}(h_2) = \ensuremath{\mathsf{p}}(h_1)\ensuremath{\mathsf{p}}(v_2)\ensuremath{\mathsf{p}}(h_2) = \ensuremath{\mathsf{p}}(h_1)\ensuremath{\mathsf{p}}(\rs{v_2})\ensuremath{\mathsf{p}}(h_2) = \ensuremath{\mathsf{p}}(h_1)\ensuremath{\mathsf{p}}(h_2) \] since $\rs{v_2} = \rs{h_2}$. Thus \[ \ensuremath{\mathsf{p}}^*[(v_1,h_1) \circ (v_2,h_2)] = \ensuremath{\mathsf{p}}^*[(v_2^*v_1,h_1^*h_2)] = \ensuremath{\mathsf{p}}(h_1^*h_2) = \ensuremath{\mathsf{p}}(h_1)\ensuremath{\mathsf{p}}(h_2) = \ensuremath{\mathsf{p}}^*(v_1,h_1) \ensuremath{\mathsf{p}}^*(v_2,h_2), \] so composition is preserved. Finally, $\ensuremath{\mathsf{p}}^*$ is a restriction functor since $\ensuremath{\mathsf{p}}(\hat{h}) = \ensuremath{\mathsf{p}}(h)$ by definition of $\hat{h}$. \end{proof} The following lemma is useful when working with maps in $\ensuremath{\mathbb E}\xspace^*$. \begin{lemma}\label{lemma:basic_dual_results} Suppose that $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a hyperfibration. \begin{enumerate}[(i)] \item For any $[(v,c)]\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb E}\xspace^*$ and any $c'\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Z$ prone in $\ensuremath{\mathbb E}\xspace$, \[ [(v,c)] \circ [(\rs{c'},c')] = [(\rs{cc'}v,cc')]. \] \item For any $v\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} X$ subvertical in $\ensuremath{\mathbb E}\xspace$ and any $[(v',c)]\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Z$ in $\ensuremath{\mathbb E}\xspace^*$, \[ [(v,\rs{v})] \circ [(v',c)] = [(v'v,\rs{v'v}c)]. \] \item Any $[(v,c)]\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb E}\xspace^*$ can be factored as: \[ (v,c) = (v,\rs{v}) \circ (\rs{c},c). 
\] \end{enumerate} \end{lemma} \begin{proof} For (i), by Lemma \ref{latent-pullbacks}.iii, $(v,c) \circ (\rs{f},f)$ is given by \[ \xymatrix{ & & S \ar[dl]_{\rs{cf}} \ar[dr]^{c\rs{f}} & & \\ & S \ar[dl]_{v} \ar[dr]^c & & Y \ar[dl]_{\rs{f}} \ar[dr]^{f} & \\ X & & Y & & Z } \] Thus, the composite is $(v,c) \circ (\rs{f},f) = (\rs{cf}v,cf)$; part (ii) follows similarly. For (iii), by (i), $(v,\rs{v}) \circ (\rs{c},c) = (\rs{\rs{v}c}v,\rs{v}c)$, which equals $(v,c)$ since $\rs{v} = \rs{c}$. \end{proof} \begin{lemma}\label{lemma:prone_e*} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a hyperfibration, and $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is prone in $\ensuremath{\mathbb E}\xspace$, then $(\rs{f},f)$ is prone in $\ensuremath{\mathbb E}\xspace^*$. \end{lemma} \begin{proof} Suppose we are given \[ \xymatrix{ Z \ar[dr]^{(v,c)} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(Z) \ar[dr]^{\ensuremath{\mathsf{p}}^*(v,c) = \ensuremath{\mathsf{p}}(c)} \ar[d]_{h} \\ X \ar[r]_{(\rs{f},f)} & Y && \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] such that $\rs{h} = \rs{p(c)}$. Then we claim that $(v,h')$ is the required unique fill-in, where $h'$ is the unique fill-in from the universal property of $f$ as a prone arrow in $E$: \[ \xymatrix{ S \ar@{-->}[d]_{h'} \ar[dr]^{c} & ~\ar@{}[drr]|{\textstyle\mapsto}&& \ensuremath{\mathsf{p}}(Z) \ar[dr]^{\ensuremath{\mathsf{p}}(c)} \ar[d]_{h} \\ X \ar[r]_{f} & Y && \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] First, $(v,h')$ is a well-defined arrow in $\ensuremath{\mathbb E}\xspace^*$: by Lemma \ref{lemma:left_factor_separated}, $h'$ is prone in $\ensuremath{\mathbb E}\xspace$, and by definition of $h'$ and $(v,c)$, $\rs{h'} = \rs{c} = \rs{v}$. 
Second, $(v,h')$ does make the triangle commute: using Lemma \ref{lemma:basic_dual_results}.i, \[ (v,h') \circ (\rs{f},f) = (\rs{h'f}v,h'f) = (\rs{c}v,c) = (v,c) \] since $\rs{c} = \rs{v}$. It remains to show that $(v,h')$ is unique with these properties. Thus, suppose we have an arrow $(w\colon T \@ifnextchar^ {\t@@}{\t@@^{}} Z, k\colon T \@ifnextchar^ {\t@@}{\t@@^{}} X)$ in $\ensuremath{\mathbb E}\xspace^*$ such that \[ \xymatrix{ Z \ar[d]_{(w,k)} \ar[dr]^{(v,c)} & ~\ar@{}[drr]|{\textstyle\mapsto}& & \ensuremath{\mathsf{p}}(Z) \ar[dr]^{\ensuremath{\mathsf{p}}(c)} \ar[d]_{h} \\ X \ar[r]_{(\rs{f},f)} & Y && \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] Since the triangle commutes, using Lemma \ref{lemma:basic_dual_results}.i, $(\rs{kf}w,kf) = (v,c)$. Thus, by definition of arrows in $\ensuremath{\mathbb E}\xspace^*$, there is some subvertical partial isomorphism $\alpha\colon S \@ifnextchar^ {\t@@}{\t@@^{}} T$ such that \[ \alpha kf = c, \ \alpha\rs{kf} w = v, \ \rs{\alpha} = \rs{c}, \mbox{ and } \rs{\alpha^{(-1)}} = \rs{kf}. \] We claim that this same $\alpha$ witnesses that $(w,k) = (v,h')$ in $\ensuremath{\mathbb E}\xspace^*$; that is, we claim that \[ \alpha k = h', \ \alpha w = v, \ \rs{\alpha} = \rs{h'}, \mbox{ and } \rs{\alpha^{(-1)}} = \rs{w}. \] Indeed, for the first requirement, we have $\alpha kf = c = h'f$. However, we also have \[ \ensuremath{\mathsf{p}}(\alpha k) = \rs{\ensuremath{\mathsf{p}}(\alpha)}\ensuremath{\mathsf{p}}(k) = \rs{\ensuremath{\mathsf{p}}(c)}\ensuremath{\mathsf{p}}(k) = \rs{h}\ensuremath{\mathsf{p}}(k) = \rs{h}h = h = \ensuremath{\mathsf{p}}(h'), \] so by proneness of $f$, $\alpha k = h'$. For the second requirement, \[ v = \alpha\rs{kf}w = \rs{\alpha kf}\alpha w = \rs{c}\alpha w = \rs{\alpha} \alpha w = \alpha w. \] For the third requirement, \[ \rs{\alpha} = \rs{c} = \rs{h'}. 
\] For the fourth requirement, we use hyperconnectedness: \[ \rs{\ensuremath{\mathsf{p}}(\alpha^{(-1)})} = \rs{\ensuremath{\mathsf{p}}(k)\ensuremath{\mathsf{p}}(f)} = \rs{h \ensuremath{\mathsf{p}}(f)} = \rs{h} = \rs{\ensuremath{\mathsf{p}}(k)} = \rs{\ensuremath{\mathsf{p}}(w)}, \] with the third equality since $h\ensuremath{\mathsf{p}}(f) = \ensuremath{\mathsf{p}}(c)$ is precise. Thus, since $\ensuremath{\mathsf{p}}$ is a hyperconnection, $\rs{\alpha^{(-1)}} = \rs{w}$. Thus $\alpha$ witnesses $(w,k) = (v,h')$ in $\ensuremath{\mathbb E}\xspace^*$, and so $(\rs{f},f)$ is prone, as required. \end{proof} \begin{theorem}\label{thm:dual_hyperfibration} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a hyperfibration then so is $\ensuremath{\mathsf{p}}^*\colon \ensuremath{\mathbb E}\xspace^* \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$. \end{theorem} \begin{proof} Given an $f\colon A \@ifnextchar^ {\t@@}{\t@@^{}} B$ in $\ensuremath{\mathbb B}\xspace$ and a $Y$ over $B$ let $f'\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ be its $\ensuremath{\mathsf{p}}$-prone lift. Then the previous lemma tells us that $(\rs{f'},f')$ is $\ensuremath{\mathsf{p}}^*$-prone, and so $\ensuremath{\mathsf{p}}^*$ is a latent fibration. Moreover, it is immediately admissible as this lift is a restriction idempotent if $f$ is. Finally, it is easy to check that $\ensuremath{\mathsf{p}}^*$ is a hyperconnection since $\ensuremath{\mathsf{p}}$ is, so $\ensuremath{\mathsf{p}}^*$ is a hyperfibration. \end{proof} Our goal in the rest of this section is to further understand this latent fibration. We begin by characterizing the subvertical maps: \begin{lemma}\label{lemma:verticals_in_e*} Let $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ be a hyperfibration. 
A map $[(v,c)]\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is subvertical in $\ensuremath{\mathbb E}\xspace^*$ if and only if $c$ is a partial isomorphism, if and only if $[(v,c)]$ has a unique representative of the form \[ \xymatrix{ & Y \ar[dl]_{v'} \ar[dr]^{\rs{v'}} & \\ X & & Y } \] \end{lemma} \begin{proof} If $(v,c)$ is subvertical, then $c$ is prone and subvertical, so by Proposition \ref{prop:hyper_consequences}.ii, $c$ is a partial isomorphism. Then define $v' := c^{(-1)}v$. Then the pair $(v',\rs{v'})$ represents the same map as $(v,c)$ via the subvertical partial isomorphism $c$; note that since $\rs{v} = \rs{c}$, \[ \rs{v'} = \rs{c^{(-1)}\rs{v}} = \rs{c^{(-1)}c} = \rs{c^{(-1)}}. \] Moreover, such a representative is unique. If we have any other equivalent span \[ \xymatrix{ & Y \ar[dl]_{w} \ar[dr]^{\rs{w}} & \\ X & & Y } \] then there is some subvertical partial isomorphism $\alpha\colon Y \@ifnextchar^ {\t@@}{\t@@^{}} Y$ such that (in particular) $\alpha\rs{v'} = \rs{w}$, $\rs{\alpha} = \rs{v'}$, and $\alpha v' = w$. But then $\alpha \leq \alpha \rs{v'} = \rs{w}$, so $\alpha$ is a restriction idempotent, and so we have \[ w = \alpha v' = \rs{\alpha} v' = \rs{v'} v' = v' \] If we have a map $[(v,c)]$ with such a representative, then it is clearly subvertical. \end{proof} Before we look at prone maps, it will be helpful to understand certain partial isomorphisms in $\ensuremath{\mathbb E}\xspace^*$. \begin{lemma}\label{lemma:partial_isos_e*} Let $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ be a hyperfibration. If $(v,\rs{v})\colon X \@ifnextchar^ {\t@@}{\t@@^{}} S$ represents a partial isomorphism in $\ensuremath{\mathbb E}\xspace^*$, then $v$ is a partial isomorphism in $\ensuremath{\mathbb E}\xspace$. \end{lemma} \begin{proof} Since $(v,\rs{v})$ is a subvertical partial isomorphism, its partial inverse is also subvertical (and over the same map, $\ensuremath{\mathsf{p}}(\rs{v})$). 
Then by the previous lemma, its partial inverse can be taken to be of the form \[ \xymatrix{ & X \ar[dl]_{v'} \ar[dr]^{\rs{v'}} & \\ S & & X} \] We want to show that $v'$ is the partial inverse of $v$. Since $(v',\rs{v'})$ represents a partial inverse of $(v,\rs{v})$, we have that their composite is equivalent to $\rs{(v,\rs{v})} = (\hat{v},\hat{v})$; by Lemma \ref{lemma:basic_dual_results}.ii, we have that the composite is represented by \[ (v'v,\rs{v'v}\rs{v'}) = (v'v,\rs{v'v}). \] Thus, there is a subvertical partial isomorphism $\alpha\colon X \@ifnextchar^ {\t@@}{\t@@^{}} X$ so that in particular \[ \alpha \rs{v'v} = \hat{v}, \alpha v'v = \hat{v}, \rs{\alpha} = \rs{\hat{v}} = \hat{v}. \] Then $\alpha \leq \alpha \rs{v'v} = \hat{v}$, so $\alpha$ is a restriction idempotent, and so $\alpha = \rs{\alpha} = \hat{v}$. Moreover, as noted above $\ensuremath{\mathsf{p}}(\rs{v'}) = \ensuremath{\mathsf{p}}(v) = \ensuremath{\mathsf{p}}(\hat{v})$, so since $\ensuremath{\mathsf{p}}$ is a hyperconnection, $\hat{v} = \rs{v'}$. So then \[ \rs{v'} = \hat{v} = \alpha v'v = \rs{v'}v'v = v'v. \] The other equality $(vv' = \rs{v})$ follows similarly, and so $v$ is indeed a partial isomorphism. \end{proof} We now characterize prone maps in $\ensuremath{\mathbb E}\xspace^*$: \begin{lemma}\label{lemma:prones_in_e*} Let $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ be a hyperfibration. A map $[(v,c)]\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ is prone in $\ensuremath{\mathbb E}\xspace^*$ if and only if $v$ is a partial isomorphism, if and only if $[(v,c)]$ has a unique representative of the form \[ \xymatrix{ & X \ar[dl]_{\rs{c'}} \ar[dr]^{c'} & \\ X & & Y } \] \end{lemma} \begin{proof} By Lemma \ref{lemma:basic_dual_results}.iii, \[ [(v,c)] = [(v,\rs{v})] \circ [(\rs{c},c)]. 
\] However, by Lemma \ref{lemma:prone_e*}, $[(\rs{c},c)]$ is prone, by assumption $[(v,c)]$ is prone, and this factorization is precise since $[(v,c)]$ and $[(v,\rs{v})]$ both have restriction $[(\hat{v},\hat{v})]$. Thus by Lemma \ref{lemma:left_factor_separated}, $[(v,\rs{v})]$ is also prone. Thus by Proposition \ref{prop:hyper_consequences}.i, $[(v,\rs{v})]$ is a partial isomorphism in $\ensuremath{\mathbb E}\xspace^*$, and so by Lemma \ref{lemma:partial_isos_e*}, $v$ is a partial isomorphism. Given that $v$ is a partial isomorphism, define $c'\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ by $c' := v^{(-1)}c$. Then it is straightforward to check that $v\colon S \@ifnextchar^ {\t@@}{\t@@^{}} X$ witnesses that $(\rs{c'},c')$ represents the same map as $[(v,c)]$. That this representative is unique is similar to the proof of a similar statement in Lemma \ref{lemma:verticals_in_e*}. The result that such maps are prone in $\ensuremath{\mathbb E}\xspace^*$ was Lemma \ref{lemma:prone_e*}. \end{proof} By Proposition \ref{prop:factorization}, any map $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in the total category of a latent fibration $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ has a subvertical/prone factorization, and this factorization is unique up to vertical partial isomorphism. We will need a slight strengthening of this result in the case when $\ensuremath{\mathsf{p}}$ is a latent hyperfibration: \begin{lemma}\label{lemma:factorization_in_hyper} Suppose $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a latent hyperfibration. Then for any $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb E}\xspace$, there is a factorization \[ X \@ifnextchar^ {\t@@}{\t@@^{}}^{v} S \@ifnextchar^ {\t@@}{\t@@^{}}^{c} Y \] where $\rs{c} = \hat{v}$. Such a factorization is unique up to subvertical partial isomorphism. 
\end{lemma} \begin{proof} Given such an $f$, let $c\colon S \@ifnextchar^ {\t@@}{\t@@^{}} Y$ be a prone lift of $\ensuremath{\mathsf{p}}(f)$, and let $v\colon X \@ifnextchar^ {\t@@}{\t@@^{}} S$ be the induced map over $\rs{\ensuremath{\mathsf{p}}(f)}$: \[ \xymatrix{ X \ar@{-->}[d]_{v} \ar[dr]^{f} & ~ \ar@{}[drr]|{\textstyle\mapsto}&&\ensuremath{\mathsf{p}}(X) \ar[dr]^{\ensuremath{\mathsf{p}}(f)} \ar[d]_{\rs{p(f)}} \\ S \ar[r]_{c} & Y && \ensuremath{\mathsf{p}}(X) \ar[r]_{\ensuremath{\mathsf{p}}(f)} & \ensuremath{\mathsf{p}}(Y)} \] Then we have \[ \ensuremath{\mathsf{p}}(\rs{c}) = \rs{\ensuremath{\mathsf{p}}(c)} = \rs{p(f)} = \ensuremath{\mathsf{p}}(\rs{v}) = \ensuremath{\mathsf{p}}(\hat{v}) \] (with the last by definition of $\hat{v}$) and so since $\ensuremath{\mathsf{p}}$ is a hyperconnection, $\rs{c} = \hat{v}$. Uniqueness of such a factorization follows from the uniqueness property of prone arrows. \end{proof} With the above results in hand, we can now prove a result that can be used to prove that $(\ensuremath{\mathbb E}\xspace^*)^*$ is isomorphic to $\ensuremath{\mathbb E}\xspace$, but is useful in its own right. \begin{proposition}\label{prop:dual_functor_correspondence} Suppose that $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ and $\ensuremath{\mathsf{q}}\colon \ensuremath{\mathbb Y}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ are latent hyperfibrations. Then there is a bijection between morphisms of latent fibrations (i.e., restriction functors which preserve the projection and prone arrows) \[ \{F\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace^*\} \cong \{G\colon \ensuremath{\mathbb X}\xspace^* \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace\}. \] \end{proposition} \begin{proof} We will sketch the construction and leave most of the details to the reader. 
Given a morphism of latent fibrations $F\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace^*$, define $\hat{F}\colon \ensuremath{\mathbb X}\xspace^* \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace$ as follows. On objects, $\hat{F}$ is defined as $F$. On a representative arrow \[ \xymatrix{ & S \ar[dl]_v \ar[dr]^c & \\ X & & Y} \] since $F$ is a restriction functor, $F(v)$ is a subvertical in $\ensuremath{\mathbb Y}\xspace^*$, and so by Lemma \ref{lemma:verticals_in_e*} corresponds to a unique subvertical arrow which we denote as $F_v(v)\colon FX \@ifnextchar^ {\t@@}{\t@@^{}} FS$. Similarly since $F$ preserves prone arrows, $F(c)$ is prone in $\ensuremath{\mathbb Y}\xspace^*$, and so by Lemma \ref{lemma:prones_in_e*}, corresponds to a unique prone arrow $F_c(c)\colon FS \@ifnextchar^ {\t@@}{\t@@^{}} FY$. We then define $\hat{F}([(v,c)])$ to be the composite \[ FX \@ifnextchar^ {\t@@}{\t@@^{}}^{F_v(v)} FS \@ifnextchar^ {\t@@}{\t@@^{}}^{F_c(c)} FY. \] This is well-defined since if $(v,c)$ is equivalent to some other $(v',c')$, then there is a partial isomorphism taking $v$ to $v'$ and $c$ to $c'$; thus $F(v)$ and $F(v')$ are the same map in $\ensuremath{\mathbb Y}\xspace^*$, and so $F_v(v) = F_v(v')$ and similarly $F_c(c) = F_c(c')$. Conversely, suppose we have a morphism of latent fibrations $G\colon \ensuremath{\mathbb X}\xspace^* \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace$. We define $\hat{G}\colon \ensuremath{\mathbb X}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb Y}\xspace^*$ on objects as $G$. For an arrow $f\colon X \@ifnextchar^ {\t@@}{\t@@^{}} Y$ in $\ensuremath{\mathbb X}\xspace$, by Lemma \ref{lemma:factorization_in_hyper} there is a factorization \[ X \@ifnextchar^ {\t@@}{\t@@^{}}^{v} S \@ifnextchar^ {\t@@}{\t@@^{}}^{c} Y \] such that $\hat{v} = \rs{c}$ (and this factorization is unique up to subvertical partial isomorphism). 
Then \[ \xymatrix{& X \ar[dl]_v \ar[dr]^{\rs{v}} & \\ S & & X \ar@{}[u]_{~~~~} } \xymatrix{ & S \ar[dl]_{\rs{c}} \ar[dr]^c & \\ S & & Y} \] are respectively subvertical and prone in $\ensuremath{\mathbb X}\xspace^*$. Thus we define $\hat{G}(f)$ to be the equivalence class of the span \[ \xymatrix{& GS \ar[dl]_{G([(v,\rs{v})])} \ar[dr]^{G([(\rs{c},c)])} & \\ GX & & GY } \] Note that this has the required restriction property since \[ \rs{[(v,\rs{v})]} = \hat{v} = \rs{c} = \hat{\rs{c}} = \rs{[(\rs{c},c)]}. \] Also note that $\hat{G}$ is independent of the choice of factorization since such factorizations are unique up to subvertical partial isomorphism. \end{proof} \begin{corollary} If $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$ is a latent hyperfibration, then $\ensuremath{\mathbb E}\xspace^{**}$ is isomorphic to $\ensuremath{\mathbb E}\xspace$. \end{corollary} \begin{proof} Using Proposition \ref{prop:dual_functor_correspondence}, from the identity functor $\ensuremath{\mathbb E}\xspace^* \@ifnextchar^ {\t@@}{\t@@^{}}^{1} \ensuremath{\mathbb E}\xspace^*$ we get a functor $\ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}}^{\eta} \ensuremath{\mathbb E}\xspace^{**}$ and a functor $\ensuremath{\mathbb E}\xspace^{**} \@ifnextchar^ {\t@@}{\t@@^{}}^{\varepsilon} \ensuremath{\mathbb E}\xspace$; using the previous results it is straightforward to check that these are inverse to one another. \end{proof} \section{Conclusion} Aside from developing the applications of latent fibrations to, for example, restriction differential categories and partial lenses, there is much theoretical work to be done to bring the theory of latent fibrations to a state of maturity. We certainly hope to return to this task in future papers and we also hope that others will join us in this development. 
While there are some aspects of this theory for which we already have a partial understanding there are other aspects which are completely open. For example, traditionally fibrations are used to model logical features such as existential and universal quantification and as a semantics of type theories, however, at this stage, we have little idea of what a type theory corresponding to a latent fibration might look like. Turning to aspects for which we do have some understanding we have shown above that every latent fibration has associated re-indexing (or substitution) restriction \emph{semi\/}functors. Nonetheless, we have not here provided a full equivalence of such a collection of re-indexing semifunctors to a latent fibration. In other words, we have not provided the analogue of the normal Grothendieck construction for latent fibrations. However, at this stage, we do have an understanding of this construction which we hope to publish separately. Notably the construction requires more structure than for the normal case but, as in the normal case, has the potential of providing a significantly different and important perspective. The additional structure is actually a ``categorification'' of the structure used to define restriction presheaves introduced in \cite{journal:lin-garner-cocomp-rcat}. A further, rather separate issue, concerns the behaviour of factorization systems over latent fibrations. It is well-known that given a normal fibration, $\ensuremath{\mathsf{p}}\colon \ensuremath{\mathbb E}\xspace \@ifnextchar^ {\t@@}{\t@@^{}} \ensuremath{\mathbb B}\xspace$, any orthogonal factorization system in $\ensuremath{\mathbb B}\xspace$ can be lifted to one in $\ensuremath{\mathbb E}\xspace$. A similar result can be proven for latent fibrations. However, some care is needed at the outset of this project, for defining what is meant by a factorization system for a restriction category has some, perhaps, unexpected features. 
This also we hope to return to in future work. At this stage in the development of the theory of latent fibrations, what we know still seems dwarfed by what we still do not know. Perhaps the major open issues concern the understanding of how logical features and, indeed, restrictional features (joins, meets, discreteness, latent limits, classifiers, etc.) interact with the structure of latent fibrations and how these, in turn, translate into type theoretic features. \bibliographystyle{plain}
{ "redpajama_set_name": "RedPajamaArXiv" }
1,253
\section{Introduction} Communication system evolution has led to the emergence of a plethora of new applications with diverse requirements in terms of capacity and reliability. As a result, many aspects of the network have had to become more complex and scalable. Fiber-optic channels currently meet the high capacity demands required, however, these optical networks will need to become elastic in the near future to support heterogeneous traffic and bitrates. Elasticity means that they are able to provide scalable bandwidth on demand and continuously adapt to ensure efficient resource utilization\cite{Liu2020}. This can be achieved by using Bandwidth Variable Transceivers (BVT's) that are capable of generating variable bitrates \cite{gerstel2012}, Re-configurable Optical Add-Drop Multiplexers (ROADM's)\cite{gerstel2012,Berthold2008} that utilize wavelength selective switches to switch between flexible spectrum, and virtualization at network or transponder level\cite{Jinno2017}. A drawback of the resultant flexibility is that impairments experienced on the network vary with time because of the constantly changing light paths. To account for this, large safety design margins are often employed for such links to guarantee reliability which leads to wastage of network resources. To meet the required spectrum efficiency, low margins must therefore be used hence it becomes crucial to monitor the performance of the optical links in real-time\cite{Morais2018,Liu2020}. In addition, networks are also becoming more intelligent which means that they have to be capable of self-optimization and self-diagnosing. In the case of fiber networks, this would mean that the network can detect anomalies along specific paths and therefore re-route traffic to other links, adapt the modulation format of signals based on link conditions and traffic and predict future network demands or failures along paths. 
In order to do this, they need to be able to continuously monitor the quality of signals along the various paths. Optical performance monitoring (OPM) involves acquiring and estimating different physical parameters of transmitted signals and components in an optical network either at the receiver or at an intermediate node along the path \cite{Dong2016}. This enables the transmission system parameters relating to the channel quality to be known so that they can be compensated for. Common parameters include Chromatic Dispersion (CD), Polarization Mode Dispersion (PMD), Optical Signal to Noise Ratio (OSNR), Q-factor, Polarization Dependent Loss (PDL) and fiber non-linearities. Conventional OPM techniques required complete recovery of the transmitted signal to be able to deduce these parameters. However, in order to compensate for resultant signal degradation, these performance metrics need to be known at distributed points on the fiber link hence traditional techniques would add significant complexity and cost to the monitoring system which is not desired. Machine Learning(ML) has emerged as a key technique that can be used to process the received signal at different points and learn relationships between different characteristics of the received signal and impairments without having to completely demodulate the signal\cite{Dong2016,Khan2019}. In order to reduce costs, it is also required to monitor multiple impairments simultaneously and independently. This paper aims to survey existing work where machine learning has been applied to aid in OPM and discuss the performance of the different techniques. Moreover, since the bulk of the techniques employed in the current literature require advance knowledge of the signal type, we also review some works that identify the modulation format and bitrate. Furthermore, we briefly explore work on photonic reservoir computing which has more recently been shown to be applicable to modulation format recognition. 
\section{Related Work} There are a number of review works on the use of ML for various applications in optical networks. The authors in \cite{Dong2016} reviewed existing and future technologies for OPM for both direct and coherent detection systems, however, their work presented a broad range of techniques and did not focus on ML techniques. In \cite{Khan2019a,Musumeci2019,Mata2018a}, a detailed review of the different optical ML techniques was given highlighting how they have been used in optical communications and networking functions such as for OPM, fault detection, non-linearity compensation and software defined networking. They, however, had limited coverage of OPM and Modulation Format Recognition (MFR). \cite{Saif2020} surveyed work on OPM and MFR in detail. We update the current literature in this work as well as include the application of photonic reservoir computing which has only recently been applied to modulation format identification. The work in \cite{Amirabadi2019} considered a detailed description of machine learning techniques and reviewed works that had applied them in the optical communications space. \section{Feature Selection for OPM} Machine learning algorithms typically take input data features and learn relationships between them, thereby being able to group the inputs in a certain way or map the relationship to a function that can predict a required output. For OPM, the outputs are the type of impairment and its amount, while the inputs are signal representations. The signal representations are obtained from monitoring the signal waveform, polarization or spectrum \cite{Dong2016} or from Digital Signal Processing (DSP) techniques in the electrical domain after detection in direct detection schemes. 
Coherent receivers already include powerful DSP blocks and input features can directly be obtained from the asynchronously sampled output of these blocks \cite{Tanimura2016,Cho2019}, or from constellation diagrams that can be constructed from them \cite{Kashi2017,Wang2017}. The output of these various methods can then be utilized in the form of direct images or their properties, or statistical representations for example histograms, means, variances and moments to extract different features that can then be fed to the machine learning processing blocks. The features are chosen either manually by visual inspection or learnt by the ML algorithm and they show a clear distinction among different types of impairments and their levels. Table \ref{table:1} shows a summary of monitored impairments for different feature types in current works. \begin{table*} \centering \captionsetup[]{justification=centering} \caption{\bf Summary of features and monitored impairments used in current works} \begin{tabular}{llllll} \hline Feature Source & OSNR & PMD* & CD & Non-linearity &Crosstalk \\ \hline Eye diagram&\checkmark\cite{Thrane2017,Jargon2009a,Wu2009a}&\checkmark\cite{Jargon2009a,Wu2009a,Skoog2006b}&\checkmark\cite{Wu2009a,Skoog2006b}&\checkmark\cite{Wu2009a}&\checkmark\cite{Skoog2006b}\\ ADTP (Phase portrait)&\checkmark\cite{Jargon2009,Tan2014,Wu2011,Fan2018}&\checkmark\cite{Jargon2009,Anderson2009,Tan2014,Wu2011,Fan2018}&\checkmark\cite{Jargon2009,Anderson2009,Tan2014,Wu2011,Fan2018}&&\checkmark\cite{dods2006,Celik2018}\\ Asynchronous sampled signal amplitude&\checkmark\cite{Khan2012}&\checkmark\cite{Khan2012}&\checkmark\cite{Khan2012}\\ Asynchronous Constellation diagram&\checkmark\cite{Jargon2010}&\checkmark\cite{Jargon2010}&\checkmark\cite{Jargon2010}\\ Spectrum&\checkmark\cite{Wang2019b}\\ AAH&\checkmark\cite{Wan2018a,Cheng2020,Khan2017,Xia2019}\\ Asynchronous eye diagram&\checkmark\cite{Vitor2012}&\checkmark\cite{Vitor2012}&\checkmark\cite{Vitor2012}\\ Optical 
power&\checkmark\cite{Zheng2020}\\ Asynchronous sampled raw data&\checkmark\cite{Tanimura2016,Cho2019,Wang2019} \\ Constellation diagram&\checkmark\cite{Kashi2017,Wang2017}&&&\checkmark\cite{Caballero2018}\\ ASCS&\checkmark\cite{Fan2019}&\checkmark\cite{Fan2019}&\checkmark\cite{Fan2019}\\ IQH&\checkmark\cite{Saif2021}&&\checkmark\cite{Saif2021}&&\\ Stokes-space constellation&\checkmark\cite{Xiang2021}&&\\ \hline PMD* refers to 1st order PMD in this paper \end{tabular} \label{table:1} \end{table*} \subsection{Eye Diagrams} An eye diagram is a graphical representation of a signal waveform showing the amplitude distribution over one or more bit periods, with the symbols overlapping each other. The quality of the signal can then be determined from various characteristics of the eye opening for example jitter, SNR, dispersion, non-linearities. Eye diagrams have been used in the current literature to monitor OSNR, PMD, CD, non-linearity and crosstalk. Figure \ref{fig:eyediagram} shows the eye diagrams for an RZ signal subjected to different impairments\cite{Wu2009a}. \begin{figure}[ht] \centering \fbox{\includegraphics[width=\linewidth]{Images/EyediagramWu2009a.PNG}} \caption{Impact of various impairments on the eye diagram of an RZ signal\cite{Wu2009a}.} \label{fig:eyediagram} \end{figure} Visual inspection shows that different impairments and different levels of the same impairment produce distinct characteristics.These characteristics can be exploited by applying image processing techniques such as in \cite{Skoog2006b}, by defining statistical features from the sampled amplitudes for example means and variances at specific points on the eye diagram \cite{Thrane2017}, or by calculating the widely used parameters of the eye diagrams such as Q-factor, eye closure, root-mean-square jitter and crossing amplitude \cite{Jargon2009a,Wu2009a}. 
Construction of eye diagrams is dependent on the modulation format and requires timing synchronisation hence some form of clock recovery is required which can be expensive. An eye diagram also has no phase information about the signal. \subsection{Asynchronous Delay Tap Plots (ADTP's)} This technique also provides a visual representation of a signal known as a phase portrait. The signal waveform is split and one part of the signal delayed by a certain amount $\Delta t$. The signal $s(t)$ and its delayed version $s(t+\Delta t)$ are then sampled at the same instant and the pair of values (x,y) obtained plotted in a 2D histogram \cite{dods2006,Celik2018}. Figure \ref{fig:phaseportrait} illustrates how a phase portrait is created from delay tap sample pairs \cite{Anderson2009}. The sampling period, $T_s$ is independent of the bit duration, $T$ and can therefore be several magnitudes larger. The portraits can be treated as images and exploited using pattern recognition \cite{Anderson2009,Tan2014,Anderson2009a} and then image processing algorithms applied, or specific features extracted from them for example the work in \cite{Jargon2009} divided the phase portrait into quadrants and then defined statistical means and standard deviations of the (x,y) pairs and radial coordinates. Phase portraits are also dependent on the signal properties such as bitrate and modulation format and the tap delay. The tap delay is usually a certain fraction or multiple of the symbol rate and thus needs to be adjusted exactly for different datarates to allow accurate monitoring \cite{Khan2011}. ADTP's have been used for multiple impairment monitoring of OSNR, CD, crosstalk and 1st order PMD. In figure \ref{fig:adtpcompare}, the effect of various impairments on the ADTP of a $\text{10 Gbps}$ NRZ signal at two different delays, $T$ and $T/4$, as well as the corresponding eye diagrams are shown \cite{Celik2018}. 
\begin{figure}[ht] \centering \fbox{\includegraphics[width=\linewidth]{Images/phaseportraitanderson2009.PNG}} \caption{Generation of phase portrait\cite{Anderson2009}} \label{fig:phaseportrait} \end{figure} \begin{figure}[ht] \centering \fbox{\includegraphics[width=\linewidth]{Images/adtpcompare_celik2018.PNG}} \caption{ADTP's for a 10 Gb/s NRZ signal in the following scenarios: a) OSNR = 35 dB, b) OSNR = 25 dB, c) CD = 800 ps/nm , d) DGD = 40 ps, e) crosstalk = -25 dB and f) a combination of (b-f) \cite{Celik2018}}. \label{fig:adtpcompare} \end{figure} \subsection{Asynchronous Amplitude Histograms(AAH's)} AAH's are obtained from random asynchronous sampling of the signal within the bit period. The authors in \cite{Chen2004} showed that with a sufficient number of samples, the amplitude distribution can be accurately represented within a bit period. The amplitude samples are arranged in bins corresponding to their level, and then the count of samples within each bin is plotted against the bin. This is in contrast to the synchronous AH where the considered samples are within a specific window for example 10\% \cite{Celik2018} of the bit period around the center of the eye diagram at the optimal decision time. The peaks in the AAH correspond to the samples around the maximum and minimum values of the eye, and the samples in between correspond to those around the crossings of the rising and falling edges of the waveform. Amplitude histograms are simple and transparent to the transmitted signal characteristics such as modulation format and bitrate, however, the contribution of each individual impairment cannot be independently extracted hence they have not been used for multiple impairment monitoring. Furthermore, the monitoring accuracy is dependent on the number of samples \cite{Wan2018,Dong2016,Cheng2020}. The count of occurrences at each bin can then be used as input features such as in \cite{Wan2018,Khan2017b}. 
\cite{Xia2019} additionally used the variance of the amplitude values in each bin. Figure \ref{fig:aah} shows results of varying the OSNR on the AAH for a 16-QAM signal\cite{Khan2017b}. \begin{figure}[ht] \centering \fbox{\includegraphics[width=\linewidth]{AAHKhan2017b.PNG}} \caption{Impact of varying OSNR on the AH of a 16-QAM signal\cite{Khan2017b}}. \label{fig:aah} \end{figure} \subsection{Asynchronous Single Channel Sampling (ASCS)} In this method, the signal $s(t)$ is sampled asynchronously using one tap, and then the samples are shifted by $k$ samples and the sample pairs $s_i(t)$ and $s_{i+k}(t)$ used to construct a phase portrait. This method is less expensive than two-tap sampling \cite{Yu2014,Fan2019,Fan2020}. The generated phase portraits can be used as images for example in \cite{Fan2019,Fan2020}. \begin{figure}[ht] \centering \fbox{\includegraphics[width=\linewidth]{ascs.PNG}} \caption{Generation of a phase portrait from ASCS \cite{Fan2020}}. \label{fig:ascs} \end{figure} \subsection{Constellation Diagrams} A constellation diagram is a graphical representation of a digitally modulated signal, where received samples are represented in an I/Q diagram. They are used in coherent detection schemes and can be generated by techniques such as linear optical sampling \cite{Dorrer2005}, however, since coherent receivers already have embedded DSP blocks, they can be directly constructed from the asynchronously sampled data output of the DSP. Thereafter, they can be used to generate manually defined features for example \cite{Caballero2018} defined tangential and normal components of the noise of each symbol and then used averages and amplitude noise covariances as inputs, or their images can be directly input to the ML algorithm for image processing without the need for manual feature generation for example in \cite{Wang2017}. 
Constellation diagrams have only been used to measure OSNR and non-linear noise in coherent detection systems since the coherent receiver can already compensate for CD and PMD and therefore these impairments can be directly monitored. \subsection{In-phase Quadrature Histograms} IQH's were proposed in \cite{Saif2019z} as an extension of AAH's to include phase information for coherent systems. They contain similar information as constellation diagrams but with an additional representation of the amplitude in colour. They showed that it can be used to identify OSNR, PMD and CD although performance degraded in the presence of multiple impairments. \begin{figure}[ht] \centering \fbox{\includegraphics[width=\linewidth]{IQHsaif2019z.PNG}} \caption{Comparison of constellation diagram, AAH and IQH \cite{Saif2019z}.} \label{fig:IQH} \end{figure} \cite{Saif2021} derived 1D features from projections of IQH's on diagonal and horizontal axes. \subsection{Stokes Space Constellation} This diagram is obtained by plotting the last three components of the Stokes vector of the received complex signals from a coherent receiver in a 3D Stokes space. Different modulation formats have a specific number of distinguishable clusters in this space \cite{Boada2015,Mai2017,Szafraniec2010}. The authors in \cite{Xiang2021} obtained the cumulative distribution function (CDF) of one Stokes parameter while \cite{Zhang2020b} projected the constellation onto three different 2D planes and used the resultant plots as images such as in figure \ref{fig:stokes}. 
\begin{figure}[ht] \centering \fbox{\includegraphics[width=\linewidth]{stokes.PNG}} \caption{3D Stokes constellation of a BPSK and QPSK signal, as well as their corresponding projections in the 2D Stokes planes at OSNR=18dB \cite{Zhang2020b}.} \label{fig:stokes} \end{figure} \subsection{Other methods} The nature of asynchronous sampling means that certain information in the signal is lost, which could make it difficult in some cases to separate the effects of different impairments from the overall received signal in case they produce similar changes in the plots \cite{dods2006}. Furthermore, there is overlap in the distribution of signal amplitudes which makes it more challenging to extract individual distributions from AAH's in practice \cite{Khan2011}. Asynchronous eye diagrams \cite{Costa2012} and asynchronous constellation diagrams \cite{Jargon2010} can be constructed to mitigate this. In addition, \cite{Khan2011} also proposed asynchronously sampled amplitudes as a solution for better CD monitoring since previous works had shown that CD was severely impacted by changes in OSNR and Differential Group Delay and to eliminate the requirement for continuously adjusting the tap delay for multiple bitrates. Optical spectral data from an optical spectrum analyzer (OSA) and optical power have also been used in \cite{Wang2006} and \cite{Zheng2020} respectively. \section{Survey of ML-based Optical Performance Monitoring techniques} \subsection{OPM for Direct Detection systems} OPM modules for direct detection systems can be as simple as a photo-detector followed by an Analog to Digital Converter. In \cite{Skoog2006b}, the authors utilized multiple Support Vector Machines (SVM's) to classify different impairments using images of eye diagrams, characterised by 23 low order zernike moments. Simulation data was used to train the model after impairments of CD, PMD and cross talk were applied. 
Four SVM's were required, one for each impairment and an additional one for the normal case since they are binary classifiers. The number of input images used for training the model were 31, 107, 20 and 6 for CD, PMD, crosstalk and normal respectively. Experimental verification was then done using the model. Results collected from 3, 11 and 3 images for CD, PMD and crosstalk respectively showed that the method could classify the simulated and experimental data with accuracies of 95\% and 60\%. However, it could only identify the type of impairment but not the amount. The authors proposed applying a nearest neighbors technique after the SVM to enable this. In \cite{Jargon2009a}, the authors demonstrated the use of an Artificial Neural Network (ANN) to predict multiple impairment levels simultaneously. The ANN was trained with input data from eye diagrams of signals with two different bit rates and modulation schemes i.e. 10 Gb/s non-return to zero on-off keying (NRZ-OOK) and 40 Gb/s return-to-zero differential phase shift keying (RZ-DPSK) at different combinations of CD, PMD and OSNR. The ANN had one hidden layer and 12 hidden neurons, and 4 input features were extracted from the eye diagram i.e. (Q-factor, closure, jitter and crossing amplitude/level of transition between adjacent zeros for NRZ-OOK/RZ-DPSK respectively). The model was trained using data from 125 simulations consisting of different impairment combinations and then validated with 64 simulations. The ranges used for OSNR, CD and Differential Group Delay (DGD) were 16-32 dB, 0-800 ps/nm and 0-40 ps for NRZ-OOK, and 16-32 dB, 0-60 ps/nm and 0-10 ps for RZ-DPSK. The results showed a correlation coefficient of 0.91 and 0.96 for the NRZ-OOK and RZ-DPSK signals respectively. The authors did the same in \cite{Jargon2009}, but used as input manually defined parameters from ADTP's for the 10 Gbps NRZ-OOK case. 7 input features and an ANN consisting of 1 hidden layer and 28 hidden neurons were considered. 
The results showed a better performance than those derived from the eye diagram achieving a correlation coefficient of 0.97. The impairment ranges used were the same as in their previous work. In \cite{Jargon2010}, the same authors extended their method to monitor the same three impairments for a 40 Gbps RZ-QPSK signal and manually defined input parameters using asynchronous constellation diagrams. The same ANN in \cite{Jargon2009} was used and the results showed a correlation coefficient of 0.987, and root mean square errors (RMSE's) of 0.77 dB, 18.71 ps/nm and 1.17 ps for OSNR, CD and DGD respectively. The impairment ranges tested were 12-32 dB, 0-200 ps/nm and 0-20 ps for OSNR, CD and DGD. \cite{Wu2009a} used the same ANN technique with 1 hidden layer and 12 hidden neurons to simulate multi-impairment monitoring on 40 Gbps RZ-OOK and RZ-DPSK signals. 4 input parameters (Q-factor, eye-closure, RMS jitter and crossing amplitude) were defined from eye diagrams. The ANN was trained and tested with data from both simulation and experiment. In the simulation, 125 and 64 eye diagrams were used for training and validation respectively, achieving a correlation coefficient of 0.97 and 0.96 for OOK and DPSK respectively and average errors for OSNR, CD and DGD of 0.57 dB, 4.68 ps/nm and 1.53 ps for OOK and 0.77 dB, 4.47 ps/nm and 0.92 ps for DPSK. The simulations were followed up with an experiment, in which 20 and 12 eye diagrams were used for training and testing respectively to estimate OSNR and CD. The results showed a better performance than simulation with 0.99 correlation coefficient for both signals. The average errors for OOK were 0.58 dB and 2.53 ps/nm while those for DPSK were 1.85 dB and 3.18 ps/nm. The ranges tested for OSNR, CD and DGD were 16-32 dB, 0-60 ps/nm and 1.25-8.75 ps. 
The authors then monitored the impact of accumulated fiber non-linearity in a 40 Gb/s RZ-DPSK Wavelength Division Multiplexed (WDM) system consisting of 3-channels using a simulation in which additional features consisting of statistics of the 1 and 0 values were defined giving a total of 8 inputs. The input optical power was varied from -5 to 3 dBm, while OSNR, CD and DGD were tested over the ranges from 20-36 dB, 0-40 ps/nm and 0-8 ps. Equally good results were obtained: correlation coefficient of 0.97, and mean error of 0.46 dB, 1.45 dB, 3.98 ps/nm and 0.65 ps for optical power, OSNR, CD and DGD from 135 training samples and 32 testing samples. In \cite{Anderson2009} simultaneous measurement of CD and DGD was done on a 40 Gb/s NRZ-DPSK signal. ADTP's were generated and then kernel based ridge regression applied to predict the impairments using 900 features. Simulation was done for various combinations of CD, DGD and OSNR ranging from 0-700 ps/nm, 0-20 ps and 13-26 dB respectively. 1200 phase portraits consisting of 900 features each were used for training, and independent training for a single impairment in the presence of all other impairment ranges was done. 500 phase portraits were used for verification. RMSE's of +/-11 ps/nm and +/-0.75 ps for CD and DGD respectively were achieved. Experimental verification was also done using a split of 1500:500 phase portraits for training:validation for OSNR, CD and DGD ranging from 15-25 dB, -400 to 400 ps/nm and 0-22.5 ps. The total training time was 3 hrs and RMSE of +/-11 ps/nm and +/-1.9 ps for CD and DGD obtained. Prior knowledge of modulation and bit rate was assumed. The authors in \cite{Khan2012} simultaneously monitored OSNR, PMD and both the magnitude and sign of CD using an ANN whose input features were derived from the first five empirical moments of asynchronously sampled amplitudes. 
The ANN consisted of a single hidden layer with 42 neurons and was trained with simulation data for 40 and 56 Gb/s RZ-DQPSK and 40 Gb/s RZ-DPSK signals and 3627 sets of moments for each datarate and modulation format. Results collected over OSNR, CD and DGD ranges of 10-26 dB, -500 to 500 ps/nm and 0-14ps showed RMS errors of 0.1 dB, 27.3 ps/nm and 0.94 ps respectively for 40 Gb/s RZ-DQPSK system, 0.1 dB,29 ps/nm and 1.3 ps for RZ-DQPSK and 0.1 dB, 17 ps/nm and 1 ps for a 40 Gb/s RZ-DPSK system. The authors proposed increasing the number of moments to improve the results.\newline Table \ref{table:2} summarizes existing works for direct detection systems. \begin{table*} \centering \captionsetup[]{justification=centering} \caption{\bf Summary of existing OPM works for direct detection} \begin{tabular}{p{4cm}p{5cm}p{5cm}p{1cm}} \hline \textbf{ML Algorithm}&\textbf{Signal Type} &\textbf{Impairment (range)}&\textbf{Ref}\\ \hline SVM&&CD, DGD, Crosstalk &\cite{Skoog2006b}\\ ANN(1;12) & 10 Gbps NRZ-OOK&OSNR(16-32), CD(0-800), DGD(0-40) & \cite{Jargon2009a}\\ &40 Gbps RZ-DPSK&OSNR(16-32), CD(0-60), DGD(0-10)\\ ANN(1;28)&10 Gbps NRZ-OOK&OSNR(16-32), CD(0-60),DGD(0-10)&\cite{Jargon2009}\\ ANN(1;28)&40 Gbps RZ-QPSK&OSNR(12-32), CD(0-200), DGD(0-20)& \cite{Jargon2010}\\ ANN(1;12)&40 Gbps RZ-OOK&OSNR(16-32), CD(0-60), PMD (1.25-8.78) &\cite{Wu2009a}\\ &40 Gbps RZ-DPSK&OSNR(16-32),CD(0-60),PMD (1.25-8.78) \\ &40 Gbps RZ-DPSK, 3 channel WDM & optical power(-5 to 3 dBm), OSNR(20-36), CD(0-40) PMD(0-8)\\ ANN(1;40)&40 Gbps QPSK &OSNR(10-30), CD(0-200), DGD(0-25) &\cite{Costa2012}\\ ANN(1;3) &32 Gbd PDM 64-QAM& OSNR(4-30)&\cite{Thrane2017}\\ ANN(1;42)&40 Gbps RZ-DQPSK&OSNR(10-26), CD(-500-500), DGD(0-14)&\cite{Khan2012}\\ &56 GbpsRZ-DQPSK&\\ &40 Gbps RZ-DPSK\\ Kernel ridge regression &40 Gbps NRZ-DPSK&CD(0-700), DGD(0-20)&\cite{Anderson2009}\\ &&CD(-400 to 400)\textsuperscript{*}, DGD(0-22.5)\textsuperscript{*}& \\ PCA&10/20 Gbps RZ-OOK, 40/100 Gbps PDM RZ-QPSK, 100/200 Gbps PDM NRZ 
16-QAM &OSNR(14-28), CD(-500-500), DGD(0-10)&\cite{Tan2014}\\ ANN(1;12)&100 Gbps QPSK&OSNR(14-32),CD(0-50),DGD (0-10)&\cite{Wu2011}\\ MTL-ANN&28 Gbd NRZ-OOK, PAM4, PAM8 &OSNR(10-25), (15-30), (20-35)& \cite{Wan2018}\\ MT-DNN(4:100,50,30,2) with transfer learning &10 Gbd PDM 16 , 64 QAM & OSNR(14-24), (23-34)&\cite{Cheng2020}\\ MTL-ANN&10 Gbd NRZ-QPSK,32 Gbd PDM-16QAM&OSNR(1-30)&\cite{Zheng2020}\\ MTL-CNN&10/20 Gb/s RZ-OOK, NRZ-OOK, NRZ-DPSK &OSNR(10-28), CD(0-450), PMD(0-10)&\cite{Fan2018}\\ MTL-CNN&60/100 Gbps QPSK, 16, 64 QAM&OSNR(10-28), CD(0-450), PMD(0-10)&\cite{Fan2019}\\ MTL-DNN&14/28 GBd QPSK, 16QAM &OSNR (10-24),(15-29), CD (0, 858.5, 1507.9)&\cite{Luo2021}\\ ANN(5;40)&4,16,32,64,128 QAM&OSNR(15-20)&\cite{Zhang2018}\\ \hline \multicolumn{4}{p{14cm}}{All units for OSNR,CD,PMD are in dB,ps/nm and ps respectively. *indicates experimental results and simulation results otherwise. ANN(x;y): x is the number of hidden layers, y is the number of neurons in the hidden layer.} \end{tabular} \label{table:2} \end{table*} The work presented in \cite{Tan2014} monitored multiple impairments and identified both modulation format and bit rate using Principal Component Analysis (PCA). Input features were derived from images of ADTP's and the method was shown to be suitable for heterogeneous networks. Simulations were used to generate 26,208 ADTP's from different combinations of impairments, modulation schemes and bitrate. Previous methods seen so far have assumed knowledge of both bitrate and modulation. OSNR, CD and DGD were varied in the range 14-28 dB, -500 to 500 ps/nm and 0-10 ps. The signal combinations used were 10/20 Gb/s RZ-OOK, 40/100 Gb/s PDM RZ-QPSK and 100/200 Gb/s PDM NRZ 16-QAM. The results showed an overall mean estimation error of 1 dB, 4 ps/nm and 1.6 ps. The authors also investigated performance of the method under fiber non linearity and accuracy slightly reduced to give mean errors of 1.2 dB, 12 ps/nm and 2.1 ps. 
To mitigate this, they proposed selection of additional features to characterise different non-linearity coefficients and link/span lengths. In this way CD, OSNR and DGD can be monitored without prior knowledge of the bit rate and modulation format, provided the signal type was part of the training data. The authors in \cite{Thrane2017} used an ANN for in-band OSNR monitoring on 32 Gbaud directly detected PDM-QAM signals. The input features were selected from eye diagrams. This method required knowledge of the pulse shape and modulation format hence one neural network was trained for each combination. It was composed of 1 hidden layer, 3 hidden neurons and only one input feature i.e. the variance at the maximum amplitude points on the eye diagram. Experimental verification was done for OSNR's in the range of 4-30 dB but only in white Gaussian noise. The results showed that OSNR estimation was accurate between 4-17 dB with a mean error of 0.2 dB but worsened from 17-30 dB. This was attributed to the fact that eye diagrams at higher OSNR's did not vary very significantly and hence had less distinguishable features. Since real transmission channels face other impairments, simulation was done for chromatic dispersion (CD) and the method found to be unimpaired up to 250 km on a dispersion uncompensated link. Verification of the method in the presence of Polarization Mode Dispersion (PMD), Polarization Dependent Loss and non-linear impairments was left to future work. The authors in \cite{Wu2011} investigated multi-impairment monitoring of OSNR, CD and PMD using an ANN, consisting of 1 hidden layer and 12 neurons. The ANN was trained with simulated data from 180 ADTP's. 7 statistical features were extracted from each ADTP obtained from sampling a 100 Gb/s QPSK signal over impairment ranges for OSNR, CD and DGD of 14-32 dB, 0-50 ps/nm and 0-10 ps respectively. The validation was done with 144 samples. 
Balanced detection was shown to perform better than single ended detection through simulation with correlation coefficients of 0.995 and 0.96 respectively. The RMSE's were obtained as 1.62 dB, 8.75 ps/nm, 7.02 ps for OSNR, CD and DGD in single detection and 0.45 dB, 3.67 ps/nm and 0.8 ps for balanced detection. Experimental data was used to validate the performance for balanced detection and produced correlation of 0.997 and RMSE's of 1.27 dB, 2.22 ps/nm and 0.91 ps for OSNR, CD and DGD respectively. \cite{Costa2012} used a single hidden layer ANN with 40 neurons in the hidden layer to simultaneously monitor PMD, CD and OSNR using parametric asynchronous eye diagrams (PAED's) to extract 24 statistical features of positions in the PAED from a 40 Gb/s QPSK signal and showed their method to produce RMSE's of <20 ps/nm, <1.3 ps and 1.5-2 dB respectively via simulation for ranges 0-200 ps/nm, 0-25 ps and 0-30 dB. In \cite{Wan2018} a Multi Task Learning ANN (MTL-ANN) was investigated using features extracted from amplitude histograms and used to monitor OSNR and to identify the modulation format. Simulations were done on 28-Gbaud NRZ-OOK, PAM4 and PAM8 over an OSNR range of 10-25 dB, 15-30 dB and 20-35 dB respectively and CD range of -100 to 100 ps/nm. A total 9072 and 1008 simulated AH's were used for training and testing respectively. Different combinations of OSNR and modulation format at specific CD values were tested achieving a MSE of 0.12 dB. Experimental verification was done for OSNR ranges of 14-29, 17-32 and 22-37 dB for OOK, PAM4, PAM8 and datasets consisting of 4320 and 480 AH's for training and testing. The results showed higher accuracy than single task learning ANNs (STL-ANN's), achieving MSE of 0.11 dB compared to 0.4 dB for a STL-ANN with a similar structure. This method required optimization of the bin number. Fewer bins were shown to have less accuracy while more bins led to a more complex ML structure. 
The authors used an optimal number of 100 in this work. OSNR and modulation format monitoring was done in \cite{Cheng2020} using a multi-task deep neural network with transfer learning (DNN-TL) using AH's as inputs. The DNN was trained with 400 AH's generated from simulation and then experimental verification for PDM-16 and 64-QAM, 10 Gbaud signals was done and the results achieved RMSE of 1.09 dB for OSNR over a range of 14-24 dB and 23-34 dB for PDM-16 and 64-QAM respectively. The ANN structure had 4 hidden layers with 100/50/30/2 neurons respectively. TL from simulation to experiment achieved a reduction in required training samples (243) compared to when TL was not used (322) and a RMSE of 1.09 dB. \cite{Zheng2020} proposed a modulation format independent method to monitor the OSNR, in a WDM system. Optical power measured at different center wavelengths was used as input features to a MTL-ANN with 64 neurons per layer. 5 samples for each OSNR (1-30 dB) were collected and a ratio of 70:30 samples was used for training:testing and shown experimentally to estimate the OSNR with a Mean Absolute Error (MAE) of 0.28 dB and RMSE of 0.48 dB for both the 10 Gbaud NRZ-QPSK and 32 Gbaud PDM-16QAM over an OSNR range of 1-30 dB. It was also shown to be insensitive to CD and PMD. The same ANN was shown to be capable of simultaneously monitoring baud rate and launch power without deploying two additional ANN's. For launch power in the range of 0-8 dBm, MAE and RMSE were 0.034 dB and 0.066 dB respectively. A MTL-CNN was used in \cite{Fan2018} to do multiple impairment monitoring in combination with joint bit rate and modulation format identification. 6600 phase portraits were generated from simulations of six different signal types i.e. 10/20 Gb/s RZ-OOK, NRZ OOK and NRZ-DPSK and impairments varied over the ranges 10-28 dB, 0-10 ps and 0-450 ps/nm for OSNR, DGD and CD respectively. 90\% of the images were used to train the CNN while 10\% were reserved for testing. 
The results showed RMSE's of 0.73 dB, 1.34 ps/nm and 0.47 ps. The same authors improved their method by using phase portraits from ASCS in \cite{Fan2019} and features from the various CNN layers as opposed to only those in the last layer. In this method, the features were extracted from all the layers and transformed into the same space and then multiple tasks were trained for each of OPM, MFR and bitrate identification (BRI). 60/100 Gb/s signals for 3 modulation formats QPSK, 16 and 64-QAM were generated by simulation and the same impairment ranges and number of phase portraits were used. RMS errors of 1.52 ps/nm, 0.81 dB, and 0.32 ps were obtained. \cite{Luo2021} used adaptive ADTP's and AAH's as multiple inputs to a multi-task DNN to monitor OSNR in the range 10-24 dB and 15-29 dB for QPSK and 16 QAM signals respectively and identify the bitrate, modulation format and chromatic dispersion. 2 baudrates (14/28) and 3 values of CD (0, 858.5 and 1507.9 ps/nm) were experimentally tested. In the AADTP, a single ADC is used to sample the data generating samples $x_m$ ($m \geq 1$) as opposed to two tap delay sampling and then a fixed time delay is introduced by setting the second sample of each pair as $y_m = x_{m+n}$, $n \geq 1$. The same samples are used to generate AAH's. 36,000 AADTP's and AAH's were generated and 28,800 of them used to train the DNN. The method achieved a MAE of 0.2867 dB and CD identification accuracy of 99.83\%. A simple 3 layer ANN was used in \cite{Zhang2018} to jointly monitor OSNR (15-20 dB) and identify the MF in an IM-DD QAM-OFDM system. Two ANN's were used; one for MFI and then once the MF was known, passed to the second ANN which was trained for each modulation format separately to identify the OSNR. AAH's were derived from the IQ output by considering either the I or Q samples of 4, 16, 32, 64 and 128 QAM signals. To improve the OSNR accuracy at low OSNR's, 5 distinct features were calculated from the AH's and used as input to the second ANN i.e. 
mean, variance, range, interquartile and median. The errors obtained for OSNR prediction were < 1 dB. Table \ref{tab:3} shows the performance of the different techniques that have been surveyed. \subsection{Coherent Detection} Coherent detectors already incorporate impairment compensation techniques at the receiver and therefore linear impairments - CD and PMD can already be monitored. OSNR then becomes the key impairment that still requires monitoring. Many of the previous methods discussed required the careful selection of features from sampled data. These features varied for different system parameters. As networks evolve, they will transmit data at varying bitrates and modulation formats which may change randomly hence more advanced techniques are required. The authors in \cite{Tanimura2016} used experimental data to train a Deep Neural Network (DNN) to monitor OSNR of a 16 GBd DP-QPSK signal with asynchronously sampled raw data from a coherent receiver. The DNN was trained with 3 different hidden layer structures (1,3,5) each comprising 512 neurons, and 3 training sample sizes (4000, 40,000 and 400,000). The 4 tributary output from the coherent receiver was then fed to the DNN, each tributary containing 512 samples generated from experiment. The 5 layer, 400,000 case was selected as the best case. The trained DNN was then used to test 10,000 samples resulting in an average error of 1.6 dB over an OSNR range of 7.5 - 31 dB. In \cite{Cho2019}, the same authors extended their method to a Convolutional Neural Network (CNN) with the same 512 sample, 4 tributary input which was trained with experimental data containing 1,000,000 samples from 14 GBd and 16 GBd DP-QPSK, 16-QAM and 64-QAM signals with different OSNR values ranging from 11-33 dB. The CNN was validated using 10,000 test samples for each modulation format. The results obtained showed a bias error of less than 0.3 dB, however the training phase took several hours. 
They showed the method to be insensitive to CD and left non-linearity to future work. \cite{Kashi2017} used an ANN with 1 hidden layer and 6 hidden neurons to estimate non-linear noise. Simulations were done for a 56.8 GBd DP 16-QAM signal transmitted over fiber channels with varying characteristics for example transmission distances, optical power, number of channels , types of fiber etc. The ANN was provided with the link parameters as well as amplitude noise co-variance (ANC) of the input symbols resulting from fiber non-linearity for 240 simulated cases. 70\% of the samples were used for training and 30\% for testing. The results showed that the errors obtained in the OSNR are less than 0.6 dB for two experimental cases. In \cite{Caballero2018a} a neural network was used to estimate both linear and non-linear noise simultaneously using input features derived from constellation plots and the amplitude noise co-variance. The ANN consisted of 1 hidden layer and 7 neurons and was trained with a 35 Gbd DP-16 QAM signal transmitted over different WDM channels, with varying fiber types and lengths of 320-1200 km, launch power of -2.5 to 0.5 dBm and different applied Amplified Stimulated Emission (ASE) to non-linear noise ratios. The total samples were 2160. Simulations and experimental data for varying optical power in an 800 km link were used and produced results with a std error of 0.23dB. In \cite{Wang2019}, a Long Short-Term Memory (LSTM) neural network was used to estimate OSNR without need for manual feature extraction. The four tributary output from the coherent receiver was used as input. The LSTM-NN was trained from simulation of 28/35 GBd PDM 16 and 64-QAM signals and OSNR varied between 15-30 dB. 512 data samples were collected for each OSNR value for a total of 32,768 samples with 70\% used for training and the rest for testing. 
The Mean Absolute Error (MAE) was found to be 0.1, 0.04, 0.05 and 0.04 dB for 28 GBd PDM 16 and 64-QAM and 35 GBd PDM 16 and 64-QAM respectively. The accuracy of the method was shown to be unaffected by linear impairments of CD and PMD through simulation with variable fiber length. Experimental verification of the model was done on a 34.94 GBd PDM 16-QAM signal with 5,632 samples over an OSNR range of 15-25 dB, resulting into a MAE of 0.05 dB. The work in \cite{Khan2017} used a DNN to simultaneously identify modulation format and monitor OSNR. One DNN consisting of 2 hidden layers (45 and 10 neurons respectively) was first used to identify the modulation format and then the result passed to a second stage with multiple 2-hidden layer DNN's (45/40 and 10 neurons respectively) trained per modulation format and the second DNN selected based on 1st stage results.The OSNR could then be predicted for different modulation formats. The input features were obtained from amplitude histograms of varying combinations of modulation formats and OSNR's. 133 experimentally generated AH's for different combinations of modulation format and OSNR were used to train the DNN's and then tested on 57 AH's for 112 Gb/s PDM QPSK, 112 Gb/s PDM 16-QAM, and 240 Gb/s PDM 64-QAM signals resulting in mean errors of 1.2, 0.4 and 1 dB respectively. 
\clearpage \begin{table*} \caption{\bf Performance comparison of existing OPM works for direct detection} \label{tab:6} \begin{tabular}{p{3cm}p{5cm}p{7cm}l} \hline {\textbf{ML algorithm}} &{\textbf{Features (training:testing)}}&{\textbf{Performance*}}&{\textbf{Ref}} \\\hline SVM& 23 zernike moments from each eye diagram (164:17)&${a=95\% }$ and ${a=60\%\textsuperscript{*} }$ & \cite{Skoog2006b}\\ ANN(1,12) &4 inputs from eye diagram &10 Gbps NRZ-OOK; ${c=0.91}$ \newline 40 Gbps RZ-DPSK; ${c=0.96}$ &\cite{Jargon2009a}\\ ANN & 9600 AH's (80:20) & MAE = 0.167\textsuperscript{*} & \cite{Yuan}\\ ANN(1,28)&7 statistics per ADTP (125:64) &10 Gbps NRZ-OOK; ${c=0.97}$ &\cite{Jargon2009}\\ ANN(1,28)&7 statistics per constellation diagram (216:125) &40 Gbps RZ-QPSK; ${c=0.97}$ \newline RMSE 0.77, 18.71, 1.17 (OSNR,CD,DGD) & \cite{Jargon2010}\\ ANN(1,12)&4 inputs per eye (125:64) \newline \newline \newline \newline (20:12)\textsuperscript{*} \newline \newline (135:32) & 40 Gbps RZ-OOK; ${c=0.97}$ \newline ME 0.57, 4.68, 1.53 (OSNR, CD, PMD) \newline 40 Gbps RZ-DPSK; ${c=0.96}$ \newline ME 0.77, 4.47, 0.92 (OSNR, CD, PMD) \newline 40 Gbps RZ-OOK; ${c=0.99}$\textsuperscript{*}, \newline ME (0.58, 2.53)\textsuperscript{*} (OSNR, CD) \newline 40 Gbps RZ-DPSK;${c=0.99}$\textsuperscript{*}, ME (1.85, 3.18)\textsuperscript{*} \newline 40 Gbps RZ-DPSK, 3 channel WDM; ${c=0.97}$ \newline ME 0.46, 1.45, 3.98, 0.65 (power, OSNR, CD, PMD) &\cite{Wu2009a}\\ ANN(1,3) & 1 feature per eye (1664:832)\textsuperscript{*} & 32 GBd PDM 64-QAM 0.2dB MSE &\cite{Thrane2017}\\ ANN(1,42)&3627 sets of empirical moments per BR and MF &40 Gbps RZ-DQPSK; RMSE 0.1, 27.3, 0.94 (OSNR, CD, PMD) \newline 56 Gbps RZ-DQPSK; RMSE 0.1, 29, 1.3 \newline 40 Gbps RZ-DPSK; RMSE 0.1, 17, 1 &\cite{Khan2012}\\ Kernel ridge regression & (1200:500) phase portraits, 900 features each (1500:500) \textsuperscript{*}&RMSE +/-11 +/-0.75 (CD, PMD) \newline RMSE +/-11\textsuperscript{*} and 
+/-1.9\textsuperscript{*}&\cite{Anderson2009}\\ PCA &26,208 ADTP's (70:30; 60:40; 50:50)&10/20 Gbps RZ-OOK, 40/100 Gbps PDM RZ-QPSK,100/200 Gbps PDM NRZ 16-QAM;ME 1, 4, 1.6 (OSNR, CD, PMD) &\cite{Tan2014}\\ ANN(1,12)& (180:144) ADTP's, 7 features each&100 Gbps QPSK ; balanced detection c= 0.995, 0.997* RMSE; 0.45, 1.27* (OSNR) , 3.67, 2.22* (CD), 0.8, 0.91* (PMD) \newline single detection; c=0.96 , RMSE 1.62, 8.75, 7.02 (OSNR, CD, PMD) &\cite{Wu2011}\\ MTL-ANN (1,100;2;50)& (9072:1008) \newline (4320:480)*&28 Gbd NRZ-OOK, PAM4, PAM8; MSE 0.12 \newline 0.11*& \cite{Wan2018}\\ MTDNN-TL(4,100,50,30,2)& (440:243)\textsuperscript{*} &10 Gbaud PDM 16 , 64 QAM;RMSE 1.09&\cite{Cheng2020}\\ ANN(1,40) & 24 features from PAED &40 Gbps QPSK; ME <20, < 1.3, 1.5-2 (CD, PMD, OSNR) &\cite{Costa2012}\\ MTL-ANN&5 features per OSNR&32 Gbaud PDM 16QAM and 10 Gbaud QPSK MAE 0.28,RMSE 0.48 (OSNR) &\cite{Zheng2020} \\ MTL-CNN&6600 ADTP's&RMSE 0.73, 1.34, 0.47 (OSNR, CD, PMD) &\cite{Fan2018}\\ MTL-CNN&6600 ASCS portraits & RMSE 1.52, 0.81, and 0.32 (CD, OSNR, PMD) &\cite{Fan2019}\\ MTL-DNN &36,000 AADTPs and AAH's each &MAE 0.2867 (OSNR) a= 99.83\% (CD) &\cite{Luo2021}\\ ANN(5,40,1)&5 statistical features from AH& error <1.1 OSNR&\cite{Zhang2018}\\ \hline \multicolumn{4}{p{16cm}}{Units for OSNR, PMD, DGD are dB,ps/nm, ps respectively, Performance* indicates experimental results, else simulation results are indicated; a=accuracy, c=correlation} \end{tabular} \label{tab:3} \end{table*} \clearpage This method however was shown to take significant training time and computational power. \cite{Yuan} employed the same method for multiple QAM formats but added an anomaly detector between the MFI ANN and OSNR monitor to improve accuracy. 9600 AH's of 100 bins each were generated for 12.5 GBd signals and 6 modulation formats. The OSNR was varied over the ranges (10-25) for QPSK and 6-QAM, (15-30) for 16-QAM and (20-35) for 16, 48 and 64-QAM. Experimental results showed a MAE of 0.167 dB.
The authors in \cite{Wang2017} used a CNN to estimate OSNR and recognize modulation format using as input images of constellation diagrams. Simulations were done for 6 modulation techniques i.e. QPSK, 8PSK, 8-QAM, 16-QAM and 32-QAM over OSNR ranges of 15-30 dB and 64-QAM in the OSNR range of 20-35 dB. Experiments were carried out for 2-QPSK and 16-QAM. CD was also varied between -100 and 100 ps/nm. The training set consisted of 9600 constellations. The simulation results showed >95\% accuracy for 64-QAM and >99\% accuracy for other formats. They also compared 4 other commonly used algorithms; decision tree with 100 splits, SVM, k-nearest neighbors with 10 neighbors, and BP-ANN with 50 hidden neurons, and found that the CNN achieved better results than the rest at the expense of some computational complexity and large training time. Similar to other methods using constellation diagrams, it performed better for low SNRS <21 dB. Experimental verification was done for QPSK and 16-QAM signals, testing with 20 constellations and results showed maximum error of 0.6 and 0.7 dB respectively. In \cite{Xia2019}, the authors used a DNN with transfer learning to monitor OSNR on 56 Gb/s QPSK signals. AH's of the signals were used as input features and trained over an SNR range of 5-35 dB. Each sample AH consisted of 80 bins and the variances were also considered for a total of 81 features per sample. Physical layer parameters were also varied for example launch power (6-8 dB), dispersion (0-600 ps/nm) and bitrates (28-56 Gb/s). The ANN with 5-hidden layer structure bearing 64, 32, 16, 8 and 4 neurons respectively was trained with simulated data and then tested with 128,000 experimentally generated samples, achieving a RMSE of < 0.1 dB. In \cite{Wang2019b}, four different algorithms were applied to spectral data from a 20 Gbps QPSK signal i.e. 
SVM, ANN with 1 hidden layer and 100 hidden neurons, k nearest neighbors with 10 neighbors and decision tree with 20 splits in a coherent system to estimate OSNR. Training was done with 30 spectra consisting of 4096 samples each collected over an OSNR range between 15-30 dB and the ratio of training:testing data was 2:1. Experimental verification using the same amount of data found that the SVM performed better for the test parameters and took the least computation time. Estimation accuracy was found to be 100, 100, 73.124 and 65.625 for SVM, k-nearest neighbors, decision tree and ANN respectively. The poor performance of the ANN was attributed to a large number of input neurons (4096) hence making it prone to underfitting due to increased model complexity. The testing time was also checked and the SVM and KNN found to take the least and longest time respectively. \cite{Zhao2020} used a binary CNN in which the activation and weights were constrained to +/-1 as opposed to floating values to predict OSNR for 9 different 12.5 GBd M-ary QAM signals. Experimental data consisting of gray-scale images of ring constellation diagrams were used. The total dataset consisted of 14,400 images, 100 images per modulation format for each of the 16 OSNR values. The OSNR range was 10-25 dB for QPSK, 6, 8 and 12-QAM, 15-30 dB for 16 and 24-QAM and 20-35 dB for 32, 48, 64-QAM. The method produced average accuracy of 98.91\% and was shown to be slightly less accurate than a floating CNN (99.95 \%) and similar to a multi-layer perceptron (98.86\%) of similar structure, however with reduced energy and execution time. In \cite{Yu2019}, the authors used a MTL-ANN to do OSNR estimation and MFI similar to their earlier work in \cite{Wan2018}, but applied to a coherent receiver and 9 M-QAM formats at 12.5 GBd. Experimentally generated ring constellation diagrams were transformed to AH's consisting of 200 bins each and used as input features.
They were generated over an OSNR range of 10-25 dB for QPSK, 6, 8 and 12-QAM, 15 - 30 dB for 16 and 24-QAM and 20-35 dB for 32, 48, and 64-QAM. 100 AH's were generated per OSNR value and modulation format for a total dataset of 14,400 split into a training:test set of 90:10. The ANN consisted of 1 input layer with 200 neurons, and 2 specific hidden layers for OSNR, while one specific hidden layer was used for MFI, consisting of half the neurons in the previous layer. The optimal neuron number for the shared hidden layer was found to be 350. Results showed 98.7\% accuracy and RMSE of 0.68 dB when using regression and classification respectively. \cite{Wang2019x} used an LSTM-NN(160,128,2) to simultaneously predict CD (1360- 2040 ps/nm) and OSNR(15-30 dB) independent of the bitrate (BR) and MF for 28/35 GBd PDM 16/64 QAM signals, using as input the 4 tributary output of the coherent receiver. 512 data samples are generated by simulation for different MF, BR, OSNR and CD and 70\% used for training. The prediction performance obtained was MAE of <0.1dB and 0.64 ps/nm respectively. \cite{Wang2021} used an ANN to estimate OSNR using eigen values consisting of 2nd and 4th order moments and various OSNR's extracted from the rings of the constellation diagrams as input features. The system was then simulated with 112 Gb/s QPSK, 16 QAM and 120 Gb/s 64-QAM signals and OSNR ranges of 15-26, 19-29 and 22-31 dB respectively. The number of input features for each of the modulation schemes is 3, 3, 9 and the hidden neurons are 5, 5, 12. RMSE's of 0.17, 0.3, 0.68 dB were obtained. Experimental results produced RMSE's of 0.46 and 0.65 for 10/20 Gbd QPSK/16-QAM generated in OSNR ranges of 13-26 and 20-30 dB. The authors in \cite{feng2020} use a MTL-CNN to experimentally estimate OSNR and identify MF for 28 GBd PDM 8, 16, 32, 64 QAM and 8-PSK and QPSK signals resulting in mean errors of 0.26, 0.4, 0.85, 0.64, 0.17 and 0.19 respectively. 
A total of 30,600 images of intensity density and differential phase density at different OSNR ranges QPSK (10-30), 8PSK, 8, 16 QAM (12-30), 32 QAM (17-33), 64 QAM (18-33) are used as input features and 85\% used for training. The authors in \cite{Ye2021} monitored OSNR using an LSTM-NN but considered the prediction as a classification problem by defining the continuous OSNR range (15-24 dB) into discrete 1 dB intervals. The NN consisted of 8, 48, 64, 10 neurons for the input, memory, hidden and output layers respectively and the dataset size was 3,000 generated from the IQ output of the coherent receiver, with 75\% of the samples used for training. Simulation was done on a 30 GBd PDM 16 QAM signal resulting in standard deviation within 0.4 dB while experimental verification on a 20 GBd DP-QPSK signal resulted in a standard deviation within 0.67dB. \cite{Saif2021} considered OPM for few mode fibers and monitored OSNR, CD and mode coupling. Three ML algorithms were used i.e. SVM, random forest and CNN. The input features were obtained by considering 2D IQH's and their 1D projections in different planes. 200 datasets were generated for each impairment value. In their simulation, the CNN showed the best performance and was then used to experimentally verify the accuracy of the proposed method, resulting in coefficients of determination of 0.98, 0.92 and 0.91 for OSNR, CD and MC respectively. A 10 GBd DP-QPSK signal and ranges of 0-20 dB, 160-1120 ps/nm were used for OSNR and CD respectively, as well as different mode coupling coefficients. \cite{Xiang2019} used a single ANN to jointly monitor the MF and OSNR for a 28 GS/s PDM QPSK and 8, 16 and 64 QAM signals over the OSNR range of 10-16, 12-18,15-22 and 22-29 dB respectively. Their ANN had 50 hidden neurons and took as input two statistical features derived from the amplitude of the signals i.e. kurtosis and variance. 
Simulation showed mean estimation errors for the OSNR to be 0.005, 0.2, 0.17 and 0.67 using a dataset size of 400 per OSNR and MF. Experimental verification over the ranges 10-17, 14-20, 17-25 dB for QPSK, 8 and 16 QAM showed mean errors 0.15 dB, 0.41 dB and 0.49 dB when 15 hidden neurons are used. The method was extended in \cite{Xiang2021} but 50 bins of the cdf of one Stokes parameter was selected as the input. With a dataset size of 200 per OSNR and MF, OSNR ranges 10-18 dB, 12-20 dB, 12-20 dB, 16-24 dB, and 22-28 dB for QPSK, 8PSK, 8, 16, 64 QAM, and 60 hidden neurons, simulation produced mean square errors of 0.086 dB, 0.125 dB, 0.038 dB, 0.17 dB and 0.40 dB. Experimental verification resulted in mean OSNR estimation error of 0.13 dB, 0.29 dB, and 0.41 dB for QPSK, 8PSK and 16QAM. Table \ref{table:4} summarizes the current work on OPM for coherent detection. \subsection{Recognition of Modulation Format} Many of the OPM methods presented have assumed either advance knowledge of the modulation format or bitrate of the signal, or that it can be obtained from upper layer protocols. As a result, the ML algorithms have been trained and investigated for specific modulation formats and bit rates, as seen in the previous section, and would need to be retrained for a different signal type. It is also not practical to communicate across layers for simple OPM modules \cite{Zhang2016a,Tan2014} therefore it is necessary to review some works that have performed MFI and/or bitrate identification. Since elastic optical networks utilise bandwidth variable transmitters, it would be useful for the OPM module to identify modulation format and bitrate.
\cite{Tan2014} proposed one such method using Principal Component Analysis (PCA), where ADTP's for different combinations of bit rate, modulation format and impairments (CD, PMD and OSNR) were generated by simulation and PCA used to create a reference database for the training dataset, and then identified test data with 100\% accuracy in the case when the PC's > 2. \cite{Khan2017} used four DNN's to identify OSNR and MF for three different signal types viz 112 Gbps PM QPSK and 16-QAM and 240 Gbps 64 QAM. One DNN was used to identify the modulation format, and the three DNN's in the second stage trained to estimate the OSNR for one of the three modulation formats. Once the MF was identified, the signal was passed to the respective DNN in stage 2. The method was applied to experimental data from the output of a coherent receiver with AH's used as input features. The method showed 100\% accuracy in all three cases.The authors in \cite{Yuan} proposed an improvement to this method by adding an anomaly detector between the MFI identifier and OSNR monitor to ensure that the MF was accurately identified before being passed to the OSNR monitor. AH's were constructed from constellation diagrams and the method experimentally verified for M-ary QAM. They achieved accuracies of 97.5\%. \cite{Wan2018} used a MTL-ANN for MFI and OSNR monitoring in conjunction with the signal AH's. Simulation and experiment for NRZ-OOK, PAM 8 and PAM 4 both yielded 100\% accuracy for MFI. The authors extended their method in \cite{Yu2019} to 9 M-QAM modulation formats and used an adaptive weight loss ratio for their ANN as opposed to a fixed optimal one and also achieved 100\% MFI identification accuracy. \cite{Zhang2016a} simulated six commonly used modulation formats at several datarates and impairment levels and then trained an ANN with features derived for AAH's, achieving an accuracy of 99.6\%. 
\cite{Huang2021} also used an ANN and AAH's to identify the MF for NRZ, PAM4 and PAM8 signals under stringent bandwidth conditions. The results showed 95\% and 100\% accuracy for simulation and experiment. \cite{Zhao2020}, used a Binary-CNN to identify the MF for 9 different M-ary QAM signals over different OSNR ranges. An experimentally generated data set consisting of 1600 gray scale images of ring constellations per modulation format from the I/Q output of a coherent receiver, with a signal datarate of 12.5 GBd was used. The OSNR was varied from 10-25 dB for QPSK, 6, 8 and 12 QAM, 15-30 dB for 16 and 24-QAM and 20-35 dB for 32, 48, 64 QAM. The method identified the different formats with 100\% accuracy, and required less memory and execution time compared to a multi-layer perceptron and floating CNN. In \cite{Zhang2020b}, MFI was done using a CNN that took as input 3 images generated mapping the IQ output from a coherent receiver onto a 3D stokes space, and then projecting it onto 3 2D stokes planes. Numerical simulations were done for 28 GBd PDM signals and 6 modulation formats (BPSK, QPSK, 8,16,32 and 64 QAM) in OSNR conditions varying from 9- 35 dB. 68,400 and 16,200 images in total are used to train and test the CNN respectively. Results show identification accuracy of 99.96\% when the OSNR is above 15 dB. PCA was used in \cite{Xu2020} to identify the MF of 6 formats (BPSK, QPSK, 8,16,32 and 64 QAM). 3 PC's were extracted from 2048 symbols of the stokes parameters from the received signals of a coherent receiver with OSNR varied from 8 to 40dB and used as a reference database.Testing showed that 100\% MFI accuracy could be obtained at minimum OSNR's of 10, 8, 12, 18, 14 and 23 dB for BPSK, QPSK, 8, 16, 32 and 64 QAM PDM 28 GBd signals respectively. Experimental verification was also done on a dataset containing 30,720 symbols after construction of a reference from 2048 symbols for 20 GBd QPSK, 8, 16 and 32 QAM signals and also achieved 100\% accuracy. 
In \cite{Fan2018} MF and bit rate were identified by a MTL-CNN using 10/20 Gbps RZ-OOK, NRZ-DPSK and NRZ-OOK signals and phase portraits over various impairment ranges for OSNR, CD and PMD. Both MF and BR were identified with 100\% accuracy. 100\% accuracy was also attained by the same authors using a similar MTL-CNN structure but combining features from the different CNN layers and constructing phase portraits from ASCS \cite{Fan2019}. A multi-input MTL-DNN was used to identify modulation format and bitrate and simultaneously monitor OSNR and CD in \cite{Luo2021}. An experiment was carried out over different OSNR ranges and three CD values using as input AADTPs and AAHs on 14/28 Gbd QPSK and 16QAM signals. MF and BR were identified with accuracy of 100 and 99.81 \% respectively. \clearpage \begin{table*} \caption{\bf Summary of existing OPM works-coherent detection} \label{tab:long} \begin{tabular}{p{3cm}p{3.5cm}p{2.8cm}p{3cm}p{3cm}l} \hline {\textbf{ML algorithm}} & {\textbf{Signal type (BR-MF)}} & {\textbf{Input features}} &{\textbf{Impairment}}&{\textbf{Performance}}&{\textbf{Ref}}\\ \hline DNN(5;500)&16\textsuperscript{b}- QPSK&IQ output &OSNR (7.5-31)&mean error 1.6 &\cite{Tanimura2016}\\ CNN&14\textsuperscript{b}- and 16\textsuperscript{b}- QPSK, 16 QAM, 64 QAM&IQ output &OSNR (11-33)&bias error <0.2&\cite{Cho2019}\\ ANN(1;6)&56.8\textsuperscript{b}-16 QAM&link parameters, ANC &OSNR&error <0.6&\cite{Kashi2017}\\ ANN(1;7)& 35\textsuperscript{b}-16 QAM&link parameters, ANC &non-linear SNR&std error <0.23& \cite{Caballero2018a}\\ ANN& 12.5\textsuperscript{b}- M-ary QAM & AH &OSNR (10-35)&MAE 0.167 \\ LSTM NN&28\textsuperscript{b}-/35\textsuperscript{b}-16 and 64 QAM&IQ output&OSNR(15 - 30)&MAE 0.1/0.05, 0.04/0.04&\cite{Wang2019}\\ DNN(2;45/10)& 112- QPSK, 16-QAM, and 240- 64 QAM&AH&OSNR&mean errors 1.2, 0.4, 1&\cite{Khan2017}\\ CNN& 25\textsuperscript{b}- QPSK, 8PSK, 8 QAM, 16 QAM, 32 QAM 25\textsuperscript{b}- 64 QAM 25\textsuperscript{b}- 
QPSK\textsuperscript{*}, 16 QAM\textsuperscript{*} &constellation &OSNR(15-30) OSNR(20-35)&>99\%accuracy > 95\% max. error 0.6\textsuperscript{*}, 0.7\textsuperscript{*}&\cite{Wang2017}\\ TL-DNN&56- QPSK&AH&OSNR(6-20)&RMSE <0.1&\cite{Xia2019}\\ SVM, ANN(1;100), K nearest neighbors, Decision tree &20- QPSK&Spectrum&OSNR& accuracy 100\% , 65.625\%, 100\%, 73.124\%& \cite{Wang2019b}\\ CNN&12.5\textsuperscript{b}- M-ary QAM &ring constellation &OSNR (10-35)&98.91 accuracy &\cite{Zhao2020}\\ MTL-ANN&12.5\textsuperscript{b} M-QAM&AH&OSNR(10-35)&accuracy 98.7\%&\cite{Yu2019}\\ LSTM-NN&28\textsuperscript{b}- /35\textsuperscript{b}- 16/64 QAM&IQ output&OSNR(15-30), CD(1360-2040)&MAE <0.1 and <0.64&\cite{Wang2019x}\\ ANN&112- QPSK, 16 QAM , 120- 64 QA 10\textsuperscript{b}/20\textsuperscript{b}- QPSK\textsuperscript{*} 16 QAM\textsuperscript{*} & ring constellations& OSNR(15-26,19-29,22-31) OSNR(13-26, 20-30)\textsuperscript{*} &RMSE 0.17, 0.3, 0.68 RMSE 0.46 and 0.65* &\cite{Wang2021}\\ MTL-CNN& 28\textsuperscript{b}- (8, 16), 32, 64 QAM and 8-PSK, QPSK& intensity and differential phase density diagrams &OSNR (12-30), (17-33), (18-33), (12-30), (10-30)&mean errors 0.26, 0.4, 0.85, 0.64, 0.17, 0.19&\cite{feng2020}\\ LSTM-NN& 30\textsuperscript{b}- 16 QAM, 30 \textsuperscript{b}- QPSK\textsuperscript{*}&IQ output &OSNR(15-24)&STD <0.4 , <0.67\textsuperscript{*}&\cite{Ye2021}\\ CNN&10\textsuperscript{b}- QPSK &IQH & OSNR (0-20), CD (160-1120) and different mode coupling coefficients & coefficients 0.98, 0.92, 0.91 &\cite{Saif2021}\\ ANN&28- QPSK, 8, 16, 64 QAM QPSK, 8 and 16 QAM\textsuperscript{*}& statistics from IQ & OSNR (10-16, 12-18, 15-22, 22-29) (10-17, 14-20, 17-25)\textsuperscript{*}&mean errors 0.005, 0.2, 0.17, 0.67 (0.15, 0.41, 0.49)\textsuperscript{*}&\cite{Xiang2019}\\ ANN&28- QPSK, 8 PSK, 8, 16, 64 QAM QPSK, 8PSK, 16 QAM\textsuperscript{*}& Stokes parameters & OSNR (10-18, 12-20, 12- 20, 16- 24, and 22- 28) (9.8-16.8, 12-19, 16-23)\textsuperscript{*}&MSE 0.086, 
0.125, 0.038, 0.17, 0.40. Mean error (0.13, 0.29,0.41) \textsuperscript{*}&\cite{Xiang2021}\\ \hline \multicolumn{5}{p{18cm}}{All units for OSNR,CD,PMD are in dB,ps/nm and ps respectively. *indicates experimental values and simulation otherwise. BR\textsuperscript{b}-MF represents bitrate(GBd)-modulation format and bitrate in Gbps otherwise. ANN(x;y): x is the number of hidden layers, y is the number of neurons in the hidden layer.} \end{tabular} \end{table*} \clearpage \label{table:4} In \cite{Fan2018} MF and bit rate were identified by a MTL-CNN using 10/20 Gbps RZ-OOK, NRZ-DPSK and NRZ-OOK signals and phase portraits over various impairment ranges for OSNR, CD and PMD. Both MF and BR were identified with 100\% accuracy. 100\% accuracy was also attained by the same authors using a similar MTL-CNN structure but combining features from the different CNN layers and constructing phase portraits from ASCS \cite{Fan2019}. A multi-input MTL-DNN was used to identify modulation format and bitrate and simultaneously monitor OSNR and CD in \cite{Luo2021}. An experiment was carried out over different OSNR ranges and three CD values using as input AADTPs and AAHs on 14/28 Gbd QPSK and 16QAM signals. MF and BR were identified with accuracy of 100 and 99.81 \% respectively. The authors in \cite{Zhang2018} used a 3-layer ANN(202,40,5) to identify 5 QAM formats in an experimental IM-DD QAM-OFDM system using AH's as input. The MFI accuracy obtained was close to 100\% for 4 and 16 QAM over the entire range of received optical power, while 32, 64 and 128 QAM got similar accuracy when the optical power exceeded -11 dBm. \cite{feng2020} used a MTL-CNN to perform MFI with 100\% accuracy for mPSK and mQAM signals at a baud rate of 28 GBd and OSNR varied from 10-33 dB. The authors in \cite{Xiang2019,Xiang2021} achieved 100\% MFI accuracy for different ranges between 10-28 dB OSNR for 5 modulation formats using a 3 layer ANN.
Table \ref{table:5} summarizes some works where MFR has been done. \begin{table*} \centering \captionsetup[]{justification=centering} \caption{\bf Summary of ML methods used for MFI} \begin{tabular}{lp{2.5cm}p{5cm}p{2.5cm}l} \hline \textbf{ML method} & \textbf{Feature type} & \textbf{Modulation Format} & \textbf{Accuracy (\%)}&\textbf{Reference} \\ \hline PCA&ADTP&10/20 Gbps RZ-OOK, 40/100 Gbps PM RZ QPSK, 100/200 Gbps PM NRZ 16QAM & 100& \cite{Tan2014}\\ DNN & AH & 112 Gbps PM QPSK ,PM-16QAM, 240 gbps-PM 64-QAM & 97.5 &\cite{Khan2017}\\ ANN&AH& M-ary QAM&95.7&\cite{Yuan}\\ MTL-ANN&AH&NRZ-OOK, PAM4 , PAM8 & 100&\cite{Wan2018} \\ &&M-QAM&100&\cite{Yu2019}\\ ANN&AH&10 Gb/s RZ-OOK, 40 Gb/s NRZ-DPSK, 40 Gbps ODB, 40 Gbps RZ-DQPSK, 100 Gbps PM RZ-QPSK, 200 GBPS PM-NRZ 16QAM &99.6&\cite{Zhang2016a}\\ ANN&AH&NRZ, PAM4, PAM8&95 simulation,\newline 100 experiment &\cite{Huang2021}\\ B-CNN&ring constellation images&M-ary QAM&100 experiment & \cite{Zhao2020}\\ CNN&2D stokes plane images &M-QAM &99.96&\cite{Zhang2020b}\\ PCA&Stokes parameters&M-QAM&100&\cite{Xu2020}\\ MTL-CNN&ADTPs&10/20Gb/s RZ-OOK, NRZ-OOK, NRZ-DPSK &100 &\cite{Fan2018}\\ MTL-ANN&ASCS phase portraits & 60/100 Gb/s QPSK, 16, 64 QAM&100&\cite{Fan2019}\\ MTL-DNN &AADTP's and AAH's&14/28 Gbd QPSK, 16 QAM & 100 & \cite{Luo2021}\\ ANN(202,40,5)&AHs&4,16,32,64,128 QAM&close to 100 &\cite{Zhang2018}\\ MTL-CNN&intensity density and differential phase density diagram & 28 GBd mQAM and mPSK &100&\cite{feng2020}\\ ANN&amplitude statistics and stokes parameter&mPSK and mQAM &100&\cite{Xiang2019,Xiang2021}\\ \end{tabular} \label{table:5} \end{table*} \subsection{Application of Photonic Reservoir Computing in OPM} Photonic reservoir computing in the optical domain has been considered as an alternative to Digital Signal Processing for some years\cite{Pachnike2020}. A reservoir computer (RC) typically consists of an input, reservoir and readout. 
An input signal is fed to the reservoir, consisting of multiple randomly connected non-linear nodes, that function like a neural network. The input signal can alter the current and future states of the reservoir. The output of the reservoir is then read out as a linear combination of the different states in the reservoir. The input weights and node connections are fixed and thus the training complexity is reduced to a linear one at a single node at the readout \cite{Pachnike2020,Vandoorne2008,appeltant2011}. A common implementation that has been presented in the literature uses a single non-linear element in combination with a delay loop \cite{appeltant2011}, which can be implemented in the optical domain using a semiconductor laser and a fiber loop \cite{appeltant2011,Brunner2013,Larger2012}. Other approaches have used a network of several interconnected Semiconductor Optical Amplifiers (SOA's) \cite{Vandoorne2008,VANDOORNE2011}, and silicon micro-ring resonators \cite{Mesaritakis2013}. \cite{Vandoorne2014} has also shown a RC implementation using a passive silicon chip where the non-linearity is transferred to the readout, whose output is then passed to a linear classifier. Implementing the RC using photonic devices brings several advantages such as speed due to their inherently parallel computation nature, low power consumption and high bandwidth operation which are direct results of using light rather than electrical signals \cite{Larger2012,Vandoorne2014}. The authors in \cite{Ai2021} have applied this concept of reservoir computing using a semiconductor laser and delay line to identify the modulation format of 10 Gb/s OOK, 40 Gb/s DQPSK and 100 Gb/s 16-QAM signals in varying OSNR (12-26 dB), CD (-500 to 500 ps/nm) and DGD (0-20 ps) conditions. The input features were derived from AAH's. From a dataset size of 11,700, 2700 modulation signals were used to train the model using ridge regression and 100 samples used for testing.
The training and testing process is repeated five times with the different sample sets and using 400 virtual nodes. The method achieved a classification accuracy of 95.1\%, 95.7\% and 95.5\% for OOK, DPQSK and 16-QAM. \section{Discussion} The most common features used in the current OPM works for feature selection are eye diagrams, phase portraits and amplitude histograms. In some cases, widely known features from these plots such as statistical means, variances, standard deviations etc, counts of occurrences per bin, eye diagram parameters like eye closure, crossing amplitude etc have been used, while in others new features have been defined to exploit visible differences in the plots \cite{Jargon2009,Caballero2018,Saif2021}. Manual definition of features is a difficult task which requires experience and also makes it impossible to distinguish patterns when there are only slight differences for example, the performance of ANN's have been shown to deteriorate beyond certain OSNR's because there is very little distinction between the eye diagrams especially for higher modulation formats\cite{Thrane2017}. It also makes it difficult to scale the ML algorithm to a different signal type than what it was trained with. To mitigate this, deep learning techniques have been studied where the algorithm can learn its own features from the input data, the commonest way being by supplying it with processed images\cite{Fan2019,Fan2020,Wang2017,Zhang2020b} and the 4-tributary output of the coherent receiver. Of course, this comes with more complexity since deep learning algorithms are generally more difficult to train. Furthermore, in cases where images are used, some amount of image processing is required \cite{Zhang2020b,Skoog2006b}. Artificial neural networks have been very widely used for OPM in direct detection systems. 
The reviewed works have shown that in some cases, even simple ANN's with 1 hidden layer and as low as 3 hidden neurons and as few as one input feature are capable of accurately predicting OSNR, CD and PMD. Correlations of up to 0.997 have been obtained. The performance of the ANN depends on the input features selected and their number and also on the signal type. SVMs, PCA and ridge regression have also been used but in very limited works. Deep learning techniques have also been shown in the literature but require significant time and more features to accurately train. Many of the techniques used are dependent on the signal type hence it is assumed that the monitoring unit already has knowledge of the signal type. Moreover, in the cases where multi-impairment monitoring is required of different signal types, the ANN has to be trained more than once or multiple ANN's have to be used for each signal type. \cite{Tan2014} proposed a method using PCA that was transparent to the BR and MF but required training with multiple combinations of MF-BR-impairments hence required a significant amount of training data. More recently, \cite{Zheng2020} has shown a method which is transparent to the signal type and only requires input power as a feature. However, it has only been used to measure OSNR. Other works have also utilised multi-task learning and deep learning \cite{Cheng2020,Fan2018,Wan2018} to simultaneously identify the signal type and impairments. These also required generating large training datasets with different combinations of the signal type and impairment levels. Very few works have measured other impairments such as non-linearity whose monitoring is also crucial for optical networks. For coherent detection systems, neural networks have been used and shown to perform better than other methods where they have been compared except in one case in \cite{Wang2019b}. 
ANN's still suffer from manual feature generation and as such most of the literature uses DNN's and CNN's for coherent detection systems which can learn their own features from the 4 tributary output of the coherent receiver, images of constellations in the Jones or Stokes space or AH's. The challenge is that the training takes a considerable amount of time and a very large number of samples are required to produce accurate models. Nevertheless, after the training stage, the monitoring stage takes a shorter time, which is the critical time for an OPM monitor in a real system, since the training can be done off-line. Many of the methods have also been shown to maintain their accuracy in the presence of linear impairments. \cite{Zhao2020} tried to compare the performance of their joint MFI and SNR predictor by simulation for different transmission parameters noting that future networks will have varying parameters. They varied the transmission distance and launch power. They showed that if the DNN's were trained each time there was a change in one parameter, 100\% accuracy could be obtained for both MFI and OPM, whereas lower accuracy was obtained if trained once with a dataset consisting of all the possible parameter variations. It is difficult to directly compare one ML implementation in one work over the other because different authors have carried out their simulations/experiments for different impairment ranges, signal types and they have classified the performance of their algorithms in different ways. In the reviewed literature where MFR and BRI have been investigated, again ANN's and deep learning neural networks have been the most common method of choice and the bulk of the work has achieved 100\% identification accuracy. Photonic reservoir computing is a promising technology for OPM and MFR since it reduces the training complexity of neural network based methods which has been highlighted as a key challenge in the reviewed works that have employed them. 
Moreover, signal processing in the optical domain allows for high speed and high bandwidth operation which are critical for future communication networks. \section{Conclusion} Optical performance monitoring has been an important aspect of optical communications for a very long time. As networks have become more heterogeneous and dynamic, they have also become more complex and fiber network technology has had to evolve along with it to meet the reliability demands, since it can already provide the required capacity. Since the light paths are expected to constantly change as they become elastic to provide bandwidth on demand, and the signal parameters are also expected to change during transmission in accordance with link conditions, real time link performance monitoring has become important. Application of machine learning to OPM has garnered significant interest as a promising technology to aid in this task and has been shown to be possible, and to provide accurate prediction for multiple impairments as long as the algorithm is well trained. \section{Acknowledgments} \bigskip \section{Disclosures} \noindent\textbf{Disclosures.} The authors declare no conflicts of interest. \section{References} \bigskip
{ "redpajama_set_name": "RedPajamaArXiv" }
2,273
\section{Introduction} \label{sec:intro} Deep neural networks have enjoyed remarkable success in recent years, but they require large datasets for effective training~\citep{lake2017building, garnelo2016towards}. One way to mitigate this data efficiency problem is to approach learning in two phases. The first phase learns the statistics of a generic domain, drawing on a large training set, but without committing to a specific learning task within that domain. The second phase learns a function for a specific task, but does so using only a small number of data points by exploiting the domain-wide statistics already learned. Meta-learning with neural networks is one example of this approach~\cite{wang2016learning, reed2017few}. For example, consider supervised learning problems. Many of these can be framed as function approximation given a finite set of observations. Consider a dataset $\{(x_i, y_i)\}_{i=0}^{n-1}$ of $n$ inputs $x_i \in X$ and outputs $y_i \in Y$. Assume that these represent evaluations $y_i = f(x_i)$ of some unknown function $f : X \to Y$, which may be fixed or a realization of some random function. A supervised learning algorithm returns an approximating function $g : X \to Y$ or a distribution over such functions. The aim is to minimize a loss between $f$ and $g$ on the entire space~$X$, but in practice the routine is evaluated on a finite set of observations that are held-out (making them effectively unlabelled). We call these unlabelled data points targets (see figure \ref{fig:model}). Classification, regression, dynamics modeling, and image generation can all be cast in this framework. \begin{figure}[t] \begin{center} \centerline{\includegraphics[width=0.85\columnwidth]{vectorNP.pdf}} \caption{\textbf{\acl{dnp}}. 
a) Data description b)~Training regime of conventional supervised deep learning models c) Our model.} \label{fig:model} \end{center} \vspace{-2\baselineskip} \end{figure} One approach to supervised problems is to randomly initialize a parametric function $g$ anew for each new task and spend the bulk of computation on a costly fitting phase. Prior information that a practitioner may have about $f$ is specified via the architecture of $g$, the loss function, or the training details. This approach encompasses most of deep supervised learning. Since the extent of prior knowledge that can be expressed in this way is relatively limited, and learning cannot be shared between different tasks, the amount of training required is large, and deep learning methods tend to fail when training data is not plentiful. Another approach is to take a probabilistic stance and specify a distribution over functions, known as stochastic processes; \aclp{gp} (\acp{gp}) are an example \cite{rasmussen2004gaussian}. On this view, a practitioner's prior knowledge about $f$ is captured in the distributional assumptions about the prior process and learning corresponds to Bayesian inference over the functional space conditioned on the observed values. In the \ac{gp} example, assumptions on the smoothness of $f$ are captured a priori via a parametric kernel function, and $g$ is taken to be a random function distributed according to the predictive posterior distribution. Unfortunately, such Bayesian approaches quickly become computationally intractable as the dataset or dimensionality grows \cite{snelson2006sparse}. In this work we propose a family of models that represent solutions to the supervised problem, and an end-to-end training approach to learning them, that combine neural networks with features reminiscent of Gaussian Processes. 
We call this family of models \emph{\aclp{dnp}}~(CNPs), as an allusion to the fact that they define conditional distributions over functions given a set of observations. The dependence of a \ac{dnp} on the observations is parametrized by a neural network that is invariant under permutations of its inputs. We focus on architectures that scale as $\mathcal{O}(n+m)$ at test time, where $n,m$ are the number of observations and targets, respectively. In its most basic form a \ac{dnp} embeds each observation, aggregates these embeddings into a further embedding of fixed dimension with a symmetric aggregator, and conditions the function $g$ on the aggregate embedding; see Figure \ref{fig:model} for a schematic representation. \acp{dnp} are trained by sampling a random dataset and following a gradient step to maximize the conditional likelihood of a random subset of targets given a random observation set. This encourages \acp{dnp} to perform well across a variety of settings, i.e. $n \ll m$ or $n \gg m$. This paper is structured as follows. In Section \ref{sec:model} we introduce \aclp{dnp}, propose our implementation, and describe how they can be trained efficiently using standard deep learning libraries. In Section \ref{sec:related} we discuss related work, particularly Bayesian and meta-learning approaches. In Section \ref{sec:experiments} we apply \acp{dnp} to several domains, including regression, classification and image completion, comparing its benefits to classical approaches. We emphasize that although CNPs share some similarities with Bayesian approaches, they do not implement Bayesian inference directly and it is not necessarily true that the conditional distributions will be consistent with respect to some prior process. However, the ability to extract prior knowledge directly from training data tied with scalability at test time can be of equal or greater importance. 
\section{Model} \label{sec:model} \subsection{Stochastic Processes} Consider a set $O =\{(x_i, y_i) \}_{i=0}^{n-1} \subset X \times Y$ of pairs of inputs $x_i \in X$ and outputs $y_i \in Y$ and another set $T=\{x_i\}_{i=n}^{n+m-1} \subset X$ of unlabelled points. We call these the set of observations and targets, respectively. We assume that the outputs are a realization of the following process; let $P$ be a probability distribution over functions $f : X \to Y$, formally known as a stochastic process\footnote{$P$ is a measure traditionally defined as the extension of a set of consistent measures on finite collections of $f(x_i)$ for $x_i \in X$. We overload $P$ for the marginal and conditional densities of these measures.}, then for $f \sim P$, set $y_i = f(x_i)$. $P$ defines a joint distribution over the random variables $\{f(x_i)\}_{i=0}^{n + m -1}$, and therefore a conditional distribution $P(f(T) \given O, T)$; our task is to predict the output values $f(x)$ for every $x \in T$ given $O$. As a motivating example, consider a random \mbox{1-dimensional} function $f \sim P$ defined on the real line (i.e.,~$X \mathrel{\mathop:}= \mathbb{R}$, $Y \mathrel{\mathop:}= \mathbb{R}$). $O$ would constitute $n$ observations of $f$'s value $y_i$ at different locations $x_i$ on the real line. Given these observations, we are interested in predicting $f$'s value at new locations on the real line. A classic assumption to make on $P$ is that all finite sets of function evaluations of $f$ are jointly Gaussian distributed. This class of random functions are known as \acfp{gp}. In this case, the predictive distribution $P(f(T) \given O, T)$ has a simple analytic form defined by prior assumptions on the pairwise correlation structure (specified via a kernel function). The framework of stochastic processes is appealing, because Bayes' rule allows one to reason consistently about the predictive distribution over $f$ imposed by observing $O$ under a set of probabilistic assumptions. 
This allows the model to be data efficient, an uncommon characteristic in most deep learning models. However, in practice, it is difficult to design appropriate priors and most interesting examples of stochastic processes are computationally expensive, scaling poorly with $n$ and $m$. This includes GPs which scale as $\mathcal{O}((n+m)^3)$. \subsection{\acfp{dnp}} As an alternative we propose \acfp{dnp}, models that directly parametrize conditional stochastic processes without imposing consistency with respect to some prior process. \acp{dnp} parametrize distributions over $f(T)$ given a distributed representation of $O$ of \emph{fixed} dimensionality. By doing so we give up the mathematical guarantees associated with stochastic processes, trading this off for functional flexibility and scalability. Specifically, given a set of observations $O$, a \ac{dnp} is a conditional stochastic process $Q_{\theta}$ that defines distributions over $f(x)$ for inputs $x \in T$. $\theta$ is the real vector of all parameters defining $Q$. Inheriting from the properties of stochastic processes, we assume that $Q_{\theta}$ is invariant to permutations of $O$ and $T$. If $O^{\prime}, T^{\prime}$ are permutations of $O$ and $T$, respectively, then $Q_{\theta}(f(T) \given O,T) = Q_{\theta}(f(T^{\prime}) \given O, T^{\prime}) = Q_{\theta}(f(T) \given O^{\prime}, T)$. In this work, we generally enforce permutation invariance with respect to $T$ by assuming a factored structure. Specifically, we consider $Q_{\theta}$s that factor $Q_{\theta}(f(T) \given O, T) = \prod_{x \in T} Q_{\theta}(f(x) \given O, x)$. In the absence of assumptions on output space $Y$, this is the easiest way to ensure a valid stochastic process. Still, this framework can be extended to non-factored distributions, we consider such a model in the experimental section. The defining characteristic of a \ac{dnp} is that it conditions on $O$ via an embedding of fixed dimensionality. 
In more detail, we use the following architecture, \begin{align} r_i &= h_{\theta}(x_i,y_i) \qquad \forall (x_i, y_i) \in O\\ r &= r_1 \oplus r_2 \oplus \ldots r_{n-1} \oplus r_n\\ \phi_i &= g_{\theta}(x_i, r) \qquad \forall (x_i) \in T \end{align} where $h_{\theta} : X \times Y \to \mathbb{R}^d$ and $g_{\theta} : X \times \mathbb{R}^d \to \mathbb{R}^e$ are neural networks, $\oplus$ is a commutative operation that takes elements in $\mathbb{R}^d$ and maps them into a single element of $\mathbb{R}^d$, and $\phi_i$ are parameters for $Q_{\theta}(f(x_i) \given O, x_i) = Q(f(x_i) \given \phi_i)$. Depending on the task the model learns to parametrize a different output distribution. This architecture ensures permutation invariance and $\mathcal{O}(n+m)$ scaling for conditional prediction. We note that, since $ r_1 \oplus \ldots \oplus r_n$ can be computed in $\mathcal{O}(1)$ from $ r_1 \oplus \ldots \oplus r_{n-1}$, this architecture supports streaming observations with minimal overhead. For regression tasks we use $\phi_i$ to parametrize the mean and variance $\phi_i = (\mu_i, \sigma_i^2)$ of a Gaussian distribution $\mathcal{N}(\mu_i, \sigma_i^2)$ for every $x_i \in T$. For classification tasks $\phi_i$ parametrizes the logits of the class probabilities $p_c$ over the $c$ classes of a categorical distribution. In most of our experiments we take $a_1 \oplus \ldots \oplus a_n$ to be the mean operation $(a_1 + \ldots + a_n)/n$. \subsection{Training \acp{dnp}} We train $Q_{\theta}$ by asking it to predict $O$ conditioned on a randomly chosen subset of $O$. This gives the model a signal of the uncertainty over the space $X$ inherent in the distribution $P$ given a set of observations. More precisely, let $f \sim P$, $O = \{(x_i, y_i)\}_{i=0}^{n-1}$ be a set of observations, $N \sim \mathrm{uniform}[0, \ldots, n-1]$. We condition on the subset $O_N = \{(x_i, y_i)\}_{i=0}^{N} \subset O$, the first $N$ elements of $O$. 
We minimize the negative conditional log probability \begin{equation} \begin{aligned} \mathcal{L}(\theta) = - \mathbb{E}_{f \sim P} \Big[ \mathbb{E}_{N}\Big[ \log Q_{\theta}(\{y_i\}_{i=0}^{n-1} | O_N, \{x_i\}_{i=0}^{n-1})\Big] \Big] \end{aligned} \end{equation} Thus, the targets it scores $Q_{\theta}$ on include \emph{both} the observed and unobserved values. In practice, we take Monte Carlo estimates of the gradient of this loss by sampling $f$ and $N$. This approach shifts the burden of imposing prior knowledge from an analytic prior to empirical data. This has the advantage of liberating a practitioner from having to specify an analytic form for the prior, which is ultimately intended to summarize their empirical experience. Still, we emphasize that the $Q_{\theta}$ are not necessarily a consistent set of conditionals for all observation sets, and the training routine does not guarantee that. In summary, \begin{enumerate} \item A \ac{dnp} is a conditional distribution over functions trained to model the empirical conditional distributions of functions $f \sim P$. \item A \ac{dnp} is permutation invariant in $O$ and $T$. \item A CNP is scalable, achieving a running time complexity of $\mathcal{O}(n+m)$ for making $m$ predictions with $n$ observations. \end{enumerate} Within this specification of the model there are still some aspects that can be modified to suit specific requirements. The exact implementation of $h$, for example, can be adapted to the data type. For low dimensional data the encoder can be implemented as an MLP, whereas for inputs with larger dimensions and spatial correlations it can also include convolutions. Finally, in the setup described the model is not able to produce any coherent samples, as it learns to model only a factored prediction of the mean and the variances, disregarding the covariance between target points. This is a result of this particular implementation of the model. 
One way we can obtain coherent samples is by introducing a latent variable that we can sample from. We carry out some proof-of-concept experiments on such a model in section~\ref{latent_var_model}. \section{Related research} \label{sec:related} \subsection{Gaussian Processes} The goal of our research is to incorporate ideas from GP inference into a NN training regime to overcome certain drawbacks of both. There are a number of papers that address some of the same issues within the GP framework. Scaling issues with GPs have been addressed by sparse GPs~\citep{snelson2006sparse}, while overcoming the limited expressivity resulting from functional restrictions is the motivation for Deep GPs~\citep{damianou2013deep, salimbeni2017doubly}. The authors of Deep Kernel learning~\citep{wilson2016deep}, also combine ideas from DL and GPs. Their model, however, remains closer to GPs as the neural network is used to learn more expressive kernels that are then used within a GP. \subsection{Meta-Learning} Deep learning models are generally more scalable and are very successful at learning features and prior knowledge from the data directly. However they tend to be less flexible with regards to input size and order. Additionally, in general they only approximate one function as opposed to distributions over functions. Meta-learning approaches address the latter and share our core motivations. Recently meta-learning has been applied to a wide range of tasks like RL~\citep{wang2016learning, finn2017model} or program induction~\citep{devlin2017neural}. Often meta-learning algorithms are implemented as deep generative models that learn to do few-shot estimations of the underlying density of the data. Generative Query Networks (GQN), for example, predict new viewpoints in 3D scenes given some context observations using a similar training regime to NPs~\citep{eslami2018neural}. 
As such, NPs can be seen as a generalisation of GQN to few-shot prediction tasks beyond scene understanding, such as regression and classification. Another way of carrying out few-shot density estimation is by updating existing models like PixelCNN~\citep{van2016conditional} and augmenting them with attention mechanisms~\citep{reed2017few} or including a memory unit in a VAE model~\citep{bornschein2017variational}. Another successful latent variable approach is to explicitly condition on some context during inference~\citep{rezende2016one}. Given the generative nature of these models they are usually applied to image generation tasks, but models that include a conditioning class-variable can be used for classification as well. Classification itself is another common task in meta-learning. Few-shot classification algorithms usually rely on some distance metric in feature space to compare target images to the observations provided ~\citep{koch2015siamese}, ~\citep{santoro2016one}. Matching networks~\citep{vinyals2016matching, bartunov2016fast} are closely related to CNPs. In their case features of samples are compared with target features using an attention kernel. At a higher level one can interpret this model as a CNP where the aggregator is just the concatenation over all input samples and the decoder $g$ contains an explicitly defined distance kernel. In this sense matching networks are closer to GPs than to CNPs, since they require the specification of a distance kernel that CNPs learn from the data instead. In addition, as MNs carry out all-to-all comparisons they scale with $\mathcal{O}(n \times m)$, although they can be modified to have the same complexity of $\mathcal{O}(n + m)$ as CNPs~\citep{snell2017prototypical}. A model that is conceptually very similar to CNPs (and in particular the latent variable version) is the ``neural statistician'' paper~\citep{edwards2016towards} and the related variational homoencoder~\citep{hewitt2018variational}. 
As with the other generative models the neural statistician learns to estimate the density of the observed data but does not allow for targeted sampling at what we have been referring to as input positions $x_i$. Instead, one can only generate i.i.d. samples from the estimated density. Finally, the latent variant of CNP can also be seen as an approximated amortized version of Bayesian DL \cite{gal2016dropout, blundell2015weight, louizos2017bayesian, louizos2017multiplicative} \section{Experimental Results} \label{sec:experiments} \begin{figure}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=\columnwidth]{regression_task.jpg}} \caption{\textbf{1-D Regression}. Regression results on a 1-D curve (black line) using 5 (left column) and 50 (right column) context points (black dots). The first two rows show the predicted mean and variance for the regression of a single underlying kernel for GPs (red) and CNPs (blue). The bottom row shows the predictions of CNPs for a curve with switching kernel parameters.} \label{regression_results} \end{center} \vskip -0.2in \end{figure} \begin{figure*}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=\columnwidth*2]{mnist_with_labels.png}} \caption{\textbf{Pixel-wise image regression on MNIST.} Left: Two examples of image regression with varying numbers of observations. We provide the model with 1, 40, 200 and 728 context points (top row) and query the entire image. The resulting mean (middle row) and variance (bottom row) at each pixel position is shown for each of the context images. Right: model accuracy with increasing number of observations that are either chosen at random (blue) or by selecting the pixel with the highest variance (red).} \label{ic_mnist_results} \end{center} \vskip -0.2in \end{figure*} \subsection{Function Regression} \label{sec:regression} As a first experiment we test CNP on the classical 1D regression task that is used as a common baseline for GPs. 
We generate two different datasets that consist of functions generated from a GP with an exponential kernel. In the first dataset we use a kernel with fixed parameters, and in the second dataset the function switches at some random point on the real line between two functions each sampled with different kernel parameters. At every training step we sample a curve from the GP, select a subset of $n$ points $(x_i,y_i)$ as observations, and a subset of points $(x_t, y_t)$ as target points. Using the model described in Figure \ref{fig:model}, the observed points are encoded using a three layer MLP encoder $h$ with a $128$ dimensional output representation $r_i$. The representations are aggregated into a single representation $r = \frac{1}{n}\sum r_i$ which is concatenated to ${x_t}$ and passed to a decoder $g$ consisting of a five layer MLP. The decoder outputs a Gaussian mean and variance for the target outputs $\hat{y}_t$. We train the model to maximize the log-likelihood of the target points using the Adam optimizer~\cite{kingma2014adam}. Two examples of the regression results obtained for each of the datasets are shown in Figure~\ref{regression_results}. We compare the model to the predictions generated by a GP with the correct hyperparameters, which constitutes an upper bound on our performance. Although the prediction generated by the GP is smoother than the CNP's prediction both for the mean and variance, the model is able to learn to regress from a few context points for both the fixed kernels and switching kernels. As the number of context points grows, the accuracy of the model improves and the approximated uncertainty of the model decreases. Crucially, we see the model learns to estimate its own uncertainty given the observations very accurately. Nonetheless it provides a good approximation that increases in accuracy as the number of context points increases. Furthermore the model achieves similarly good performance on the switching kernel task. 
This type of regression task is not trivial for GPs whereas in our case we only have to change the dataset used for training. \subsection{Image Completion} \label{sec:completion} We consider image completion as a regression task over functions in either $f: [0,1]^2 \to [0,1]$ for grayscale images, or $f: [0,1]^2 \to [0,1]^3$ for RGB images. The input $x$ is the 2D pixel coordinates normalized to $[0,1]^2$, and the output $y$ is either the grayscale intensity or a vector of the RGB intensities of the corresponding pixel. For this completion task we use exactly the same model architecture as for 1D function regression (with the exception of making the last layer 3-dimensional for RGB). We test CNP on two different data sets: the MNIST handwritten digit database~\cite{lecun1998gradient} and large-scale CelebFaces Attributes (CelebA) dataset~\cite{liu2015faceattributes}. The model and training procedure are the same for both: at each step we select an image from the dataset and pick a subset of the pixels as observations. Conditioned on these, the model is trained to predict the values of all the pixels in the image (including the ones it has been conditioned on). Like in 1D regression, the model outputs a Gaussian mean and variance for each pixel and is optimized with respect to the log-likelihood of the ground-truth image. It is important to point out that we choose images as our dataset because they constitute a complex 2-D function that is easy to evaluate visually, not to compare to generative models benchmarks. \subsubsection{MNIST} We first test CNP on the MNIST dataset and use the test set to evaluate its performance. As shown in Figure~\ref{ic_mnist_results}a the model learns to make good predictions of the underlying digit even for a small number of context points. Crucially, when conditioned only on one non-informative context point (e.g. a black pixel on the edge) the model's prediction corresponds to the average over all MNIST digits. 
As the number of context points increases the predictions become more similar to the underlying ground truth. This demonstrates the model's capacity to extract dataset specific prior knowledge. It is worth mentioning that even with a complete set of observations the model does not achieve pixel-perfect reconstruction, as we have a bottleneck at the representation level. Since this implementation of CNP returns factored outputs, the best prediction it can produce given limited context information is to average over all possible predictions that agree with the context. An alternative to this is to add latent variables in the model such that they can be sampled conditioned on the context to produce predictions with high probability in the data distribution. We consider this model later in section~\ref{latent_var_model}. An important aspect of the model is its ability to estimate the uncertainty of the prediction. As shown in the bottom row of Figure~\ref{ic_mnist_results}a, as we add more observations, the variance shifts from being almost uniformly spread over the digit positions to being localized around areas that are specific to the underlying digit, specifically its edges. Being able to model the uncertainty given some context can be helpful for many tasks. One example is active exploration, where the model has a choice over where to observe. We test this by comparing the predictions of CNP when the observations are chosen according to uncertainty (i.e.\ the pixel with the highest variance at each step), versus random pixels (Figure~\ref{ic_mnist_results}b). This method is a very simple way of doing active exploration, but it already produces better prediction results than selecting the conditioning points at random. 
\begin{figure}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=0.85\columnwidth]{celeba_with_labels.png}} \caption{\textbf{Pixel-wise image completion on CelebA.} Two examples of CelebA image regression with varying numbers of observations. We provide the model with 1, 10, 100 and 1000 context points (top row) and query the entire image. The resulting mean (middle row) and variance (bottom row) at each pixel position is shown for each of the context images. } \label{ic_celeba_results} \end{center} \vskip -0.2in \end{figure} \begin{figure}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=0.8\columnwidth]{flexi_with_labels.png}} \caption{\textbf{Flexible image completion}. In contrast to standard conditional models, CNPs can be directly conditioned on observed pixels in arbitrary patterns, even ones which were never seen in the training set. Similarly, the model can predict values for pixel coordinates that were never included in the training set, like subpixel values in different resolutions. The dotted white lines were added for clarity after generation.} \label{flexible_celeba} \end{center} \vskip -0.2in \end{figure} \begin{figure}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=0.5\columnwidth]{latent_mnist4} \includegraphics[width=0.5\columnwidth]{latent_celeba_534517_11}} \caption{\textbf{Image completion with a latent variable model}. The latent variables capture the global uncertainty, allowing the sampling of different coherent images which conform to the observations. As the number of observations increases, uncertainty is reduced and the samples converge to a single estimate.} \label{latent} \end{center} \vskip -0.2in \end{figure} \subsubsection{CelebA} We also apply CNP to CelebA, a dataset of images of celebrity faces, and report performance obtained on the test set. 
As shown in Figure~\ref{ic_celeba_results} our model is able to capture the complex shapes and colours of this dataset with predictions conditioned on less than 10\% of the pixels being already close to ground truth. As before, given few context points the model averages over all possible faces, but as the number of context pairs increases the predictions capture image-specific details like face orientation and facial expression. Furthermore, as the number of context points increases the variance is shifted towards the edges in the image. An important aspect of CNPs demonstrated in Figure~\ref{flexible_celeba}, is its flexibility not only in the number of observations and targets it receives but also with regards to their input values. It is interesting to compare this property to GPs on one hand, and to trained generative models \cite{van2016conditional, gregor2015draw} on the other hand. The first type of flexibility can be seen when conditioning on subsets that the model has not encountered during training. Consider conditioning the model on one half of the image, for example. This forces the model to not only predict pixel values according to some stationary smoothness property of the images, but also according to global spatial properties, e.g. symmetry and the relative location of different parts of faces. As seen in the first row of the figure, CNPs are able to capture those properties. A GP with a stationary kernel cannot capture this, and in the absence of observations would revert to its mean (the mean itself can be non-stationary but usually this would not be enough to capture the interesting properties). In addition, the model is flexible with regards to the target input values. This means, e.g., we can query the model at resolutions it has not seen during training. We take a model that has only been trained using pixel coordinates of a specific resolution, and predict at test time subpixel values for targets between the original coordinates. 
As shown in Figure~\ref{flexible_celeba}, with one forward pass we can query the model at different resolutions. While GPs also exhibit this type of flexibility, it is not the case for trained generative models, which can only predict values for the pixel coordinates on which they were trained. In this sense, CNPs capture the best of both worlds -- it is flexible in regards to the conditioning and prediction task, and has the capacity to extract domain knowledge from a training set. We compare CNPs quantitatively to two related models: kNNs and GPs. As shown in Table~\ref{image_recon} CNPs outperform both when the number of context points is small (empirically when half of the image or less is provided as context). When the majority of the image is given as context, exact methods like GPs and kNN will perform better. From the table we can also see that the order in which the context points are provided is less important for CNPs, since providing the context points in order from top to bottom still results in good performance. Both insights point to the fact that CNPs learn a data-specific `prior' that will generate good samples even when the number of context points is very small. \subsubsection{Latent variable model} \label{latent_var_model} The main model we use throughout this paper is a factored model that predicts the mean and variance of the target outputs. Although we have shown that the mean is by itself a useful prediction, and that the variance is a good way to capture the uncertainty, this factored model prevents us from obtaining coherent samples over multiple targets. Consider the MNIST experiment in Figure~\ref{ic_mnist_results}, conditioned on a small number of observations. Rather than predicting only the mean of all digits, sometimes we need a model that can be used to sample different coherent images of all the possible digits conditioned on the observations.
GPs can do this because they contain a parametric kernel predicting the covariance between all the points in the observations and targets. This forms a multivariate Gaussian which can be used to coherently draw samples. In order to maintain this property in a trained model, one approach is to train the model to predict a GP kernel \cite{wilson2016deep}. However, the difficulty is the need to back-propagate through the sampling which involves a large matrix inversion (or some approximation of it). \begin{table}[H] \begin{center} \begin{tabular}{c | c c c | c c c} \toprule & \multicolumn{3}{c}{Random Context} & \multicolumn{3}{c}{Ordered Context} \\ [0.5ex] \# & 10 & 100 & 1000 & 10 & 100 & 1000\\ \midrule \small{kNN} & 0.215 & 0.052 & 0.007 & 0.370 & 0.273 & 0.007\\ \small{GP} & 0.247 & 0.137 & \textbf{0.001} & 0.257 & 0.220 & \textbf{0.002}\\ \small{\ac{dnp}} & \textbf{0.039} & \textbf{0.016} & 0.009 & \textbf{0.057} & \textbf{0.047} & 0.021\\ \bottomrule \end{tabular} \caption{Pixel-wise mean squared error for all of the pixels in the image completion task on the CelebA data set with increasing number of context points (10, 100, 1000). The context points are chosen either at random or ordered from the top-left corner to the bottom-right. With fewer context points CNPs outperform kNNs and GPs. In addition CNPs perform well regardless of the order of the context points, whereas GPs and kNNs perform worse when the context is ordered.} \label{image_recon} \end{center} \end{table} In contrast, the approach we use is to simply add latent variables $z$ to our decoder $g$, allowing our model to capture global uncertainty. In order to generate a coherent sample, we compute the representation $r$ from the observations, which parametrizes a Gaussian distribution over the latents $z$. $z$ is then sampled once and used to generate the predictions for all targets.
To get a different coherent sample we draw a new sample from the latents $z$ and run the decoder again for all targets. Similar to the standard VAE \cite{kingma2013auto, rezende2014stochastic} we train this model by optimizing a variational lower bound of the log-likelihood, using a conditional Gaussian prior $p(z|O)$ that is conditioned on the observations, and a Gaussian posterior $p(z|O, T)$ that is also conditioned on the target points. We apply this model to MNIST and CelebA (Figure~\ref{latent}). We use the same models as before, but we concatenate the representation $r$ to a vector of latent variables $z$ of size $64$ (for CelebA we use bigger models where the sizes of $r$ and $z$ are $1024$ and $128$ respectively). For both the prior and posterior models, we use three layered MLPs and average their outputs. We emphasize that the difference between the prior and posterior is that the prior only sees the observed pixels, while the posterior sees both the observed and the target pixels. When sampling from this model with a small number of observed pixels, we get coherent samples and we see that the variability of the datasets is captured. As the model is conditioned on more and more observations, the variability of the samples drops and they eventually converge to a single possibility. \subsection{Classification} \label{sec:classification} Finally, we apply the model to one-shot classification using the Omniglot dataset~\cite{lake2015human} (see Figure~\ref{classification} for an overview of the task). This dataset consists of 1,623 classes of characters from 50 different alphabets. Each class has only 20 examples and as such this dataset is particularly suitable for few-shot learning algorithms. As in~\citep{vinyals2016matching} we use 1,200 randomly selected classes as our training set and the remainder as our testing data set. In addition we augment the dataset following the protocol described in~\cite{santoro2016one}. 
This includes cropping the image from $32\times32$ to $28\times28$, applying small random translations and rotations to the inputs, and also increasing the number of classes by rotating every character by 90 degrees and defining that to be a new class. We generate the labels for an N-way classification task by choosing N random classes at each training step and arbitrarily assigning the labels $0, ..., N-1$ to each. \begin{figure}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=\columnwidth]{classification.pdf}} \caption{\textbf{One-shot Omniglot classification}. At test time the model is presented with a labelled example for each class, and outputs the classification probabilities of a new unlabelled example. The model uncertainty grows when the new example comes from an un-observed class.} \label{classification} \end{center} \vskip -0.2in \end{figure} Given that the input points are images, we modify the architecture of the encoder $h$ to include convolution layers as mentioned in section~\ref{sec:model}. In addition we only aggregate over inputs of the same class by using the information provided by the input label. The aggregated class-specific representations are then concatenated to form the final representation. Given that both the size of the class-specific representations and the number of classes are constant, the size of the final representation is still constant and thus the $\mathcal{O}(n+m)$ runtime still holds. The results of the classification are summarized in Table~\ref{table_results}. CNPs achieve higher accuracy than models that are significantly more complex (like MANN). While CNPs do not beat state of the art for one-shot classification our accuracy values are comparable. Crucially, we reach those values using a significantly simpler architecture (three convolutional layers for the encoder and a three-layer MLP for the decoder) and with a lower runtime of $\mathcal{O}(n+m)$ at test time as opposed to $\mathcal{O}(nm)$. 
\begin{table} \begin{center} \begin{tabular}{l | c c c c l} \toprule & \multicolumn{2}{c}{5-way Acc} & \multicolumn{2}{c}{20-way Acc} &\small{Runtime} \\ [0.5ex] & 1-shot & 5-shot & 1-shot & 5-shot &\\ \midrule \small{MANN} & 82.8\% & 94.9\% & - & - &\tiny{$\mathcal{O}(nm)$}\\ \small{MN} & \textbf{98.1\%} & \textbf{98.9\%} & \textbf{93.8\%} & \textbf{98.5\%} &\tiny{$\mathcal{O}(nm)$}\\ \small{\ac{dnp}} & 95.3\% & 98.5\% & 89.9\% & 96.8\% &\tiny{$\mathcal{O}(n+m)$}\\ \bottomrule \end{tabular} \end{center} \caption{\textbf{Classification results on Omniglot}. Results on the same task for MANN~\citep{santoro2016one}, and matching networks (MN)~\citep{vinyals2016matching} and CNP.} \label{table_results} \end{table} \section{Discussion} \label{sec:discussion} In this paper we have introduced \aclp{dnp}, a model that is both flexible at test time and has the capacity to extract prior knowledge from training data. We have demonstrated its ability to perform a variety of tasks including regression, classification and image completion. We compared CNPs to Gaussian Processes on one hand, and deep learning methods on the other, and also discussed the relation to meta-learning and few-shot learning. It is important to note that the specific \ac{dnp} implementations described here are just simple proofs-of-concept and can be substantially extended, e.g.\ by including more elaborate architectures in line with modern deep learning advances. To summarize, this work can be seen as a step towards learning high-level abstractions, one of the grand challenges of contemporary machine learning. Functions learned by most conventional deep learning models are tied to a specific, constrained statistical context at any stage of training. A trained CNP is more general, in that it encapsulates the high-level statistics of a family of functions. As such it constitutes a high-level abstraction that can be reused for multiple tasks. 
In future work we will explore how far these models can help in tackling the many key machine learning problems that seem to hinge on abstraction, such as transfer learning, meta-learning, and data efficiency. \section*{Acknowledgements} We would like to thank Shakir Mohamed, Fabio Viola, Oriol Vinyals, Irene Garnelo, Daniel Burgess, Kevin McKee and Ellen Clancy for insightful discussions and being pretty cool. \section{Electronic Submission} \label{submission} Submission to ICML 2018 will be entirely electronic, via a web site (not email). Information about the submission process and \LaTeX\ templates are available on the conference web site at: \begin{center} \textbf{\texttt{http://icml.cc/2018/}} \end{center} The guidelines below will be enforced for initial submissions and camera-ready copies. Here is a brief summary: \begin{itemize} \item Submissions must be in PDF\@. \item The maximum paper length is \textbf{8 pages excluding references and acknowledgements, and 10 pages including references and acknowledgements} (pages 9 and 10 must contain only references and acknowledgements). \item \textbf{Do not include author information or acknowledgements} in your initial submission. \item Your paper should be in \textbf{10 point Times font}. \item Make sure your PDF file only uses Type-1 fonts. \item Place figure captions \emph{under} the figure (and omit titles from inside the graphic file itself). Place table captions \emph{over} the table. \item References must include page numbers whenever possible and be as complete as possible. Place multiple citations in chronological order. \item Do not alter the style template; in particular, do not compress the paper format by reducing the vertical spaces. \item Keep your abstract brief and self-contained, one paragraph and roughly 4--6 sentences. Gross violations will require correction at the camera-ready phase. The title should have content words capitalized. 
\end{itemize} \subsection{Submitting Papers} \textbf{Paper Deadline:} The deadline for paper submission that is advertised on the conference website is strict. If your full, anonymized, submission does not reach us on time, it will not be considered for publication. There is no separate abstract submission. \textbf{Anonymous Submission:} ICML uses double-blind review: no identifying author information may appear on the title page or in the paper itself. Section~\ref{author info} gives further details. \textbf{Simultaneous Submission:} ICML will not accept any paper which, at the time of submission, is under review for another conference or has already been published. This policy also applies to papers that overlap substantially in technical content with conference papers under review or previously published. ICML submissions must not be submitted to other conferences during ICML's review period. Authors may submit to ICML substantially different versions of journal papers that are currently under review by the journal, but not yet accepted at the time of submission. Informal publications, such as technical reports or papers in workshop proceedings which do not appear in print, do not fall under these restrictions. \medskip Authors must provide their manuscripts in \textbf{PDF} format. Furthermore, please make sure that files contain only embedded Type-1 fonts (e.g.,~using the program \texttt{pdffonts} in linux or using File/DocumentProperties/Fonts in Acrobat). Other fonts (like Type-3) might come from graphics files imported into the document. Authors using \textbf{Word} must convert their document to PDF\@. Most of the latest versions of Word have the facility to do this automatically. Submissions will not be accepted in Word format or any format other than PDF\@. Really. We're not joking. Don't send Word. Those who use \textbf{\LaTeX} should avoid including Type-3 fonts. 
Those using \texttt{latex} and \texttt{dvips} may need the following two commands: {\footnotesize \begin{verbatim} dvips -Ppdf -tletter -G0 -o paper.ps paper.dvi ps2pdf paper.ps \end{verbatim}} It is a zero following the ``-G'', which tells dvips to use the config.pdf file. Newer \TeX\ distributions don't always need this option. Using \texttt{pdflatex} rather than \texttt{latex}, often gives better results. This program avoids the Type-3 font problem, and supports more advanced features in the \texttt{microtype} package. \textbf{Graphics files} should be a reasonable size, and included from an appropriate format. Use vector formats (.eps/.pdf) for plots, lossless bitmap formats (.png) for raster graphics with sharp lines, and jpeg for photo-like images. The style file uses the \texttt{hyperref} package to make clickable links in documents. If this causes problems for you, add \texttt{nohyperref} as one of the options to the \texttt{icml2018} usepackage statement. \subsection{Submitting Final Camera-Ready Copy} The final versions of papers accepted for publication should follow the same format and naming convention as initial submissions, except that author information (names and affiliations) should be given. See Section~\ref{final author} for formatting instructions. The footnote, ``Preliminary work. Under review by the International Conference on Machine Learning (ICML). Do not distribute.'' must be modified to ``\textit{Proceedings of the $\mathit{35}^{th}$ International Conference on Machine Learning}, Stockholm, Sweden, PMLR 80, 2018. Copyright 2018 by the author(s).'' For those using the \textbf{\LaTeX} style file, this change (and others) is handled automatically by simply changing $\mathtt{\backslash usepackage\{icml2018\}}$ to $$\mathtt{\backslash usepackage[accepted]\{icml2018\}}$$ Authors using \textbf{Word} must edit the footnote on the first page of the document themselves. 
Camera-ready copies should have the title of the paper as running head on each page except the first one. The running title consists of a single line centered above a horizontal rule which is $1$~point thick. The running head should be centered, bold and in $9$~point type. The rule should be $10$~points above the main text. For those using the \textbf{\LaTeX} style file, the original title is automatically set as running head using the \texttt{fancyhdr} package which is included in the ICML 2018 style file package. In case that the original title exceeds the size restrictions, a shorter form can be supplied by using \verb|\icmltitlerunning{...}| just before $\mathtt{\backslash begin\{document\}}$. Authors using \textbf{Word} must edit the header of the document themselves. \section{Format of the Paper} All submissions must follow the specified format. \subsection{Length and Dimensions} Papers must not exceed eight (8) pages, including all figures, tables, and appendices, but excluding references and acknowledgements. When references and acknowledgements are included, the paper must not exceed ten (10) pages. Acknowledgements should be limited to grants and people who contributed to the paper. Any submission that exceeds this page limit, or that diverges significantly from the specified format, will be rejected without review. The text of the paper should be formatted in two columns, with an overall width of 6.75~inches, height of 9.0~inches, and 0.25~inches between the columns. The left margin should be 0.75~inches and the top margin 1.0~inch (2.54~cm). The right and bottom margins will depend on whether you print on US letter or A4 paper, but all final versions must be produced for US letter size. The paper body should be set in 10~point type with a vertical spacing of 11~points. Please use Times typeface throughout the text. 
\subsection{Title} The paper title should be set in 14~point bold type and centered between two horizontal rules that are 1~point thick, with 1.0~inch between the top rule and the top edge of the page. Capitalize the first letter of content words and put the rest of the title in lower case. \subsection{Author Information for Submission} \label{author info} ICML uses double-blind review, so author information must not appear. If you are using \LaTeX\/ and the \texttt{icml2018.sty} file, use \verb+\icmlauthor{...}+ to specify authors and \verb+\icmlaffiliation{...}+ to specify affiliations. (Read the TeX code used to produce this document for an example usage.) The author information will not be printed unless \texttt{accepted} is passed as an argument to the style file. Submissions that include the author information will not be reviewed. \subsubsection{Self-Citations} If you are citing published papers for which you are an author, refer to yourself in the third person. In particular, do not use phrases that reveal your identity (e.g., ``in previous work \cite{langley00}, we have shown \ldots''). Do not anonymize citations in the reference section. The only exception are manuscripts that are not yet published (e.g., under submission). If you choose to refer to such unpublished manuscripts \cite{anonymous}, anonymized copies have to be submitted as Supplementary Material via CMT\@. However, keep in mind that an ICML paper should be self contained and should contain sufficient detail for the reviewers to evaluate the work. In particular, reviewers are not required to look at the Supplementary Material when writing their review. \subsubsection{Camera-Ready Author Information} \label{final author} If a paper is accepted, a final camera-ready copy must be prepared. For camera-ready papers, author information should start 0.3~inches below the bottom rule surrounding the title. 
The authors' names should appear in 10~point bold type, in a row, separated by white space, and centered. Author names should not be broken across lines. Unbolded superscripted numbers, starting 1, should be used to refer to affiliations. Affiliations should be numbered in the order of appearance. A single footnote block of text should be used to list all the affiliations. (Academic affiliations should list Department, University, City, State/Region, Country. Similarly for industrial affiliations.) Each distinct affiliations should be listed once. If an author has multiple affiliations, multiple superscripts should be placed after the name, separated by thin spaces. If the authors would like to highlight equal contribution by multiple first authors, those authors should have an asterisk placed after their name in superscript, and the term ``\textsuperscript{*}Equal contribution" should be placed in the footnote block ahead of the list of affiliations. A list of corresponding authors and their emails (in the format Full Name \textless{}email@domain.com\textgreater{}) can follow the list of affiliations. Ideally only one or two names should be listed. A sample file with author names is included in the ICML2018 style file package. Turn on the \texttt{[accepted]} option to the stylefile to see the names rendered. All of the guidelines above are implemented by the \LaTeX\ style file. \subsection{Abstract} The paper abstract should begin in the left column, 0.4~inches below the final address. The heading `Abstract' should be centered, bold, and in 11~point type. The abstract body should use 10~point type, with a vertical spacing of 11~points, and should be indented 0.25~inches more than normal on left-hand and right-hand margins. Insert 0.4~inches of blank space after the body. Keep your abstract brief and self-contained, limiting it to one paragraph and roughly 4--6 sentences. Gross violations will require correction at the camera-ready phase. 
\subsection{Partitioning the Text} You should organize your paper into sections and paragraphs to help readers place a structure on the material and understand its contributions. \subsubsection{Sections and Subsections} Section headings should be numbered, flush left, and set in 11~pt bold type with the content words capitalized. Leave 0.25~inches of space before the heading and 0.15~inches after the heading. Similarly, subsection headings should be numbered, flush left, and set in 10~pt bold type with the content words capitalized. Leave 0.2~inches of space before the heading and 0.13~inches afterward. Finally, subsubsection headings should be numbered, flush left, and set in 10~pt small caps with the content words capitalized. Leave 0.18~inches of space before the heading and 0.1~inches after the heading. Please use no more than three levels of headings. \subsubsection{Paragraphs and Footnotes} Within each section or subsection, you should further partition the paper into paragraphs. Do not indent the first line of a given paragraph, but insert a blank line between succeeding ones. You can use footnotes\footnote{Footnotes should be complete sentences.} to provide readers with additional information about a topic without interrupting the flow of the paper. Indicate footnotes with a number in the text where the point is most relevant. Place the footnote in 9~point type at the bottom of the column in which it appears. Precede the first footnote in a column with a horizontal rule of 0.8~inches.\footnote{Multiple footnotes can appear in each column, in the same order as they appear in the text, but spread them across columns and pages if possible.} \begin{figure}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=\columnwidth]{icml_numpapers}} \caption{Historical locations and number of accepted papers for International Machine Learning Conferences (ICML 1993 -- ICML 2008) and International Workshops on Machine Learning (ML 1988 -- ML 1992). 
At the time this figure was produced, the number of accepted papers for ICML 2008 was unknown and instead estimated.} \label{icml-historical} \end{center} \vskip -0.2in \end{figure} \subsection{Figures} You may want to include figures in the paper to illustrate your approach and results. Such artwork should be centered, legible, and separated from the text. Lines should be dark and at least 0.5~points thick for purposes of reproduction, and text should not appear on a gray background. Label all distinct components of each figure. If the figure takes the form of a graph, then give a name for each axis and include a legend that briefly describes each curve. Do not include a title inside the figure; instead, the caption should serve this function. Number figures sequentially, placing the figure number and caption \emph{after} the graphics, with at least 0.1~inches of space before the caption and 0.1~inches after it, as in Figure~\ref{icml-historical}. The figure caption should be set in 9~point type and centered unless it runs two or more lines, in which case it should be flush left. You may float figures to the top or bottom of a column, and you may set wide figures across both columns (use the environment \texttt{figure*} in \LaTeX). Always place two-column figures at the top or bottom of the page. \subsection{Algorithms} If you are using \LaTeX, please use the ``algorithm'' and ``algorithmic'' environments to format pseudocode. These require the corresponding stylefiles, algorithm.sty and algorithmic.sty, which are supplied with this package. Algorithm~\ref{alg:example} shows an example. \begin{algorithm}[tb] \caption{Bubble Sort} \label{alg:example} \begin{algorithmic} \STATE {\bfseries Input:} data $x_i$, size $m$ \REPEAT \STATE Initialize $noChange = true$. 
\FOR{$i=1$ {\bfseries to} $m-1$} \IF{$x_i > x_{i+1}$} \STATE Swap $x_i$ and $x_{i+1}$ \STATE $noChange = false$ \ENDIF \ENDFOR \UNTIL{$noChange$ is $true$} \end{algorithmic} \end{algorithm} \subsection{Tables} You may also want to include tables that summarize material. Like figures, these should be centered, legible, and numbered consecutively. However, place the title \emph{above} the table with at least 0.1~inches of space before the title and the same after it, as in Table~\ref{sample-table}. The table title should be set in 9~point type and centered unless it runs two or more lines, in which case it should be flush left. \begin{table}[t] \caption{Classification accuracies for naive Bayes and flexible Bayes on various data sets.} \label{sample-table} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \begin{tabular}{lcccr} \toprule Data set & Naive & Flexible & Better? \\ \midrule Breast & 95.9$\pm$ 0.2& 96.7$\pm$ 0.2& $\surd$ \\ Cleveland & 83.3$\pm$ 0.6& 80.0$\pm$ 0.6& $\times$\\ Glass2 & 61.9$\pm$ 1.4& 83.8$\pm$ 0.7& $\surd$ \\ Credit & 74.8$\pm$ 0.5& 78.3$\pm$ 0.6& \\ Horse & 73.3$\pm$ 0.9& 69.7$\pm$ 1.0& $\times$\\ Meta & 67.1$\pm$ 0.6& 76.5$\pm$ 0.5& $\surd$ \\ Pima & 75.1$\pm$ 0.6& 73.9$\pm$ 0.5& \\ Vehicle & 44.9$\pm$ 0.6& 61.5$\pm$ 0.4& $\surd$ \\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} Tables contain textual material, whereas figures contain graphical material. Specify the contents of each row and column in the table's topmost row. Again, you may float tables to a column's top or bottom, and set wide tables across both columns. Place two-column tables at the top or bottom of the page. \subsection{Citations and References} Please use APA reference format regardless of your formatter or word processor. If you rely on the \LaTeX\/ bibliographic facility, use \texttt{natbib.sty} and \texttt{icml2018.bst} included in the style-file package to obtain this format. 
Citations within the text should include the authors' last names and year. If the authors' names are included in the sentence, place only the year in parentheses, for example when referencing Arthur Samuel's pioneering work \yrcite{Samuel59}. Otherwise place the entire reference in parentheses with the authors and year separated by a comma \cite{Samuel59}. List multiple references separated by semicolons \cite{kearns89,Samuel59,mitchell80}. Use the `et~al.' construct only for citations with three or more authors or after listing all authors to a publication in an earlier reference \cite{MachineLearningI}. Authors should cite their own work in the third person in the initial version of their paper submitted for blind review. Please refer to Section~\ref{author info} for detailed instructions on how to cite your own papers. Use an unnumbered first-level section heading for the references, and use a hanging indent style, with the first line of the reference flush against the left margin and subsequent lines indented by 10 points. The references at the end of this document give examples for journal articles \cite{Samuel59}, conference publications \cite{langley00}, book chapters \cite{Newell81}, books \cite{DudaHart2nd}, edited volumes \cite{MachineLearningI}, technical reports \cite{mitchell80}, and dissertations \cite{kearns89}. Alphabetize references by the surnames of the first authors, with single author entries preceding multiple author entries. Order references for the same authors by year of publication, with the earliest first. Make sure that each reference includes all relevant information (e.g., page numbers). Please put some effort into making references complete, presentable, and consistent. If using bibtex, please protect capital letters of names and abbreviations in titles, for example, use \{B\}ayesian or \{L\}ipschitz in your .bib file. 
\subsection{Software and Data} We strongly encourage the publication of software and data with the camera-ready version of the paper whenever appropriate. This can be done by including a URL in the camera-ready copy. However, do not include URLs that reveal your institution or identity in your submission for review. Instead, provide an anonymous URL or upload the material as ``Supplementary Material'' into the CMT reviewing system. Note that reviewers are not required to look at this material when writing their review. \section*{Acknowledgements} \textbf{Do not} include acknowledgements in the initial version of the paper submitted for blind review. If a paper is accepted, the final camera-ready version can (and probably should) include acknowledgements. In this case, please place such acknowledgements in an unnumbered section at the end of the paper. Typically, this will include thanks to reviewers who gave useful comments, to colleagues who contributed to the ideas, and to funding agencies and corporate sponsors that provided financial support. \nocite{langley00}
{ "redpajama_set_name": "RedPajamaArXiv" }
4,115
Martian Hazard #271: Falling Frost Avalanches! As spring comes to Mars' north polar latitudes, the added sunlight warms layers of subsurface CO2 ice, which can rapidly sublimate and force its way outwards and upwards. When this occurs along the edges of steep scarps, as seen in the image above, the rapid expansion of the CO2 – literally 'dry ice' – can force ice, dust and soil off the cliff faces in avalanches that fall hundreds of feet to the base of the cliffs. These "avalanches" have been seen before on Mars, and they are another fantastic reminder that the Red Planet is still very much an active place! The image here is a section of a HiRISE scan (ESP_025010_2650) acquired on November 27, 2011 and included in the February 2012 PDS release. Over 400 new images of Mars were added to the Planetary Data System, enough awesome Martian geology pics to keep a desktop explorer entertained for quite some time! I'm going to work on a high-res version later today, using the full JP2 data. Watch my Twitter feed for updates. Image: NASA / JPL / University of Arizona. Edited by J. Major. UPDATE: See a high-resolution version of the avalanche above here. This photo is such a great moment for scientists and space lovers! Before finding life somewhere in space, a little bit of motion (even atmospheric or geological) on another planet gives it some life! You have a really good blog, that I found when searching for good Mars pictures. I appreciate how you have a cover on all the different planets in your content. Keep the good work!
{ "redpajama_set_name": "RedPajamaC4" }
9,476
CREATE TABLE problems (id SERIAL PRIMARY KEY, title VARCHAR(50) NOT NULL, submitted TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, difficulty INT NOT NULL DEFAULT 0, submitter VARCHAR(50) NOT NULL, times_solved INT NOT NULL DEFAULT 0, description VARCHAR(500) NOT NULL, code VARCHAR(300) NOT NULL, solved BOOLEAN NOT NULL DEFAULT false); INSERT INTO problems (title, submitter, description, code) values ('This is a test problem.', 'Alice', 'Test description', '(+ 1 1)'); INSERT INTO problems (title, submitter, description, code) values ('The 2nd Problem', 'Bob', 'Another description', '(* 2 2)'); INSERT INTO problems (title, submitter, description, code) values ('The 3rd and final problem', 'Charles', 'No description for you!', '(print "hello"');
{ "redpajama_set_name": "RedPajamaGithub" }
2,935
Q: Can font rendering quality be affected by programming, or is it entirely handled by the OS? I am working with a programmer who is using Java to create an interface for an application. The fonts in the program look ugly to me. Somehow too harsh, as if they aren't anti-aliased or something. Sorry for being vague, but they just look "clunky" to me. I know that there are settings within an OS to change how a font is rendered, such as subpixel smoothing, or grayscale smoothing, as well as subpixel order and hinting. Since the fonts only look bad in this one program I wonder - is there anything on the programming side that can be done to improve font rendering? A: If you are using Java 6, there is a system property called awt.useSystemAAFontSettings you can use to control anti-aliasing. Possible values are: "lcd" ClearType style sub-pixel anti-aliasing. "false" Disable anti-aliasing. "on" Best contrast. "gasp" Standard anti-aliasing.
{ "redpajama_set_name": "RedPajamaStackExchange" }
4,326
The Best Western John Howard Hotel in Kensington, central London is a 4 star hotel with en-suite bedrooms and self-contained apartments in a tranquil and elegant location. Within period buildings dating from around 1815, the hotel features a range of rooms, free Wi-Fi and an in-house restaurant 'Manor at the Gate Restaurant'. An ideal location in London, the BW John Howard Hotel in Kensington is near the Royal Albert Hall, Natural History Museum, Science Museum and Kensington Palace. The John Howard Hotel comprises five period properties built around 1815 that are listed by English Heritage for architectural and historical purposes. Every apartment has a different layout and the plans shown are not representative of all apartments. Some apartments have separate kitchens, some have a patio and some have a split level arrangement. The John Howard Hotel is an oasis of tranquillity and elegance in the heart of Central London. Located in a tree lined boulevard in the exclusive Royal Borough of Kensington and Chelsea the John Howard Hotel offers a choice of 40 luxurious bedrooms and 12 self-contained apartments. The Manor at the Gate Restaurant is the in-house restaurant at Best Western John Howard Hotel with a la carte and set menus featuring a wide range of dishes and a sumptuous buffet breakfast every morning, incorporating a traditional cooked English breakfast. For pre-dinner drinks the friendly bar is the ideal place to relax, or enjoy a drink here before a night out in London. Close to fashionable High Street Kensington and Knightsbridge, Harrods, Harvey Nichols and other famous designer stores, the John Howard is only a few minutes' walk to Hyde Park, Kensington Gardens, Kensington Palace, the Natural History Museum, Victoria and Albert Museum, the Science Museum and the world renowned Royal Albert Hall. Also within easy access to Earls Court and Olympia exhibition halls, London's West End shopping and theatres and London's vibrant night life. 
So if you're visiting London for a short break, a romantic outing, a family holiday, a business trip or going to a concert at the Royal Albert Hall enjoy a combination of old world elegance and modern day comforts at the John Howard Hotel. Luxurious en-suite bedrooms, 24 hour room service await you at this exclusive hotel. Our friendly staff are always on hand to offer efficient and prompt service, ensuring you get the most out of your stay in London. The John Howard offers twelve traditionally furnished apartments for families or business travellers wanting greater privacy, space to entertain, or for those wanting the privacy of an apartment but the luxury of 24 hour room service and a restaurant. The John Howard has 4 two bedroom apartments, 4 one bedroom apartments and 4 studio apartments. Own kitchen equipped with oven, hob, microwave and refrigerator. A range of cooking utensils and tableware, kettle and toaster. Daily maid service including change of linen and providing fresh towels. Same day personal laundry and dry cleaning service. Iron and ironing board. Starter packs available on request - milk, bread, cereal, eggs, juice, butter, fruit. Fruit basket, flowers, champagne & chocolates can be organised for that special occasion. Close to bus stops and underground station, Garage parking close by. We are within easy walking distance from Gloucester Road Underground Station which is on the Piccadilly, Circle and District lines. You can take the Piccadilly line directly to London Heathrow Airport or you can take the London Heathrow Express to Paddington Station and connect on the Circle line to Gloucester Road Underground Station. For connections to London Gatwick Airport take the Gatwick Express from Victoria Station which is on the Circle, District and Victoria lines.
{ "redpajama_set_name": "RedPajamaC4" }
4,119
Report: Adrian Wojnarowski says Kyle Lowry "could'' be dealt at the NBA Trade Deadline Now one week out, ESPN's Senior Insider went in-depth about the NBA trade deadline. By Harrison Grimm@Harrison_Grimm Mar 18, 2021, 6:00pm EDT Share All sharing options for: Report: Adrian Wojnarowski says Kyle Lowry "could'' be dealt at the NBA Trade Deadline Photo by Nic Antaya/Getty Images NBA trade talk and rumors continue to heat up, as we are now a week away from the NBA trade deadline. ESPN Insider Adrian Wojnarowski went in-depth about some of the possible storylines that could solidify over the next few weeks on the most recent episode of the Woj Pod — including some involving the Philadelphia 76ers. The Sixers are expected to be very active over the next week. After all, Daryl Morey is in charge and no stranger to wheeling and dealing in the NBA. The major talking point of the episode, mentioned numerous times, was in regards to Toronto's Kyle Lowry. The main takeaway from the episode is that Lowry's trade status is very much up in the air. Woj had this to say to Tim Bontemps in the first half of the episode: "They're not gonna send him somewhere you'd consider an outpost. What he's done for the Raptors...I know Masai is going to send him somewhere he wants to go. Philly makes sense in a lot of ways. It's home for him. That's gonna be one to watch." This is a pretty bold quote from the biggest newsbreaker in the NBA. My biggest takeaway from the first mention of Lowry in this episode is definitely how he ends this question with: "that's gonna be the one to watch". Lowry has been rumored to Miami and Los Angeles (Clippers), along with Philly. The main difficulty in trading for Lowry remains his $30 million in salary, as it's simply a big number to match in a trade. Miami recently pulled the trigger on a deal with the Thunder, trading Meyers Leonard for Trevor Ariza. 
Leonard's salary of just under $10 million was viewed as a huge piece in any theoretical Lowry-to-Miami deal, and constructing a deal between those two teams may be more difficult now than originally thought from a salary standpoint. It's also important to note that both the Clippers and Heat have very little to offer from a draft compensation standpoint. All of these factors, along with Lowry being a Philadelphia native, point to the Sixers having the best odds in the event that Lowry requests a trade. Woj was even asked again at the end of the episode if he felt that Lowry would be dealt at the deadline. He responded: "I think he could. I still think Toronto will give him a lot of voice [in regards to] if he wants to be moved and where he would like to play. I know Masai Ujiri won't send Kyle Lowry to an outpost, or place that he doesn't have interest being in." These two quotes give us a descriptive idea of what Kyle Lowry's trade deadline might hold. By the sounds of it, the ball is figuratively in Lowry's court. It seems like Toronto will either be perfectly content to ride out the season with their star point guard or do their best to honor his request of a trade. The Sixers are one of the favorites in the East and would be a great fit for the twilight of Lowry's career. Expect rumors to continue to heat up as we approach the trade deadline on March 25. We will keep you updated with any and all trade news here at Liberty Ballers. Meet the 2023 NBA All-Star Game starters A weird NBA All-Star Game voting rule is going to ruin the East starting lineup Kevin Durant and Joel Embiid are beefing over crotch chops and tweets Cavs and Thunder can follow the path the Grizzlies and Pelicans set Stephen Curry was ejected for throwing his mouthpiece, but the Warriors won anyway NBA trade deadline tracker: Will the Sixers make a move to improve the team?
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
683
KAIST and Google Partner to Develop AI Curriculum Two KAIST professors, Hyun Wook Ka from the School of Transdisciplinary Studies and Young Jae Jang from the Department of Industrial and Systems Engineering, were recipients of Google Education Grants that will support the development of new AI courses integrating the latest industrial technology. This collaboration is part of the KAIST-Google Partnership, which was established in July 2019 with the goal of nurturing AI talent at KAIST. The two proposals -- Professor Ka's 'Cloud AI-Empowered Multimodal Data Analysis for Human Affect Detection and Recognition' and Professor Jang's 'Learning Smart Factory with AI'-- were selected by the KAIST Graduate School of AI through a school-wide competition held in July. The proposals then went through a final review by Google and were accepted. The two professors will receive $7,500 each for developing AI courses using Google technology for one year. Professor Ka's curriculum aims to provide a rich learning experience for students by providing basic knowledge on data science and AI and helping them obtain better problem solving and application skills using practical and interdisciplinary data science and AI technology. Professor Jang's curriculum is designed to solve real-world manufacturing problems using AI and it will be field-oriented. Professor Jang has been managing three industry-academic collaboration centers in manufacturing and smart factories within KAIST and plans to develop his courses to go beyond theory and be centered on case studies for solving real-world manufacturing problems using AI. Professor Jang said, "Data is at the core of smart factories and AI education, but there is often not enough of it for the education to be effective. 
The KAIST Advanced Manufacturing Laboratory has a testbed for directly acquiring data generated from real semiconductor automation equipment, analyzing it, and applying algorithms, which enables truly effective smart factory and AI education." KAIST signed a partnership with Google in July 2019 to foster global AI talent and is operating various programs to train AI experts and support excellent AI research for two years. The Google AI Focused Research Award supports world-class faculty performing cutting-edge research and was previously awarded to professors Sung Ju Hwang from the Graduate School of AI and Steven Whang from the School of Electrical Engineering along with Google Cloud Platform (GCP) credits. These two professors have been collaborating with Google teams since October 2018 and recently extended their projects to continue through 2021. In addition, a Google Ph.D. Fellowship was awarded to Taesik Gong from the School of Computing in October this year, and three Student Travel Grants were awarded to Sejun Park from the School of Electrical Engineering, Chulhyung Lee from the Department of Mathematical Sciences, and Sangyun Lee from the School of Computing earlier in March. Five students were also recommended for the Google Internship program in March. (END)​ 2020.12.11 View 222 Fundraising for the 50th Anniversary Memorial Building Kicks Off KAIST started the fundraising campaign to construct the 50th Anniversary Memorial Building. This is one of the projects and events the 50th Anniversary Commemorative Committee established to celebrate the anniversary. The ground will be broken in 2022 after raising approximately 50 billion KRW through 2021. The five-story building will be the latest addition to the KAIST campus. To highlight the campus's history, the new building will connect the N5 (Basic Experiment & Research) and N2 (Administration Branch) buildings, the first buildings on the main Daejeon campus after its main campus moved from Seoul in 1987. 
Currently, the College of Business remains at the Seoul campus. The 50th Anniversary Memorial Building will connect the two buildings in the shape of C, and represent KAIST's C3 core value of Challenging, Creating, and Caring. The concept of this building was designed by Professor Sang-Min Bae from the Department of Industrial Design. The 50th Anniversary Commemorative Committee said the Memorial Building will reflect the spirit of its core values. The first floor will accommodate the auditorium and exhibition hall, showcasing the latest achievements in KAIST innovation and convergence research as well as alumni startups and companies. The second floor will be an education space for entrepreneurship and video studios. An area for delivering creative education platforms such as Education 4.0 will be prepared on the third floor. The fourth floor will be used for global leadership education. The fifth floor will house the KAIST Club, a lounge for alumni and the Global Strategy Institute. Co-Chair of the Fundraising & PR Sub-Committee of the KAIST 50th Anniversary Commemorative Committee and Former Vice President for Planning and Budget Seung-Bin Park and current Vice President for Planning and Budget Suchan Chae reiterated the importance of extending the infrastructure of the campus, saying that investments in the infrastructure will expand the university's future growth potential. In a letter to kick off the fundraising efforts last month, they called for support from the entire KAIST community to help construct the new memorial building that will produce global talents and help young scientists make their dreams come true. To donate, click here ​ KAIST Receives $57 Million Donation to Enhance Research The largest amount since the opening of KAIST will fund 'Singularity Professors' KAIST Development Foundation Chairman Soo-Young Lee made a gift of real estate estimated at approximately $57 million on July 23. 
This is the largest donation KAIST has received since it was founded in 1971. The fund will establish the "Soo-Young Lee Science Education Foundation" and the proceeds of the foundation will go to the "Singularity Professors" as necessary resources to help make discoveries and design new approaches to accelerate breakthroughs. "KAIST should be the institute that will produce first Korean Nobel laureate in the field of science. I hope this fund will be utilized to enable Korea to stand out in this challenging time by accomplishing breakthroughs nobody has never imagined," said Chairman Lee during the donation ceremony at KAIST's campus in Daejeon. This is Chairman Lee's third donation following the $6.7 million donation in 2012 and the $830,000 donation in 2016. Chairman Lee began her career as a journalist in 1963. In 1981, she started her own business by launching Kwangwon Ranch and became a successful businesswoman. In 1988, Chairman Lee established the real estate company Kwangwon Industries. After receiving an honorary doctorate from KAIST in 2012, she has served as the chairman of the KAIST Development Foundation from 2013. Chairman Lee expressed her intention to make another donation to KAIST in the near future during the news conference. "People matter most for advancing the world. KAIST has a very distinct mission to foster the brightest minds and will drive the nation to move forward. I have worked with KAIST for quite long time so that I have a strong belief that KAIST is the one that will not only make contributions to Korea but also to all humanity," she explained. "For example, about one-fourth of the R&D manpower at Samsung Electronics is from KAIST. In 2019, Samsung Electronics recorded a revenue of approximately $206 billion which accounted for about 16% of national GDP. KAIST is the one that fosters global talents who are working at global company such as Samsung and many others." 
KAIST President Sung-Chul Shin also expressed his deep respect for Chairman Lee's decision, saying that the entire KAIST community will make every effort to keep up Chairman Lee's noble idea encouraging KAIST to push forward and help realize KAIST's role and mission. (END) ​ COVID-19 Update: Fall Semester to Continue Offering Classes Online KAIST announced that the university would continue online classes through the fall semester. However, the university will conduct additional in-person classes for upper-level undergraduate lab classes and some graduate courses where on-site interaction was deemed to be highly necessary. Some 600-level graduate courses at the Daejeon campus and graduate courses at the Seoul campus will carry out both in-person and online classes. The fall semester will start from August 31. Provost and Executive Vice President Kwang Hyung Lee announced the fall semester plan in his letter to the entire student body on July 9. He said that the university decided to continue with online classes in consideration of the safety of KAIST community members and the current status of the COVID-19 spread. However, he said the new plan will help students choose class options between in-person and online classes. "Although the number of classes with two versions is limited, we believe this will help many students continue learning without the sustained face-to-face contact that is inherent in residential education," Provost Lee said. In-person classes conducted in the fall semester will also be provided online for students who are not available for in-person classes. Students may choose the type of the classes they prefer according to their situation, among only the courses that will offer two versions. Professors will decide if they will conduct two versions of their classes. The Office of Academic Affairs is collecting the professors' applications for conducting both versions until July 24. 
KAIST offered real-time online classes and pre-recorded KLMS (KAIST Learning Management System) classes during the spring semester with a very limited number of in-person lab classes for graduate courses and these two versions of online class will continue for fall semester. Provost Lee asked the students who will take the in-person classes to strictly observe all precaution measures as the university will do its best to abide by the government guidelines against the Covid-19 in preparation for the fall semester. "We will continue to make appropriate and safe accommodations for them," said Provost Lee. Those who need to reside in on-campus dormitories are required to be approved for moving. The applications will open after all the in-person class schedules are fixed next month. However, students who were approved for staying in the dormitories last semester can move in without additional approval procedures for the fall semester. (END)​ The 10th KINC Fusion Research Awardees The KAIST Institute for NanoCentury (KINC) recognized three distinguished researchers whose convergence studies made significant impacts. The KINC presented the 10th KINC Fusion Research Awards during a ceremony that took place at KAIST's main campus in Daejeon on May 19. This year's 'best' convergence research award went to a joint research group led by Professor Hee Tak Kim from the Department of Chemical and Biomolecular Engineering and Professor Sang Ouk Kim from the Department of Materials Science and Engineering. Their research, featured in the December 27 issue of Advanced Materials as a front cover article last year, introduced the world's first high-energy efficiency, membraneless, flowless, zinc-bromine battery. 
This study, in which research professor Gyoung Hwa Jeong, postdoctoral researcher Yearin Byun, and PhD candidate Ju-Hyuck Lee took part as co-lead authors, is deemed as an example of a best practice in convergence research in which two groups' respective expertise in the fields of carbon materials and electrochemical analysis created a synergistic effect. Professor Bumjoon Kim from the Department of Chemical and Biomolecular Engineering was also recognized for having published the most interdisciplinary research papers on polymer electronics and nanomaterials at home and abroad. Professor Hee-Tae Jung, the Director of KINC and the host of the KINC Fusion Research Awards, said, "The KINC is happy to announce the 10th awardees in nano-fusion research this year. Since convergence is crucial for making revolutionary changes, the importance of convergence studies should be recognized. Our institute will spare no effort to create a research environment suitable for convergence studies, which will be crucial for making a significant difference." The KINC was established in June 2006 under the KAIST Institute with the mission of facilitating convergence studies by tearing down boarders among departments and carrying out interdisciplinary joint research. Currently, the institute is comprised of approximately 90 professors from 13 departments. It aims to become a hub of university institutes for nano-fusion research. (END)​ Hubo Debuts as a News Anchor HUBO, a humanoid robot developed by Professor Jun-Ho Oh's team, made its debut as a co-anchor during the TJB prime time news 8 on May 14. "Un-contact" became the new normal after Covid-19 and many business solutions are being transformed using robotics. HUBO made two news reports on contactless services using robots in medical, manufacturing, and logistics industries. HUBO 2, the second generation of HUBO, appeared as a special anchor on the local broadcasting network's special program in celebration of its 25th anniversary. 
HUBO is the champion of the 2015 DARPA Robotics Challenge held in the USA. Its FX-2 riding robot also participated in the Olympic torch relay during the 2018 PyeongChang Winter Olympics. Click here to watch a full video of HUBO anchoring the news. (END)​ Professor Sukyung Park Named Presidential Science and Technology Adviser Professor Sukyung Park from the Department of Mechanical Engineering was appointed as the science and technology adviser to the President Jae-in Moon on May 4. Professor Park, at the age of 47, became the youngest member of the president's senior aide team at Chong Wa Dae. A Chong Wa Dae spokesman said on May 4 while announcing the appointment, "Professor Park, a talent with a great deal of policymaking participation in science and technology, will contribute to accelerating the government's push for science and technology innovation, especially in the information and communications technology (ICT) sector." Professor Park joined KAIST in 2004 as the first female professor of mechanical engineering. She is a biomechanics expert who has conducted extensive research on biometric mechanical behaviors. Professor Park is also a member of the KAIST Board of Trustees. Before that, she served as a senior researcher at the Korea Institute of Machinery and Materials (KIMM) as well as a member of the Presidential Advisory Council on Science and Technology. After graduating from Seoul Science High School as the first ever two-year graduate, Professor Park earned a bachelor and master's degrees in mechanical engineering at KAIST. She then finished her Ph.D. from the University of Michigan. (END)​ 14-Day Drawing Challenge Helps Maintain a Sense of Connection Amid Prolonged Social Distancing - "You need space, but you also need connections." - Schools and workplaces have closed and people are staying at home around the globe. Governments across the world have urged their people to keep a distance from others as a measure to slow the spread of the pandemic. 
With the Korean government's decision to extend the intensive social distancing campaign until at least April 19, people in Korea are advised to avoid nonessential trips, public facilities, and social gatherings for another two weeks or so. This unprecedented prolonged social distancing drive leads people to feel fatigue and frustration. Such emotional stress is worse for those who live alone in a foreign country. The International Scholar and Student Services (ISSS) Team at KAIST has been working around the clock to build a dedicated COVID-19 Mental Health Support Service to support the university's international community on campus and abroad and help get them connected online. As the COVID-19 situation lingers, there has been a growing demand for mental health support from many KAIST international members including 299 students who have been staying in Korea on their own and away from their families, as well as from those who could not return to campus from their overseas homes. In response to this, the KAIST ISSS Team has been offering some special online events and programs that can help the KAIST international community stay feeling connected wherever they are, while still keeping a safe distance from each other. For instance, the team is running an art-therapy program called 'The 14-day Drawing Challenge' March 30 through April 12. This program is online and individual-based, so it does not require any physical contact between participants. Each participant is asked to draw a picture at home using the daily topics previously set by the ISSS Team over 14 days. The topics include (Day 1) self-portrait, (Day 2) spring flowers, (Day 3) if you could become anything…, (Day 4) funniest memory you have, (Day 5) animals at KAIST, (Day 6) something you love, (Day 7) country or city you want to visit, (Day 8) what's for dinner?
(Day 9) person you miss, (Day 10) your favorite place at KAIST, (Day 11) your feeling today, (Day 12) things in your favorite color, (Day 13) song lyrics, and (Day 14) your future self in 10 years. Once all 14 pieces have been completed, submissions can be made online by sending an e-mail to the ISSS Team after scanning or taking a photo of each drawing. Selected submissions will be awarded small prizes for participation and shared through the university's official website and SNS channels. "All the participants need is paper, coloring tools, and their creativity and imagination. They don't have to be a great artist to join this challenge. There is no right or wrong or good or bad. They just need to have fun drawing every day for two weeks, ease their coronavirus anxiety, and remain emotionally stable just like they did back in the normal days," said Su-yeon Ahn, the manager of the KAIST ISSS Team. She added, "In times like these, you need space, but you also need connections. Our team wants our international students, professors, and researchers to build strong connections with each other, even online." Katherine Michelle Pena Santana, an M.S. candidate from the Department of Industrial and Systems Engineering who is taking part in 'The 14-day Drawing Challenge,' looked back and said, "Lately with the new coronavirus spreading around Korea and the entire world, I was feeling very anxious. I didn't get out of my room and lived by just looking at the same walls and creating some kind of a psychological burden on myself." Santana added that these kinds of activities could give many foreign members of KAIST an opportunity to not only relieve fear and stress, but also share each other's experiences dealing with this pandemic. She explained that this is why she decided to participate in this challenge. 
An undergraduate student from the Department of Physics, Ada Carpenter, appreciated the KAIST ISSS Team's efforts to provide a variety of special online mental health support services to help the university's international community socialize, while strictly following the government's guidelines for social distancing. She expressed excitement about participating and said, "I'm so looking forward to the challenge of things that I wouldn't normally draw." < Short Self-interview Video Clip Filmed by Ada Carpenter > The COVID-19 Mental Health Support Service by the KAIST ISSS Team will be continually updated with new information and enhanced with other tools and support over the coming weeks and months. Some of the upcoming events and programs include 'The Online Guitar Lessons', 'The Growing Houseplants Challenge', and 'The Any Song Challenge*'. * The song titled "Any Song" by Korean rapper Zico has been gaining attention on social media thanks to many celebrities taking on the 'Any Song Challenge', performing a short dance to the chorus of the song and sharing it on social media. (END)​ Former Minister of Science and Technology Woo Sik Kim Elected as New Chairman of Board of Trustees Dr. Woo Sik Kim, former Minister of Science and Technology and Deputy Prime Minister, was elected as the new chairman of the KAIST Board of Trustees on March 26. Dr. Kim will succeed Chairman Jang-Mu Lee, whose three-year term expired last month. Dr. Kim is a chemical engineering professor who spent most of his academic career at Yonsei University from 1968. In 2000, he held the office of president of Yonsei University for four years before moving to the Presidential Office of President Roh Moo-Hyun as his chief of staff in 2004. After serving in the Blue House for two years, he served as the Minister of Science and Technology from 2006 to 2008. 
An emeritus fellow of the National Academy of Engineering of Korea (NAEK), Chairman Kim also taught at KAIST as an invited distinguished professor from 2008 to 2010. He is currently the chairman of the Creativity Engineering Institute (CEI). (END)​ COVID-19 Update: Students and Professors Adjust to 1,200 Online Classes - Approximately 1,200 online classes are being offered during the cyber semester. - COVID-19 is transforming the way KAISTians live. Many restrictions imposed to contain the spread of the virus have us adjusting to a new environment swiftly. A cyber MOU signing ceremony with a foreign partner university took place on March 25, as did a cyber Board of Trustees Meeting on March 26. KAIST's Main Campus is normally one of the most iconic picnic destinations for the citizens of Daejeon, but this is not the case this spring, as the campus has been temporarily closed to protect our own community as well as our neighboring communities. KAIST has been offering approximately 1,200 courses remotely since this semester opened on March 16 and will do so until further notice. Students and faculty members are experiencing the newly emerging norms of remote education in this time of social distancing. This unexpected disruption might advance the new digital pedagogy at KAIST, which was already ahead of the curve with its online learning and teaching infrastructure. Professor Youngsun Kwon, the Dean of KAIST Academy and the Director of the Center for Excellence in Learning and Teaching, said, "We had already initiated the KAIST Learning Management System (KLMS) in 2011 for introducing flipped learning, a student-centric creative-learning pedagogy. Since then, about nine percent of all our classes have been run using this methodology. Students pre-study the online streaming lecture materials that professors have uploaded in advance outside the classroom, and in-class activities are mainly group discussions and problem-solving activities." 
According to Dean Kwon, the university was planning to further introduce real-time online education from this spring semester and were in the process of setting up the system started from last year. "Our plan was to connect the real-time video conferencing service Zoom to our existing remote educational platform KLMS. However, things related to COVID-19 all happened so rapidly that we didn't yet have a full-fledged connection," said Dean Kwon. Professors had to choose either to conduct their lectures remotely in the form of a pre-made one-way lesson or a real-time two-way lesson. They could also modify them using both platforms. Professor Youngchul Kim from the Department of Civil and Environmental Engineering said, "I had to also make some changes in my class activities and assignments. I removed a group design project and some tutorial workshops that were meant to provide students with hands-on experience using design tools such a 3D printer and a laser cutting system. Ironically, I found that students seem to focus on online lectures more intensely than I expected. I feel like students give me their thoughts and respond much quicker as well." Unfortunately, the online learning and teaching infrastructure and resources that had been put in place could not handle the overwhelming volume of classes being uploaded over very short period of time. To handle the new demand, IT technicians are setting up the technical environment with stable servers to improve network traffic. For professors, teaching assistants, and students to teach and learn better in an online space, department offices have been lending spare equipment such as laptops, tablets, headsets, and webcams to those who do not have their own, based on availability. Academic support staff have also been pitching in by developing the best guidelines for online training. "Even in these uncharted waters, all of the members of KAIST are doing their best to keep the ship steadily sailing in the right direction. 
I am very grateful for everyone's efforts to make things work," said Dean Kwon. About 60% of the courses currently offered online are being uploaded using the non-real-time KLMS, and the remaining 40% are run in real time via Zoom. Each class runs for 50 minutes per academic credit, and comprises at least 25 minutes of lecture, a Q&A session, and a group discussion. Students enrolled in the 481 courses that include experiments are asked to conduct their experiments individually after watching a 50-minute online lecture. Experimental, practical, and physical courses that are impossible to provide online have been cancelled or postponed until the next semester or summer/winter breaks. "I find the online lessons quite convenient for the courses that I am taking this semester, especially the non-real-time ones, because I can watch the lecture videos over and over again even after the class has finished to understand the contents better," said Jaymee Palma, an undergraduate student from the Department of Chemistry. Ada Carpenter, an undergraduate student from the Department of Physics, added, "Students who normally feel uncomfortable speaking in class raise their questions on an online Q&A board more easily. Besides, I saw many other students asking questions and leading a discussion verbally as well. I think, when students join a synchronous Zoom classroom, they are more engaged than when just attending a regular lecture in a conventional classroom. It's like everyone can sit in the front row of the class." Still, there are reportedly pedagogical, logistical, and technological challenges to these extraordinary educational measures. Some students express concerns about keeping up with professors and other students if they don't have sufficient technological knowledge and skills. Some also cite the disadvantage of online classes having much less interaction and engagement among students and between professors and students than offline ones. 
"Fortunately, I think my professors are all excellent, so I can immerse myself well during all my cyber classes," said Sang-Hyeon Lee, a graduate student from the School of Computing. (END) ​ COVID-19 Update: All Undergrad Housing Closed KAIST stepped up preventive measures against the outbreak of COVID-19 by closing all housing complexes for undergraduate students. Provost Kwang-Hyung Lee, in an email to KAIST community members on March 12, advised all undergraduate students who had already moved in to the dormitories to move out by March 23. The university opened the spring semester on March 16, two weeks later than originally scheduled, due to the outbreak. All in-person classes have been shifted to online classes and this will continue until further notice. "The dormitory would likely become the source of a COVID-19 cluster on the campus. Given the gravity of the current situation, we can't help but make this unprecedented measure. It is fully in the best interests of our students' health and safety. It saddens me to say that students are required to go back to their homes," said Provost Lee. Dormitory fees will be refunded, and transportation and storage services will be provided for students who return home. It has not yet been decided when they can return to the campus. There are four exceptional cases for this special measure: 1. when a student does not have legal residency in Korea, 2. if a student's legal residence is located in a severely affected region such as Daegu, Chongdo, and Kyongsan, 3. if students in their final semester before graduation need to take a research class that is not available online, 4. if students have a very special reason that does not allow them to stay at home. Such students are required to meet the Associate Vice President of Student Life for approval of the exceptional stay. 
Meanwhile, the first day of the online semester on March 16 saw an overwhelming amount of traffic on the remote educational platform, the KAIST learning management system (KLMS), and the real-time platform, Zoom. The two systems were both overloaded. The Dean of the KAIST Academy sent an email to the community, explaining the technical glitch causing the overload. He said his office had fixed the problem, allowing resumed access to the system from inside and outside the campus. Considering the nature of classes that are difficult or impossible to provide online, the university decided to cancel some of the physical training classes such as golf, dance sports, badminton, swimming, and tennis this semester. Social distancing is another measure the university is enhancing throughout the campus. The university announced new lunch break shifts to disperse the dining hall crowds; the first shift is from 11:30 to 12:30 and the second shift is from 12:30 to 13:30, effective from March 17. The COVID-19 response bulletin also instructed KAIST community members to sit in a row, not face to face, when eating together with colleagues, and asked them to refrain from talking while eating. In addition, a total of 29 virus and fine dust filtering machines have been installed across the campus dining facilities. The bulletin posted on March 13 restressed the importance of wearing a face mask in compact areas such as elevators and of refraining from non-essential business or personal travel. Parents who need to take care of their children due to the closure of schools and day care centers are advised to work from home. (END)​ COVID-19 Update: All Classes to Go Online after Semester Opens All classes of undergraduate and graduate courses will go online from March 16 in a protective measure for the KAIST community to slow the spread of COVID-19. No decision has yet been made for how long the online classes will last. 
The spring semester will start two weeks later than scheduled due to the outbreak of the COVID-19. For online classes, professors are uploading their taped class video clips onto the KAIST Learning Management System (KLMS). These classes will be conducted in both real time and on demand. The video conferencing solution Zoom will be employed for real-time online classes, and professors and students will interact using the bulletin board function for on-demand classes. The university is scaling up its institutional response to protect the KAIST community against the outbreak of the disease following the cancellation and postponing of major academic events including the commencement and matriculation ceremonies scheduled in February and March. The new protective measures include all sports complexes and facilities temporarily closing from February 24. All building entrance gates are only accessible with those carrying a KAIST ID card. A total of nine fever monitors have been installed in the university headquarter building, main library, dining halls, the day care center at Daejeon campus, and at the Seoul campus. The Emergency Response Team is posting a daily bulletin and response manual on the KAIST portal system with updates on the number of confirmed cases in Daejeon and other regions including Seoul as well as reminder notices to help contain the spread. Provost Kwang-Hyung Lee advised KAIST community members to refrain from traveling to the gravely affected region and foreign countries in an email sent on March 11. Anyone who has a travel history in those regions should report it to the Emergency Response Team and self-quarantine for two weeks at home or in a designated dorm complex. KAIST surveyed all community members' travel histories last month and instructed those who had traveled to Daegu and foreign countries or had contact with a confirmed patient to go into self-isolation or work from home while conducting intensive self-monitoring. 
They have been asked to report their temperature to the Emergency Response Team twice a day. The response manual recommends canceling or postponing meetings and events at the campus. "If necessary, we ask that you make a conference call instead," said the Emergency Response Team. Meanwhile, the Academic Affairs Office decided to employ a flexible academic schedule in consideration of students' circumstances during this extraordinary outbreak situation. "We still need to run 16 weeks of classes for the semester but we are being flexible in how the classes can be run. It will wholly depend on the professor and students' discussions based on their situation. We won't apply a unilateral mid-term and final exam week during this special time," said the bulletin from the Academic Affairs Office. (END) ​ >>마지막 페이지 24
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
7,013
Q: Setup with server and storage shelves for ~400TB on gigabit ethernet Today i sketched out a storage setup, but since i am not very experienced with enterprise level storage, i would be very glad to have the concept reality checked by more experienced folks. Unfortunately, i didn't manage to find an existing question or report that matches this scenario. Requirements are as follows: * *~400TB capacity with some kind of redundancy. There are off-site backups so full mirroring should not be necessary *There is only gigabit ethernet so the bandwidth requirement is at most to saturate this link *Few but large files *I'm trying to keep the cost around 10k€ What i am looking at is buying a used HP ProLiant DL360p, 2x Xeon E5-2660 8-Core 2.20 GHz and upgrading it with 32GB to 64GB ECC RAM. I will fit a PCIe8x Dell 6G SAS HBA (4 ports) to the server. Finally, i want to use three to four HP StorageWorks D2600 Disk Shelves, fitted with 12x12TB drives each. I have not yet decided what drives exactly, if there are suggestions, i'd be happy to hear them. With the mentioned controller, i would even have a dedicated controller port for each shelf, so i could group drives per shelf (as a RAID/ZFS pool or whatever it will be) and have potential 6G SAS bandwidth for each one. For the server, i suppose i will either use FreeNAS (so ZFS) or plain Linux and set everything up manually. Is this a feasible setup or are there obvious flaws? I would be very grateful for any feedback. A: Biggest flaw is FreeNAS. It's on the death row, ZFS on FreeBSD is being phased out by ZFS on Linux so I'd suggest to forget about FreeBSD and it's fork outs and stick with Linux distro you're familiar with. You don't mention IOPS and redundancy level so it's difficult to recommend how many RAIDz2 zvols you should stripe together to get what you want. Gigabit Ethernet sounds archaic.
{ "redpajama_set_name": "RedPajamaStackExchange" }
518
San Diego State vs. Boise State score update: Aztecs take the lead, 14-13, in 3rd quarter By Brandon Worley@brandonworley Nov 3, 2012, 11:02pm MDT Share All sharing options for: San Diego State vs. Boise State score update: Aztecs take the lead, 14-13, in 3rd quarter The Boise State Broncos trail the San Diego State Aztecs, 14-13, after three quarters on Saturday night thanks to an offense that stalled to start the second half and special teams mistakes. The Broncos carried a 13-7 lead into halftime after the Aztecs took an early 7-0 lead with an opening-kickoff touchdown return, thanks to a pair of D.J. Harper touchdowns. The Boise State offense would struggle coming back out of the locker room, however, starting with quarterback Joe Southwick's interception throw on the first play from scrimmage in the third quarter. A three-and-out for Boise State on their next possession would end in disaster, as the ensuing punt from deep within Broncos territory was blocked. The Aztecs would take advantage of the short field two plays later, with Adam Muema's 3-yard touchdown run giving San Diego State a 14-13 lead. The Broncos would gain just 31 yards of offense in the third quarter and are 2-of-9 on third down in the game. San Diego State vs. Boise State: Aztecs edge Broncos, 21-19 SDSU makes statement in defeating Boise State Aztecs take the lead, 14-13, in 3rd quarter Broncos pull ahead, 13-7, at the half
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
410
\section{Methods Appendix} \label{appendix:methods} \subsection{Concrete approximations} \label{appendix:concrete} \begin{figure}[h] \begin{center} \includegraphics[width=\linewidth]{figures/concrete_approx.pdf} \caption{Left: Concrete approximation to PI for temperatures \(\ensuremath{\tau} \in [1\mathrm{e-}3, 1\mathrm{e-}1]\). Middle: Concrete approximation to ES for temperatures \(\ensuremath{\tau} \in [1\mathrm{e-}3, 1\mathrm{e-}1]\). Right: Stochastic gradient ascent trajectories when maximizing concrete approximation to parallel versions of PI (top left) and ES (bottom right), both with a temperature \(\ensuremath{\tau}=0.01\).} \label{fig:concrete_approx} \end{center} \end{figure} As per Section~\ref{sect:differentiability}, utility is sometimes measured in terms of discrete events \(\ensuremath{\boldsymbol{e}} \in \ensuremath{\mathcal{E}}\). Unfortunately, mappings from continuous values \(\ensuremath{\mathbf{y}} \in \ensuremath{\mathcal{Y}}\) to the space of discrete events \ensuremath{\mathcal{E}}{} are typically discontinuous and, therefore, violate a necessary condition for interchangeability of differentiation and expectation. In the generalized context of differentiating a Monte Carlo integral \text{w.r.t.\@}{} the parameters of a \emph{discrete} generative distribution, \cite{jang2016categorical,maddison2016concrete} proposed to resolve this issue by introducing a continuous approximation to the aforementioned discontinuous mapping. As a guiding example, assume that \(\ensuremath{\boldsymbol{\theta}}\) is a self-normalized vector of \ensuremath{q}{} parameters such that \(\forall \ensuremath{\theta} \in \ensuremath{\boldsymbol{\theta}},\, \ensuremath{\theta} \ge 0\) and that \(\ensuremath{\mathbf{z}} = \T{[\ensuremath{z}_{1},\ldots,\ensuremath{z}_{\ensuremath{q}}]}\) is a corresponding vector of uniform random variables. 
Subsequently, let \(\ensuremath{\phi}(\ensuremath{\mathbf{z}}; \ensuremath{\boldsymbol{\theta}}) = \log(\nicefrac{-\ensuremath{\boldsymbol{\theta}}}{\log \ensuremath{\mathbf{z}}})\) be defined as random variables \ensuremath{\mathbf{y}}'s reparameterization. Denoting by \(\opt{\ensuremath{y}} = \max(\ensuremath{\mathbf{y}})\), the vector-valued function \(\ensuremath{\boldsymbol{e}} : \ensuremath{\mathcal{Y}}^{\ensuremath{q}}\mapsto \{0, 1\}^{\ensuremath{q}}\) defined as \begin{align} \ensuremath{\boldsymbol{e}}(\ensuremath{\mathbf{z}}; \ensuremath{\boldsymbol{\theta}}) = \T{[\ensuremath{y}_{1} \ge \opt{\ensuremath{y}},\ldots, \ensuremath{y}_{\ensuremath{q}} \ge \opt{\ensuremath{y}}]} = \ensuremath{\operatorname{\mathbbm{1}}}^{+}\left(\ensuremath{\mathbf{y}} - \opt{\ensuremath{y}}\right) \label{eq:event_onehot} \end{align} then reparameterizes a (one-hot encoded) categorical random variable \ensuremath{\boldsymbol{e}}{} having distribution \(p(\ensuremath{\boldsymbol{e}}; \ensuremath{\boldsymbol{\theta}}) = \prod\nolimits_{i=1}^{\ensuremath{q}} \ensuremath{\theta}_{i}^{\ensuremath{\boldsymbol{e}}_{i}}\). Importantly, we can rewrite \eqref{eq:event_onehot} as the zero-temperature limit of as continuous mapping \(\tilde{\ensuremath{\boldsymbol{e}}} : \ensuremath{\mathcal{Y}}^{\ensuremath{q}} \mapsto [0, 1]^{\ensuremath{q}}\) defined as \begin{align} \tilde{\ensuremath{\boldsymbol{e}}}(\ensuremath{\mathbf{y}}; \ensuremath{\tau}) = \ensuremath{\operatorname{softmax}}\left(\frac{\ensuremath{\mathbf{y}} - \opt{\ensuremath{y}}}{\ensuremath{\tau}}\right) = \ensuremath{\operatorname{softmax}}\left(\frac{\ensuremath{\mathbf{y}}}{\ensuremath{\tau}}\right), \label{eq:event_softmax} \end{align} where \(\ensuremath{\tau} \in [0, \infty]\) is a temperature parameter. For non-zero temperatures \(\ensuremath{\tau} > 0\), we obtain a relaxed version of the original (one-hot encoded) categorical event. 
Unlike the original however, the relaxed event satisfies the conditions for interchanging differentiation and expectation. Returning to the case of an acquisition function \eqref{eq:expected_utility} defined over a multivariate normal belief \(p(\ensuremath{\mathbf{y}}|\ensuremath{\mathbf{X}})\) with parameters \(\ensuremath{\boldsymbol{\theta}} = (\ensuremath{\boldsymbol{\mu}},\ensuremath{\boldsymbol{\Sigma}})\), random variables \ensuremath{\mathbf{y}}{} are instead reparameterized by \(\ensuremath{\phi}(\ensuremath{\mathbf{z}}; \ensuremath{\theta}) = \ensuremath{\boldsymbol{\mu}} + \ensuremath{\mathbf{L}} \operatorname{g}(\ensuremath{\mathbf{z}})\), where \(\operatorname{g}\) denotes, e.g., the Box-Muller transform of uniform \text{rvs}{} \(\ensuremath{\mathbf{z}} = \T{[\ensuremath{z}_{1},\ldots,\ensuremath{z}_{2\ensuremath{q}}]}\). This particular example demonstrates how Entropy Search's innermost integrand can be relaxed using a \emph{concrete} approximation. Identical logic can be applied to approximate Probability of Improvement's integrand. For Monte Carlo versions of both acquisition functions, Figure~\ref{fig:concrete_approx} shows the resulting approximation across a range of temperatures along with gradient-based optimization in the parallel setting \(\ensuremath{q} = 2\). Whereas for high temperatures \(\ensuremath{\tau}\) the approximations wash out, both converge to the corresponding true function as \(\ensuremath{\tau} \to 0\). 
\newpage \subsection{Parallel Upper Confidence Bound (\parallelize{UCB}{})} \label{appendix:parallel_ucb} For convenience, we begin by reproducing \eqref{eq:expected_utility_mvn} as indefinite integrals, \begin{align*} \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) =\int_{\boldsymbol{-\infty}}^{\boldsymbol{\infty}} \ensuremath{\ell}(\ensuremath{\mathbf{y}}) \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}}) d\ensuremath{\mathbf{y}} = \int_{\boldsymbol{-\infty}}^{\boldsymbol{\infty}} \ensuremath{\ell}(\ensuremath{\boldsymbol{\mu}} + \ensuremath{\mathbf{L}}\ensuremath{\mathbf{z}}) \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{z}}; \ensuremath{\mathbf{0}}, \ensuremath{\mathbf{I}}) d\ensuremath{\mathbf{z}}. \end{align*} Working backward through this equation, we derive an exact expression for parallel \ensuremath{\operatorname{UCB}}{}. To this end, we introduce the definition \begin{align} \label{eq:integral_def} \sqrt{\frac{\pi}{2}}\int_{-\infty}^{\infty}\abs{\ensuremath{\sigma}\ensuremath{z}} \ensuremath{\mathcal{N}}(\ensuremath{z}; 0, 1)d\ensuremath{z} = \sqrt{2\pi}\int_{0}^{\infty} \ensuremath{y} \ensuremath{\mathcal{N}}(\ensuremath{y}; 0, \ensuremath{\stddev^{2}})d\ensuremath{y} = \ensuremath{\sigma}, \end{align} where \(\abs{\cdot}\) denotes the (pointwise) absolute value operator.\footnote{This definition comes directly from the standard integral identity \cite{gradshteyn2014table}: \(\int_{0}^{b}xe^{-q^{2}x^{2}}dx = \frac{1 - e^{-q^{2}b^{2}}}{2q^{2}}\).} Using this fact and given \(\ensuremath{z} \sim \ensuremath{\mathcal{N}}(0, 1)\), let \(\hat{\ensuremath{\sigma}}^{2} \triangleq (\nicefrac{\beta\pi}{2})\ensuremath{\stddev^{2}}\) such that \(\mathbb{E}{\abs{\hat{\ensuremath{\sigma}}\ensuremath{z}}} = \beta^{\ensuremath{\nicefrac{1}{2}}}\ensuremath{\sigma}\). 
Under this notation, marginal \ensuremath{\operatorname{UCB}}{} can be expressed as \begin{align} \begin{split} \ensuremath{\operatorname{UCB}}(\ensuremath{\mathbf{x}}; \beta) &=\ensuremath{\mu} + \beta^{\ensuremath{\nicefrac{1}{2}}}\ensuremath{\sigma}\\ &=\int_{-\infty}^{\infty} \left(\ensuremath{\mu} + \abs{\hat{\ensuremath{\sigma}} \ensuremath{z}}\right) \ensuremath{\mathcal{N}}(\ensuremath{z}; 0, 1) d\ensuremath{z}\\ &= \int_{-\infty}^{\infty} \left(\ensuremath{\mu} + \abs{\ensuremath{\gamma}}\right) \ensuremath{\mathcal{N}}(\ensuremath{\gamma}; 0, \hat{\ensuremath{\sigma}}^2)d\ensuremath{\gamma} \end{split} \label{eq:marginal_ucb} \end{align} where \((\ensuremath{\mu}, \ensuremath{\stddev^{2}})\) parameterize a Gaussian belief over \(\ensuremath{y} = f(\ensuremath{\mathbf{x}}{})\) and \(\ensuremath{\gamma} = \ensuremath{y} - \mu\) denotes \ensuremath{y}{}'s residual. This integral form of \ensuremath{\operatorname{UCB}}{} is advantageous precisely because it naturally lends itself to the generalized expression \begin{align} \begin{split} \ensuremath{\operatorname{UCB}}(\ensuremath{\mathbf{X}}; \beta) &= \int_{\boldsymbol{-\infty}}^{\boldsymbol{\infty}} \max(\ensuremath{\boldsymbol{\mu}} + \abs{\ensuremath{\boldsymbol{\gamma}}}) \ensuremath{\mathcal{N}}(\ensuremath{\boldsymbol{\gamma}}; \ensuremath{\mathbf{0}}, \hat{\ensuremath{\boldsymbol{\Sigma}}}) d\ensuremath{\boldsymbol{\gamma}}\\ &= \int_{\boldsymbol{-\infty}}^{\boldsymbol{\infty}} \max(\ensuremath{\boldsymbol{\mu}} + \abs{\hat{\ensuremath{\mathbf{L}}}\ensuremath{\mathbf{z}}}) \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{z}}; \ensuremath{\mathbf{0}}, \ensuremath{\mathbf{I}}) d\ensuremath{\mathbf{z}}\\ &\approx \frac{1}{\ensuremath{m}} \sum^{\ensuremath{m}}_{\ensuremath{k}=1} \max(\ensuremath{\boldsymbol{\mu}} + \abs{\hat{\ensuremath{\mathbf{L}}}\sample{\ensuremath{\mathbf{z}}}}) \text{~~for~~} \sample{\ensuremath{\mathbf{z}}} \sim \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{0}}, \ensuremath{\mathbf{I}}), 
\end{split} \label{eq:parallel_ucb} \end{align} where \(\hat{\ensuremath{\mathbf{L}}}\T{\hat{\ensuremath{\mathbf{L}}}} = \hat{\ensuremath{\boldsymbol{\Sigma}}} \triangleq (\nicefrac{\beta\pi}{2})\ensuremath{\boldsymbol{\Sigma}}\). This representation has the requisite property that, for any size \(\alt{\ensuremath{q}} \le \ensuremath{q}\) subset of \ensuremath{\mathbf{X}}, the value obtained when marginalizing out the remaining \(\ensuremath{q} - \alt{\ensuremath{q}}\) terms is its \alt{\ensuremath{q}}-UCB value. Previous methods for parallelizing \ensuremath{\operatorname{UCB}}{} have approached the problem by imitating a purely sequential strategy \cite{contal2013parallel,desautels2014parallelizing}. Because a fully Bayesian approach to sequential selection generally involves an exponential number of posteriors, these works incorporate various well-chosen heuristics for the purpose of efficiently approximate parallel \ensuremath{\operatorname{UCB}}{}.\footnote{Due to the stochastic nature of the mean updates, the number of posteriors grows exponentially in \ensuremath{q}.} By directly addressing the associated \(\ensuremath{q}{}\)-dimensional integral however, Eq.~\eqref{eq:parallel_ucb} avoids the need for such approximations and, instead, unbiasedly estimates the true value. Finally, the special case of marginal \ensuremath{\operatorname{UCB}}~\eqref{eq:marginal_ucb} can be further simplified as \begin{align} \label{eq:mUCB_simple} \ensuremath{\operatorname{UCB}}(\ensuremath{\mathbf{x}}; \beta) =\ensuremath{\mu} + 2\int_{0}^{\infty}\hat{\ensuremath{\sigma}} \ensuremath{z} \ensuremath{\mathcal{N}}(\ensuremath{z}; 0, 1)dz =\int_{\mu}^{\infty} \ensuremath{y} \ensuremath{\mathcal{N}}(\ensuremath{y}; \mu, 2\pi\beta\ensuremath{\stddev^{2}}) d\ensuremath{y}, \end{align} revealing an intuitive form --- namely, the expectation of a Gaussian random variable (with rescaled covariance) above its mean. 
\pagebreak \subsection{Normalizing utility functions} \label{appendix:normalizing_utility} An additional requirement when proving the near-optimality of greedy maximization for a SM{} function \ensuremath{\mathcal{L}}{} is that \ensuremath{\mathcal{L}}{} be a normalized set function such that \(\ensuremath{\mathcal{L}}(\emptyset) = 0\). As in Section~\ref{sect:myopic_maximal}, let \ensuremath{v_{\min}}{} be defined as the smallest possible utility value given a utility function \ensuremath{\ell}{} defined over a ground set \version{f} indexed by \ensuremath{\mathcal{X}}{}. Because the \(\max\) is additive such that \(\max(\ensuremath{\mathbf{y}} - \ensuremath{v_{\min}}) = \max(\ensuremath{\mathbf{y}}) - \ensuremath{v_{\min}}\), normalization is only necessary when establishing regret bounds and simply requires lower bounding \(\ensuremath{v_{\min}}{} > -\infty\). This task is facilitated by the fact that \ensuremath{v_{\min}}{} pertains to the outputs of \ensuremath{\ell}{} rather than to (a belief over) black-box \ensuremath{f}{}. \temp{Addressing the matter by case, we have:} \begin{enumerate}[label=\alph*.,leftmargin=16pt] \item Expected Improvement: For a given threshold \ensuremath{\alpha}{}, let improvement be defined (pointwise) as \(\ensuremath{\operatorname{ReLU}}(\ensuremath{\mathbf{y}} - \ensuremath{\alpha}) = \max(0, \ensuremath{\mathbf{y}} - \ensuremath{\alpha})\). 
\ensuremath{\operatorname{EI}}'s integrand is then the largest improvement \(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}(\ensuremath{\mathbf{y}}; \ensuremath{\alpha}) = \max(\ensuremath{\operatorname{ReLU}}(\ensuremath{\mathbf{y}} - \ensuremath{\alpha}))\).\footnote{\(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}\) is often written as the improvement of \(\max(\ensuremath{\mathbf{y}})\); however, these two forms are equivalent.} Applying the rectifier prior to the \(\max\) defines \(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}\) as a normalized, submodular function. \item Probability of Improvement: \ensuremath{\operatorname{PI}}'s integrand is defined as \(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{PI}}}}(\ensuremath{\mathbf{y}}, \alpha) = \max(\ensuremath{\operatorname{\mathbbm{1}}}^{-}(\ensuremath{\mathbf{y}} - \alpha))\), where \(\ensuremath{\operatorname{\mathbbm{1}}}^{-}\) denotes the left-continuous Heaviside step function. Seeing as the Heaviside maps \(\ensuremath{\mathcal{Y}} \mapsto \{0,1\}\), \(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{PI}}}}\) is already normalized. \item Simple Regret: The submodularity of Simple Regret was previously discussed in \cite{azimi2010batch}, under the assumption \(\ensuremath{v_{\min}} = 0\). More generally, normalizing \(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{SR}}}}\) requires bounding \ensuremath{f}'s infimum under \(p\). Technical challenges for doing so make submodular maximization of SR the hardest to justify. \item Upper Confidence Bound: As per \eqref{eq:parallel_ucb_short}, define \ensuremath{\operatorname{UCB}}'s integrand as the maximum over \ensuremath{\mathbf{y}}'s expectation incremented by non-negative terms. 
By definition then, \(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{UCB}}}}\) is lower bounded by the predictive mean and can therefore be normalized as \(\bar{\ensuremath{\ell}}_{\acqSubscript{\ensuremath{\operatorname{UCB}}}} = \max(\ensuremath{\boldsymbol{\mu}} + \abs{\ensuremath{\boldsymbol{\gamma}}} - \ensuremath{v_{\min}})\), provided that \(\ensuremath{v_{\min}} = \min_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{X}}}\ensuremath{\mu}(\ensuremath{\mathbf{x}})\) is finite. For a zero-mean GP with a twice differentiable kernel, this condition is guaranteed for bounded functions \ensuremath{f}{}. \end{enumerate} \subsection{Expected Improvement's incremental{} form} For the special case of \(\ensuremath{\mathcal{L}}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}\), the expected improvement of improvement integrand \(\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}\) simplifies as: \begin{align} \begin{split} \ensuremath{\operatorname{EI}}_{\ensuremath{\ell}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}}(\NEW{1}{\ensuremath{\mathbf{x}}}, \ensuremath{\mathcal{D}}_{\ensuremath{j}}) &= \mathbb{E}_{\NEW{1}{\ensuremath{y}}}\left[ \ensuremath{\operatorname{ReLU}}\left( \ensuremath{\operatorname{ReLU}}(\NEW{1}{\ensuremath{y}} - \ensuremath{\alpha}) - \max \ensuremath{\operatorname{ReLU}}(\OLD{1}{\ensuremath{\mathbf{y}}} - \ensuremath{\alpha}) \right)\right]\\ &= \mathbb{E}_{\NEW{1}{\ensuremath{y}}}\left[ \ensuremath{\operatorname{ReLU}}\left( \max(\ensuremath{\alpha}, \NEW{1}{\ensuremath{y}}) - \max(\ensuremath{\alpha}, \max \OLD{1}{\ensuremath{\mathbf{y}}}) \right)\right]\\ &= \mathbb{E}_{\NEW{1}{\ensuremath{y}}}\left[ \ensuremath{\operatorname{ReLU}}\left( \NEW{1}{\ensuremath{y}} - \max(\ensuremath{\alpha}, \max \OLD{1}{\ensuremath{\mathbf{y}}}) \right)\right]\\ &= \ensuremath{\mathcal{L}}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}(\NEW{1}{\ensuremath{\mathbf{x}}}; \ensuremath{\mathcal{D}}_{\ensuremath{j}}), \end{split} 
\label{eq:incremental{}_ei} \end{align} where \(\alpha = \max(\{y : \forall (\ensuremath{\mathbf{x}}, \ensuremath{y}) \in \ensuremath{\mathcal{D}}\})\) denotes the initial improvement threshold. \section{Experiments Appendix} \label{appendix:experiments} \subsection{Experiment Details} \label{appendix:experiment_details} \subheading{Synthetic tasks} To eliminate model error, experiments were first run on synthetic tasks drawn from a known prior. For a GP with a continuous, stationary kernel, approximate draws \ensuremath{f}{} can be constructed via a weighted sum of basis functions sampled from the corresponding Fourier dual \cite{bochner1959lectures, rasmussen-book06a, rahimi2008random, hernandez-nips14}. For a \matern{$\nu$}{2} kernel with anisotropic lengthscales \(\ensuremath{\boldsymbol{\Lambda}}^{-1}\), the associated spectral density is the multivariate \(\mathnormal{t}\)-distribution \(t_{\nu}(\ensuremath{\mathbf{0}}, \ensuremath{\boldsymbol{\Lambda}}^{-1})\) with \(\nu\) degrees of freedom \cite{kotz2004multivariate}. For our experiments, we set \(\ensuremath{\boldsymbol{\Lambda}} = (\nicefrac{\ensuremath{d}}{16})\,\ensuremath{\mathbf{I}} \) and approximated \ensuremath{f}{} using \(2^{14}\) basis functions, \temp{resulting in} tasks that were sufficiently challenging and closely resembled exact draws from the prior. \subheading{Maximizers} In additional to findings reported in the text, we compared several gradient-based approaches (incl. L-BFGS-B and Polyak averaging~\cite{wang2016parallel}) and found that \text{Adam}{} consistently delivered superior performance. CMA-ES was included after repeatedly outperforming the rival black-box method \text{DIRECT}{} \cite{jones1993lipschitzian}. RS was chosen as a na\"ive acquisition maximizer{} after Successive Halving (SH, \cite{karnin2013almost,jamieson2016non}) failed to yield significant improvement. 
For consistency when identifying the best proposed query set(s), both RS and SH used deterministic estimators \AcqMC{}. Whereas RS was run with a constant batch-size \(\ensuremath{m}=1024\), SH started small and iteratively increased \ensuremath{m}{} to refine estimated acquisition values for promising candidates using a cumulative moving average and cached posteriors. \subsection{Multi-start initialization} \label{appendix:initialization} As noted in \cite{wang2016parallel}, gradient-based query optimization strategies are often sensitive to the choice of starting positions. This \temp{sensitivity} naturally occurs for two primary reasons. First, acquisition functions \ensuremath{\mathcal{L}}{} are consistently non-convex. As a result, it is easy for members of query set \(\ensuremath{\mathbf{X}} \subseteq \ensuremath{\mathcal{X}}\) to get stuck in local regions of the space. Second, acquisition surfaces are frequently patterned by (large) plateaus offering little expected utility. Such plateaus typically emerge when corresponding regions of \ensuremath{\mathcal{X}}{} are thought to be inferior and are therefore excluded from the search process. To combat this issue, we appeal to the submodularity of \ensuremath{\mathcal{L}}{} (see Section~\ref{sect:myopic_maximal}). Assuming \ensuremath{\mathcal{L}}{} is submodular, then acquisition values exhibit diminishing returns \text{w.r.t.\@}{} the degree of parallelism \ensuremath{q}{}. As a result, the marginal value for querying a single point \(\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{X}}\) upper bounds its potential contribution to any query set \ensuremath{\mathbf{X}}{} s.t. \(\ensuremath{\mathbf{x}} \in \ensuremath{\mathbf{X}}\). Moreover, marginal acquisition functions \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{x}})\) are substantially cheaper to compute that parallel ones (see Figure~\ref{fig:overview_pt1}d). 
Accordingly, we can initialize query sets \ensuremath{\mathbf{X}}{} by sampling from \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{x}})\). By doing so we \temp{gracefully avoid initializing points in excluded regions}, mitigating the impact of acquisition plateaus. In our experiments we observed consistent performance gains when using this strategy in conjunction with most query optimizers. To accommodate runtime constraints, the initialization process was run for the first tenth of the allocated time. Lastly, when greedily maximizing \ensuremath{\mathcal{L}}{} (equiv. in parallel asynchronous cases), ``pending'' queries were handled by fantasizing observations at their predictive mean. Conditioning on the expected value reduces uncertainty in the vicinity of the corresponding design points and, in turn, promotes diversity within individual query sets \cite{desautels2014parallelizing}. To the extent that this additional step helped in our experiments, the change in performance was rather modest. \subsection{Extended Results} \label{appendix:results_extended} Additional results for both \parallelize{UCB}{} (Section~\ref{sect:differentiability}) and incremental{} form \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} (Section~\ref{sect:myopic_maximal}) are shown here. These experiments were run under identical conditions to those in Section~\ref{sect:experiments}. \subheading{Parallel UCB} We set confidence parameter \(\beta = 2\). Except for on the Levy benchmark, \parallelize{UCB}{} outperformed \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{}, and this result also held for both Branin-Hoo and Hartmann-3 (not shown). \subheading{Incremental q-EI} We tested performance using \(\ensuremath{m} \in \{16, 32, 64, 128\}\) states. 
At the end of the first round of greedy selection \(k=1\), \ensuremath{m}{} outcomes \(\ensuremath{y}^{(i)}_{1} \sim p(\ensuremath{y}_{1} | \ensuremath{\mathbf{x}}_{1}, \ensuremath{\mathcal{D}})\) were fantasized, producing \ensuremath{m}{} distinct fantasy states \(\ensuremath{\mathcal{D}}^{(i)}_{1} = \ensuremath{\mathcal{D}} \cup \{(\ensuremath{\mathbf{x}}_{1}, \ensuremath{y}^{(i)}_{1})\}\). At all other steps \(k \in [2,\ensuremath{q}]\), a single outcome was fantasized for each state such that the number of states remained constant. Additionally, fantasized outcomes were never resampled. Figure~\ref{fig:results_synthetic_ei_incremental{}} compares results obtained when greedily maximizing incremental{} \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} (with \(\ensuremath{m}=16\)) to those obtained when greedily maximizing joint \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} (as discussed in Section~\ref{sect:experiments}). In contrast with the larger body of results, CMA-ES combined with incremental{} \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} outperformed gradient-based optimization for higher dimensional acquisition surfaces. \begin{figure}[t] \begin{center} \includegraphics[width=\linewidth]{figures/synthetic_ucb.pdf} \caption{Average performance of different acquisition \solver s{} on synthetic tasks from a known prior, given varied runtimes when maximizing Monte Carlo \parallelize{UCB}. 
Reported values indicate the log of the immediate regret \(\log_{10}\left|f_{\max} - f(\opt{\ensuremath{\mathbf{x}}})\right|\), where \(\opt{\ensuremath{\mathbf{x}}}\) denotes the observed maximizer \(\opt{\ensuremath{\mathbf{x}}} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{D}}} \hat{\ensuremath{y}}\).} \label{fig:results_synthetic_ucb} \end{center} \end{figure} \begin{figure} \begin{center} \includegraphics[width=\linewidth]{figures/blackbox_ucb.pdf} \caption{Average performance of different acquisition \solver s{} on black-box tasks from an unknown prior, given varied runtimes when maximizing Monte Carlo \parallelize{UCB}. Reported values indicate the log of the immediate regret \(\log_{10}\left|f_{\max} - f(\opt{\ensuremath{\mathbf{x}}})\right|\), where \(\opt{\ensuremath{\mathbf{x}}}\) denotes the observed maximizer \(\opt{\ensuremath{\mathbf{x}}} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{D}}} \hat{\ensuremath{y}}\).} \label{fig:results_blackbox_ucb} \end{center} \end{figure} \begin{figure}[t] \begin{center} \includegraphics[width=\linewidth]{figures/synthetic_ei_incr_full.pdf} \caption{Average performance when greedily maximizing joint vs. incremental{} forms of \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{}.} \label{fig:results_synthetic_ei_incremental{}} \end{center} \end{figure} \section{Background} \label{sect:background} \begin{figure}[t] \begin{center} \includegraphics[width=\linewidth]{figures/overview_part1.pdf} \vspace{-20pt} \caption{(a) Pseudo-code for standard BO's ``outer-loop'' with parallelism \ensuremath{q}{}; the inner optimization problem is boxed in red. (b--c) GP-based belief and expected utility (EI), given four initial observations `\raisebox{1.25pt}{$\scriptscriptstyle{\bullet}$}'. The aim of the inner optimization problem is to find the optimal query `\raisebox{-1.75pt}{\FiveStarOpen}'. 
(d) Time to compute \(2^{14}\) evaluations of MC{} \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} using a GP surrogate for varied observation counts and degrees of parallelism. Runtimes fall off at the final step because \ensuremath{q}{} decreases to accommodate evaluation budget \(T=1,024\). } \label{fig:overview_pt1} \end{center} \end{figure} Bayesian optimization relies on both a surrogate model \ensuremath{\mathcal{M}}{} and an acquisition function \ensuremath{\mathcal{L}}{} to define a strategy for efficiently maximizing a black-box{} function \ensuremath{f}{}. At each ``outer-loop'' iteration (\warn{Figure~\ref{fig:overview_pt1}a}), this strategy is used to choose a set of queries \ensuremath{\mathbf{X}}{} whose evaluation advances the search process. This section reviews related concepts and closes with discussion of the associated inner optimization problem. For an in-depth review of BO, we defer to the recent survey \cite{shahriari-ieee16}. Without loss of generality, we assume BO strategies evaluate \ensuremath{q}{} designs \(\ensuremath{\mathbf{X}} \in \mathbb{R}^{\ensuremath{q} \times \ensuremath{d}}\) in parallel so that setting \(\ensuremath{q}=1\) recovers purely sequential decision-making. We denote available information regarding \ensuremath{f}{} as \(\ensuremath{\mathcal{D}} = \{(\ensuremath{\mathbf{x}}_{i}, \ensuremath{y}_{i})\}_{i=1}^{\ldots}\) and, for notational convenience, assume noiseless observations \(\ensuremath{\mathbf{y}} = f(\ensuremath{\mathbf{X}})\). Additionally, we refer to \ensuremath{\mathcal{L}}'s parameters (such as an improvement threshold) as \ensuremath{\boldsymbol{\psi}}{} and to \ensuremath{\mathcal{M}}'s parameters as \ensuremath{\boldsymbol{\ensuremath{\zeta}}}{}. Henceforth, direct reference to these terms will be omitted where possible. 
\subheading{Surrogate models} A surrogate model \ensuremath{\mathcal{M}}{} provides a probabilistic interpretation of \ensuremath{f}{} whereby possible explanations for the function are seen as draws \(\version{\ensuremath{f}} \sim p(\ensuremath{f} | \ensuremath{\mathcal{D}})\). In some cases, this belief is expressed as an explicit ensemble of sample functions \cite{hernandez-nips14,springenberg2016bayesian,wang2017max}. More commonly however, \ensuremath{\mathcal{M}}{} dictates the parameters \(\ensuremath{\boldsymbol{\theta}}\) of a (joint) distribution over the function's behavior at a finite set of points \ensuremath{\mathbf{X}}{}. By first tuning the model's (hyper)parameters \(\ensuremath{\boldsymbol{\ensuremath{\zeta}}}\) to explain \(\ensuremath{\mathcal{D}}\), a belief is formed as \(p(\ensuremath{\mathbf{y}}|\ensuremath{\mathbf{X}}, \ensuremath{\mathcal{D}}) = p(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\theta}})\) with \(\ensuremath{\boldsymbol{\theta}} \gets \ensuremath{\mathcal{M}}(\ensuremath{\mathbf{X}}; \ensuremath{\boldsymbol{\ensuremath{\zeta}}})\). Throughout, \(\ensuremath{\boldsymbol{\theta}} \gets \ensuremath{\mathcal{M}}(\ensuremath{\mathbf{X}}; \ensuremath{\boldsymbol{\ensuremath{\zeta}}})\) is used to denote that belief \(p\)'s parameters \ensuremath{\boldsymbol{\theta}}{} are specified by model \ensuremath{\mathcal{M}}{} evaluated at \ensuremath{\mathbf{X}}. 
A member of this latter category, the Gaussian process prior (GP) is the most widely used surrogate and induces a multivariate normal belief \(\ensuremath{\boldsymbol{\theta}} \triangleq (\ensuremath{\boldsymbol{\mu}},\ensuremath{\boldsymbol{\Sigma}}) \gets \ensuremath{\mathcal{M}}(\ensuremath{\mathbf{X}}; \ensuremath{\boldsymbol{\ensuremath{\zeta}}})\) such that \(p(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\theta}}) = \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}})\) for any finite set \ensuremath{\mathbf{X}}{} (see \warn{Figure~\ref{fig:overview_pt1}b}). \subheading{Acquisition functions} With few exceptions, acquisition functions amount to integrals defined in terms of a belief \(p\) over the unknown outcomes \(\ensuremath{\mathbf{y}} = \{\ensuremath{y}_{1},\ldots,\ensuremath{y}_{\ensuremath{q}}\}\) revealed when evaluating a black-box{} function \ensuremath{f}{} at corresponding input locations \(\ensuremath{\mathbf{X}} = \{\ensuremath{\mathbf{x}}_{1},\ldots,\ensuremath{\mathbf{x}}_{\ensuremath{q}}\}\). This formulation naturally occurs as part of a Bayesian approach whereby the value of querying \ensuremath{\mathbf{X}}{} is determined by accounting for the utility provided by possible outcomes \(\sample{\ensuremath{\mathbf{y}}} \sim p(\ensuremath{\mathbf{y}}|\ensuremath{\mathbf{X}}, \ensuremath{\mathcal{D}})\). Denoting the chosen utility function as \ensuremath{\ell}{}, this paradigm leads to acquisition functions defined as expectations \begin{align} \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}; \ensuremath{\mathcal{D}}, \ensuremath{\boldsymbol{\psi}}) = \mathbb{E}_{\ensuremath{\mathbf{y}}}\left[\ensuremath{\ell}(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\psi}})\right] = \int \ensuremath{\ell}(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\psi}}) p(\ensuremath{\mathbf{y}}|\ensuremath{\mathbf{X}}, \ensuremath{\mathcal{D}}{}) d\ensuremath{\mathbf{y}}\,. 
\label{eq:expected_utility} \end{align} A seeming exception to this rule, \emph{non-myopic} acquisition functions assign value by further considering how different realizations of \(\sample{\ensuremath{\mathcal{D}}}_{\ensuremath{q}} \gets \ensuremath{\mathcal{D}} \cup \{(\ensuremath{\mathbf{x}}_{i},\sample{\ensuremath{y}}_{i})\}_{i=1}^{\ensuremath{q}}\) impact our broader understanding of \ensuremath{f}{} and usually correspond to more complex, nested integrals. \warn{Figure~\ref{fig:overview_pt1}c} portrays a prototypical acquisition surface and Table~\ref{table:reparameterizations} exemplifies popular, myopic and non-myopic instances of~\eqref{eq:expected_utility}. \begin{table} \begin{center} \input{tables/reparameterizations.tex} \captionsetup{width=1.0\textwidth} \vspace{4pt} \caption{ Examples of reparameterizable acquisition functions; the final column indicates whether they belong to the MM{} family (Section~\ref{sect:myopic_maximal}). Glossary: \(\ensuremath{\operatorname{\mathbbm{1}}}^{+/-}\) denotes the right-/left-continuous Heaviside step function; \ensuremath{\operatorname{ReLU}}{} and \ensuremath{\operatorname{\sigma}}{} rectified linear and sigmoid nonlinearities, respectively; \ensuremath{\operatorname{H}}{} the Shannon entropy; \(\alpha\) an improvement threshold; \(\tau\) a temperature parameter; \(\ensuremath{\mathbf{L}}\T{\ensuremath{\mathbf{L}}} \triangleq \ensuremath{\boldsymbol{\Sigma}}\) the Cholesky factor; and, residuals \(\ensuremath{\boldsymbol{\gamma}} \sim \ensuremath{\mathcal{N}}\left(\ensuremath{\mathbf{0}}, \ensuremath{\boldsymbol{\Sigma}}\right)\). Lastly, non-myopic acquisition functions (\ensuremath{\operatorname{ES}}{} and \ensuremath{\operatorname{KG}}) are assumed to be defined using a discretization. 
Terms associated with the query set and discretization are respectively denoted via subscripts \(a\) and \(b\).} \label{table:reparameterizations} \end{center} \vspace{-12pt} \end{table} \subheading{Inner optimization problem} Maximizing acquisition functions plays a crucial role in BO as the process through which abstract machinery (\text{e.g.\@}{} model \ensuremath{\mathcal{M}}{} and acquisition function \ensuremath{\mathcal{L}}) yields concrete actions (\text{e.g.\@}{} decisions regarding sets of queries \ensuremath{\mathbf{X}}). Despite its importance however, this inner optimization problem is \temp{often neglected}. This lack of emphasis is largely attributable to a greater focus on creating new and improved machinery as well as on applying BO to new types of problems. Moreover, elementary examples of BO facilitate \ensuremath{\mathcal{L}}'s maximization. For example, optimizing a single query \(\ensuremath{\mathbf{x}} \in \mathbb{R}^{\ensuremath{d}}\) is usually straightforward when \ensuremath{\mathbf{x}}{} is low-dimensional and \ensuremath{\mathcal{L}}{} is myopic. Outside these textbook examples, however, BO's inner optimization problem becomes qualitatively more difficult to solve. In virtually all cases, acquisition functions are non-convex (frequently due to the non-convexity of plausible explanations for \ensuremath{f}{}). Accordingly, increases in input dimensionality \ensuremath{d}{} can be prohibitive to efficient query optimization. In the generalized setting with parallelism \(\ensuremath{q} \ge 1\), this issue is exacerbated by the additional scaling in \ensuremath{q}. While this combination of non-convexity and (acquisition) dimensionality is problematic, the routine intractability of both non-myopic and parallel acquisition poses a commensurate challenge. As is generally true of integrals, the majority of acquisition functions are intractable. 
Even Gaussian integrals, which are often preferred because they lead to analytic solutions for certain instances of~\eqref{eq:expected_utility}, are only tractable in a handful of special cases~\cite{genz92numerical,gassmann02,cunningham2011epmgp}. To circumvent the lack of closed-form solutions, researchers have proposed a wealth of diverse methods. Approximation strategies~\cite{cunningham2011epmgp, desautels2014parallelizing, wang2017max}, which replace a quantity of interest with a more readily computable one, work well in practice but may not converge to the true value. In contrast, bespoke solutions~\cite{genz92numerical, ginsbourger2010kriging, chevalier2013fast} provide \mbox{(near-)}analytic expressions but typically do not scale well with dimensionality.\footnote{By \emph{near-analytic}, we refer to cases where an expression contains terms that cannot be computed exactly but for which high-quality solvers exist (\text{e.g.\@}{} low-dimensional multivariate normal CDF estimators \cite{genz92numerical,genz2004numerical}).} Lastly, MC{} methods~\cite{osborne2009gaussian,snoek-nips12a,hennig-jmlr12a} are highly versatile and generally unbiased, but are often perceived as non-differentiable and, therefore, inefficient for purposes of maximizing \ensuremath{\mathcal{L}}{}. Regardless of the method however, the (often drastic) increase in cost when evaluating \ensuremath{\mathcal{L}}'s proxy acts as a barrier to efficient query optimization, and these costs increase over time as shown in \warn{Figure~1d}. In an effort to address these problems, we now go inside the outer-loop and focus on efficient methods for maximizing acquisition functions. \section{Conclusion} \label{sect:conclusion} BO relies upon an array of powerful tools, such as surrogate models and acquisition functions, and all of these tools are sharpened by strong usage practices. 
We extend these practices by demonstrating that Monte Carlo acquisition functions provide unbiased gradient estimates that can be exploited when optimizing them. Furthermore, we show that many of the same acquisition functions form a family of submodular set functions that can be efficiently optimized using greedy maximization. These insights serve as cornerstones for easy-to-use, general-purpose techniques for practical BO. Comprehensive empirical evidence concludes that said techniques lead to substantial performance gains in real-world scenarios where queries must be chosen in finite time. By tackling the inner optimization problem, these advances directly benefit the theory and practice of Bayesian optimization. \subsection{Differentiating Monte Carlo acquisitions} \label{sect:differentiability} Gradients are one of the most valuable sources of information for optimizing functions. In this section, we detail both the reasons and conditions whereby MC{} acquisition functions are differentiable and further show that most well-known examples readily satisfy these criteria (see Table~\ref{table:reparameterizations}). We assume that \ensuremath{\mathcal{L}}{} is an expectation over a multivariate normal belief \(p(\ensuremath{\mathbf{y}}|\ensuremath{\mathbf{X}}, \ensuremath{\mathcal{D}}) = \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}})\) specified by a GP surrogate such that \((\ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}}) \gets \ensuremath{\mathcal{M}}(\ensuremath{\mathbf{X}})\). 
More generally, we assume that samples can be generated as \(\sample{\ensuremath{\mathbf{y}}} \sim p(\ensuremath{\mathbf{y}}|\ensuremath{\mathbf{X}}, \ensuremath{\mathcal{D}})\) to form an unbiased MC{} estimator of an acquisition function \( \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) \approx \AcqMC{}(\ensuremath{\mathbf{X}}) \triangleq \tfrac{1}{\ensuremath{m}}\sum\nolimits^{\ensuremath{m}}_{\ensuremath{k}=1} \ensuremath{\ell}(\sample{\ensuremath{\mathbf{y}}}) \). Given such an estimator, we are interested in verifying whether \begin{align} \nabla\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) \approx \nabla\AcqMC{}(\ensuremath{\mathbf{X}}) \triangleq \tfrac{1}{\ensuremath{m}}\sum\nolimits^{\ensuremath{m}}_{\ensuremath{k}=1} \nabla\ensuremath{\ell}(\sample{\ensuremath{\mathbf{y}}}), \label{eq:gradient_mc} \end{align} where \(\nabla\ensuremath{\ell}\) denotes the gradient of utility function \ensuremath{\ell}{} taken with respect to \ensuremath{\mathbf{X}}{}. The validity of MC{} gradient estimator \eqref{eq:gradient_mc} is obscured by the fact that \sample{\ensuremath{\mathbf{y}}} depends on \ensuremath{\mathbf{X}}{} through generative distribution \ensuremath{p}{} and that \(\nabla\AcqMC{}\) is the expectation of \ensuremath{\ell}'s derivative rather than the derivative of its expectation. Originally referred to as \emph{infinitesimal perturbation analysis}~\cite{cao1985convergence,glasserman1988performance}, the \emph{reparameterization trick}~\cite{kingma2013vae,JimenezRezende2014a} is the process of differentiating through an MC{} estimate to its generative distribution \ensuremath{p}'s parameters and consists of two components: i) reparameterizing samples from \ensuremath{p}{} as draws from a simpler base distribution \(\hat{\ensuremath{p}}\), and ii) interchanging differentiation and integration by taking the expectation over sample path derivatives. 
\subheading{Reparameterization} Reparameterization is a way of interpreting samples that makes their differentiability \text{w.r.t.\@}{} a generative distribution's parameters transparent. Often, samples \(\sample{\ensuremath{\mathbf{y}}} \sim p(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\theta}})\) can be re-expressed as a deterministic mapping \(\ensuremath{\phi} : \ensuremath{\mathcal{Z}} \times \ensuremath{\boldsymbol{\Theta}} \to \ensuremath{\mathcal{Y}}\) of simpler random variates \(\sample{\ensuremath{\mathbf{z}}} \sim \hat{p}(\ensuremath{\mathbf{z}})\)~\cite{kingma2013vae, JimenezRezende2014a}. This change of variables helps clarify that, if \ensuremath{\ell}{} is a differentiable function of \(\ensuremath{\mathbf{y}} = \ensuremath{\phi}(\ensuremath{\mathbf{z}};\ensuremath{\boldsymbol{\theta}})\), then \(\small{\frac{d\ensuremath{\ell}}{d\ensuremath{\boldsymbol{\theta}}}} = \small{\frac{d\ensuremath{\ell}}{d\ensuremath{\phi}}\frac{d\ensuremath{\phi}}{d\ensuremath{\boldsymbol{\theta}}}}\) by the chain rule of (functional) derivatives. If generative distribution \ensuremath{p}{} is multivariate normal with parameters \(\ensuremath{\boldsymbol{\theta}} = (\ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}})\), the corresponding mapping is then \(\ensuremath{\phi}(\ensuremath{\mathbf{z}}; \ensuremath{\boldsymbol{\theta}}) \triangleq \ensuremath{\boldsymbol{\mu}} + \ensuremath{\mathbf{L}}\ensuremath{\mathbf{z}}\), where \(\ensuremath{\mathbf{z}} \sim \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{0}}, \ensuremath{\mathbf{I}})\) and \ensuremath{\mathbf{L}}{} is \ensuremath{\boldsymbol{\Sigma}}'s Cholesky factor such that \(\ensuremath{\mathbf{L}}\T{\ensuremath{\mathbf{L}}} = \ensuremath{\boldsymbol{\Sigma}}\). 
Rewriting~\eqref{eq:expected_utility} as a Gaussian integral and reparameterizing, we have \begin{align} \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) =\int_{\ensuremath{\boldsymbol{a}}}^{\ensuremath{\boldsymbol{b}}} \ensuremath{\ell}(\ensuremath{\mathbf{y}}) \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{y}}; \ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}}) d\ensuremath{\mathbf{y}} = \int_{\alt{\ensuremath{\boldsymbol{a}}}}^{\alt{\ensuremath{\boldsymbol{b}}}} \ensuremath{\ell}(\ensuremath{\boldsymbol{\mu}} + \ensuremath{\mathbf{L}}\ensuremath{\mathbf{z}}) \ensuremath{\mathcal{N}}(\ensuremath{\mathbf{z}}; \ensuremath{\mathbf{0}}, \ensuremath{\mathbf{I}}) d\ensuremath{\mathbf{z}}\,, \label{eq:expected_utility_mvn} \end{align} where each of the \ensuremath{q}{} terms \(\alt{c}_i\) in both \(\alt{\ensuremath{\boldsymbol{a}}}\) and \(\alt{\ensuremath{\boldsymbol{b}}}\) is transformed as \(\alt{c}_{i} = (c_i - \ensuremath{\mu}_{i} - \sum_{j<i}\ensuremath{L}_{i j}\ensuremath{z}_{j})/\ensuremath{L}_{ii}\). The third column of Table~\ref{table:reparameterizations} grounds \eqref{eq:expected_utility_mvn} with several prominent examples. For a given draw \(\sample{\ensuremath{\mathbf{y}}} \sim \ensuremath{\mathcal{N}}(\ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}})\), the sample path derivative of \ensuremath{\ell}{} \text{w.r.t.\@}{} \ensuremath{\mathbf{X}}{} is then \begin{align} \nabla\ensuremath{\ell}(\sample{\ensuremath{\mathbf{y}}}) = \frac{d\ensuremath{\ell}(\sample{\ensuremath{\mathbf{y}}})}{d\sample{\ensuremath{\mathbf{y}}}} \frac{d\sample{\ensuremath{\mathbf{y}}}}{d\ensuremath{\mathcal{M}}(\ensuremath{\mathbf{X}})} \frac{d\ensuremath{\mathcal{M}}(\ensuremath{\mathbf{X}})}{d\ensuremath{\mathbf{X}}}, \end{align} where, by minor abuse of notation, we have substituted in \(\sample{\ensuremath{\mathbf{y}}} = \ensuremath{\phi}\left(\sample{\ensuremath{\mathbf{z}}}; \ensuremath{\mathcal{M}}(\ensuremath{\mathbf{X}})\right)\). 
Reinterpreting \(\ensuremath{\mathbf{y}}\) as a function of \(\ensuremath{\mathbf{z}}\) therefore sheds light on individual MC{} sample's differentiability. \subheading{Interchangeability} Since \AcqMC{} is an unbiased MC{} estimator consisting of differentiable terms, it is natural to wonder whether the average sample gradient \(\nabla\AcqMC{}\) \eqref{eq:gradient_mc} \temp{follows suit}, \text{i.e.\@}{} whether \begin{align} \nabla\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) = \nabla\mathbb{E}_{\ensuremath{\mathbf{y}}} \left[\ensuremath{\ell}(\ensuremath{\mathbf{y}})\right] \ensuremath{\stackrel{?}{=}}\mathbb{E}_{\ensuremath{\mathbf{y}}} \left[\nabla\ensuremath{\ell}(\ensuremath{\mathbf{y}})\right] \approx \nabla\ensuremath{\mathcal{L}}_{\ensuremath{m}}(\ensuremath{\mathbf{X}}) \label{eq:gradient_interchange}\,, \end{align} where \ensuremath{\stackrel{?}{=}}{} denotes a potential equivalence when interchanging differentiation and expectation. \marc{Necessary and sufficient conditions for this interchange are that, as defined under \ensuremath{p}, integrand \ensuremath{\ell}{} must be continuous and its first derivative \(\ensuremath{\ell}^\prime\) must a.s.\ exist and be integrable~\citep{cao1985convergence,glasserman1988performance}.} \citet{wang2016parallel} demonstrated that these conditions are met for a GP with a twice differentiable kernel, provided that the elements in query set \ensuremath{\mathbf{X}}{} are unique. The authors then use these results to prove that~\eqref{eq:gradient_mc} is an unbiased gradient estimator for the parallel Expected Improvement (\ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}) acquisition function \cite{ginsbourger2010kriging,snoek-nips12a,chevalier2013fast}. In later works, these findings were extended to include parallel versions of the Knowledge Gradient (KG) acquisition function \cite{wu2016parallel,wu2017bayesian}. 
Figure~\ref{fig:overview_pt2}d (bottom right) visualizes gradient-based optimization of MC{} \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} for parallelism \(\ensuremath{q}=2\). \begin{figure}[t] \begin{center} \includegraphics[width=\linewidth]{figures/overview_part2.pdf} \vspace{-20pt} \caption{(a) Pseudo-code for BO outer-loop with greedy parallelism, the inner optimization problem is boxed in red. (b--c) Successive iterations of greedy maximization, starting from the posterior shown in Figure~1b. (d) On the left, greedily selected query `\raisebox{-1.75pt}{\FiveStarOpen}'; on the right and from `\ensuremath{\bm{\times}}' to `\raisebox{-1.75pt}{\FiveStarOpen}', trajectory when jointly optimizing parallel queries $\ensuremath{\mathbf{x}}_{1}$ and $\ensuremath{\mathbf{x}}_{2}$ via stochastic gradient ascent. Darker colors correspond with larger acquisitions.} \label{fig:overview_pt2} \end{center} \end{figure} \subheading{Extensions} Rather than focusing on individual examples, our goal is to show differentiability for a broad class of MC{} acquisition functions. In addition to its conceptual simplicity, one of MC{} integration's primary strengths is its generality. This versatility is evident in Table~\ref{table:reparameterizations}, which catalogs (differentiable) reparameterizations for six of the most popular acquisition functions. While some of these forms were previously known (\ensuremath{\operatorname{EI}}{} and \ensuremath{\operatorname{KG}}) or follow freely from the above (\ensuremath{\operatorname{SR}}), others require additional steps. We summarize these steps below and provide full details in Appendix~\ref{appendix:methods}. In many cases of interest, utility is measured in terms of discrete events. 
For example, Probability of Improvement \cite{kushner1964new,viana2010surrogate} is the expectation of a binary event \(\ensuremath{\boldsymbol{e}}_{\acqSubscript{\ensuremath{\operatorname{PI}}}}\): ``will a new set of results improve upon a level \ensuremath{\alpha}?'' Similarly, Entropy Search \cite{hennig-jmlr12a} contains expectations of categorical events \(\ensuremath{\boldsymbol{e}}_{\acqSubscript{\ensuremath{\operatorname{ES}}}}\): ``which of a set of random variables will be the largest?'' Unfortunately, mappings from continuous variables \ensuremath{\mathbf{y}}{} to discrete events \ensuremath{\boldsymbol{e}}{} are typically discontinuous and, therefore, violate the conditions for \eqref{eq:gradient_interchange}. To overcome this issue, we utilize \emph{concrete} (\underline{con}tinuous to dis\underline{crete}) approximations in place of the original, discontinuous mappings \cite{jang2016categorical,maddison2016concrete}. Still within the context of the reparameterization trick, \cite{jang2016categorical,maddison2016concrete} studied the closely related problem of optimizing an expectation \text{w.r.t.\@}{} a discrete generative distribution's parameters. To do so, the authors propose relaxing the mapping from, \text{e.g.\@}{}, uniform to categorical random variables with a continuous approximation so that the (now differentiable) transformed variables closely resemble their discrete counterparts in distribution. Here, we first map from uniform to Gaussian (rather than Gumbel) random variables, but the process is otherwise identical. 
Concretely, we can approximate \ensuremath{\operatorname{PI}}'s binary event as \begin{align} \tilde{\ensuremath{\boldsymbol{e}}}_{\acqSubscript{\ensuremath{\operatorname{PI}}}}(\ensuremath{\mathbf{X}}; \ensuremath{\alpha}, \ensuremath{\tau}) = \max\left(\ensuremath{\operatorname{\sigma}}\left(\nicefrac{\ensuremath{\mathbf{y}} - \alpha}{\ensuremath{\tau}}\right)\right) \approx \max\left(\ensuremath{\operatorname{\mathbbm{1}}}^{-}(\ensuremath{\mathbf{y}} - \ensuremath{\alpha})\right), \end{align} where \(\ensuremath{\operatorname{\mathbbm{1}}}^{-}\) denotes the left-continuous Heaviside step function, \ensuremath{\operatorname{\sigma}}{} the sigmoid nonlinearity, and \(\ensuremath{\tau} \in [0,\infty]\) acts as a temperature parameter such that the approximation becomes exact as \(\ensuremath{\tau} \to 0\). Appendix~\ref{appendix:concrete} further discusses concrete approximations for both \ensuremath{\operatorname{PI}}{} and \ensuremath{\operatorname{ES}}{}. Lastly, the Upper Confidence Bound (\ensuremath{\operatorname{UCB}}) acquisition function \cite{srinivas10} is typically not portrayed as an expectation, seemingly barring the use of MC{} methods. At the same time, the standard definition \(\ensuremath{\operatorname{UCB}}(\ensuremath{\mathbf{x}}; \beta) \triangleq \ensuremath{\mu} + \beta^{\ensuremath{\nicefrac{1}{2}}}\ensuremath{\sigma}\) bears a striking resemblance to the reparameterization for normal random variables \(\ensuremath{\phi}(\ensuremath{z}; \ensuremath{\mu}, \ensuremath{\sigma}) = \ensuremath{\mu} + \ensuremath{\sigma}\ensuremath{z}\). By exploiting this insight, it is possible to rewrite this closed-form expression as \(\ensuremath{\operatorname{UCB}}(\ensuremath{\mathbf{x}}; \beta) = \int_{\ensuremath{\mu}}^{\infty} \ensuremath{y}\ensuremath{\mathcal{N}}(\ensuremath{y}; \ensuremath{\mu}, 2\pi\beta\ensuremath{\stddev^{2}})d\ensuremath{y}\). 
Formulating \ensuremath{\operatorname{UCB}}{} as an expectation allows us to naturally parallelize this acquisition function as \begin{align} \ensuremath{\operatorname{UCB}}(\ensuremath{\mathbf{X}};\beta) = \mathbb{E}_{\ensuremath{\mathbf{y}}} \big[ \max(\ensuremath{\boldsymbol{\mu}} + \sqrt{\nicefrac{\beta\pi}{2}}\abs{\ensuremath{\boldsymbol{\gamma}}}) \big], \label{eq:parallel_ucb_short} \end{align} where \(\abs{\ensuremath{\boldsymbol{\gamma}}} = \abs{\ensuremath{\mathbf{y}} - \ensuremath{\boldsymbol{\mu}}}\) denotes the absolute value of \ensuremath{\mathbf{y}}{}'s residuals. In contrast with existing parallelizations of \ensuremath{\operatorname{UCB}}{} \cite{contal2013parallel,desautels2014parallelizing}, Equation~\eqref{eq:parallel_ucb_short} directly generalizes its marginal form and can be efficiently estimated via MC{} integration (see Appendix~\ref{appendix:parallel_ucb} for the full derivation). These extensions further demonstrate how many of the apparent barriers to gradient-based optimization of MC{} acquisition functions can be overcome by borrowing ideas from new (and old) techniques. \section{Experiments} \label{sect:experiments} We assessed the efficacy of gradient-based and submodular strategies for maximizing acquisition functions in two primary settings: ``synthetic'', where task \ensuremath{f}{} was drawn from a known GP prior, and ``black-box'', where \ensuremath{f}'s nature is unknown to the optimizer. In both cases, we used a GP surrogate with a constant mean and an anisotropic \matern{5}{2} kernel. For black-box tasks, ambiguity regarding the correct function prior was handled via online MAP estimation of the GP's (hyper)parameters. Appendix~\ref{appendix:experiment_details} further details the setup used for synthetic tasks. We present results averaged over 32 independent trials. Each trial began with three randomly chosen inputs, and competing methods were run from identical starting conditions. 
While the general notation of the paper has assumed noise-free observations, all experiments were run with Gaussian measurement noise leading to observed values \(\hat{\ensuremath{y}} \sim \ensuremath{\mathcal{N}}(\ensuremath{f}(\ensuremath{\mathbf{x}}), 1\mathrm{e-}3)\). \subheading{Acquisition functions} We focused on parallel MC{} acquisition functions \AcqMC{}, particularly \ensuremath{\operatorname{EI}}{} and \ensuremath{\operatorname{UCB}}{}. Results using \ensuremath{\operatorname{EI}}{} are shown here and those using \ensuremath{\operatorname{UCB}}{} are provided in extended results (Appendix~\ref{appendix:results_extended}). To avoid confounding variables when assessing BO performance for different acquisition maximizers, results using the incremental{} form of \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} discussed in Section~\ref{sect:myopic_maximal} are also reserved for extended results. In additional experiments, we observed that optimization of \ensuremath{\operatorname{PI}}{} and \ensuremath{\operatorname{SR}}{} behaved like that of \ensuremath{\operatorname{EI}}{} and \ensuremath{\operatorname{UCB}}{}, respectively. However, overall performance using these acquisition functions was slightly worse, so further results are not reported here. Across experiments, the \parallelize{UCB}{} acquisition function introduced in Section~\ref{sect:differentiability} outperformed \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}{} on all tasks but the Levy function. Generally speaking, MC{} estimators \AcqMC{} come in both deterministic and stochastic varieties. Here, determinism refers to whether or not each of \ensuremath{m}{} samples \(\sample{\ensuremath{\mathbf{y}}}\) were generated using the same random variates \(\sample{\ensuremath{\mathbf{z}}}\) within a given outer-loop iteration (see Section~\ref{sect:differentiability}). 
Together with a decision regarding ``batch-size'' \ensuremath{m}{}, this choice reflects a well-known tradeoff of approximation-, estimation-, and optimization-based sources of error when maximizing the true function \ensuremath{\mathcal{L}}{} \cite{bousquet2008tradeoffs}. We explored this tradeoff for each maximizer{} and summarize our findings below. \begin{figure}[t] \begin{center} \includegraphics[width=\linewidth]{figures/synthetic_ei.pdf} \caption{Average performance of different acquisition \solver s{} on synthetic tasks from a known prior, given varied runtimes when maximizing Monte Carlo \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}. Reported values indicate the log of the immediate regret \(\log_{10}\left|f_{\max} - f(\opt{\ensuremath{\mathbf{x}}})\right|\), where \(\opt{\ensuremath{\mathbf{x}}}\) denotes the observed maximizer \(\opt{\ensuremath{\mathbf{x}}} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{D}}} \hat{\ensuremath{y}}\).} \label{fig:results_synthetic_ei} \end{center} \end{figure} \subheading{\Solver s} We considered a range of (acquisition) \solver s, ultimately settling on stochastic gradient ascent (\text{Adam}, \cite{kingma2014adam}), Covariance Matrix Adaptation Evolution Strategy (CMA-ES, \cite{hansen2016cma}) and Random Search (RS, \cite{bergstra12}). Additional information regarding these choices is provided in Appendix~\ref{appendix:experiment_details}. For fair comparison, \solver s{} were constrained by CPU runtime. At each outer-loop iteration, an ``inner budget{}'' was defined as the average time taken to simultaneously evaluate \ensuremath{N}{} acquisition values given equivalent conditions. When using greedy parallelism, this budget was split evenly among each of \ensuremath{q}{} iterations. To characterize performance as a function of allocated runtime, experiments were run using inner budgets{} \(\ensuremath{N} \in \{2^{12}, 2^{14}, 2^{16}\}\). 
For \text{Adam}, we used stochastic minibatches consisting of \(\ensuremath{m}=128\) samples and an initial learning rate \(\eta=\nicefrac{1}{40}\). To combat non-convexity, gradient ascent was run from a total of 32 (64) starting positions when greedily (jointly) maximizing \AcqMC. Appendix~\ref{appendix:initialization} details the multi-start initialization strategy. As with the gradient-based approaches, CMA-ES performed better when run using stochastic minibatches \((\ensuremath{m}=128)\). Furthermore, reusing the aforementioned initialization strategy to generate CMA-ES's initial population of 64 samples led to additional performance gains. \nolinebreak \begin{figure}[t] \begin{center} \includegraphics[width=\linewidth]{figures/blackbox_ei.pdf} \caption{Average performance of different acquisition \solver s{} on black-box tasks from an unknown prior, given varied runtimes when maximizing Monte Carlo \ensuremath{\operatorname{\mathnormal{q}\mathhyphen{}EI}}. Reported values indicate the log of the immediate regret \(\log_{10}\left|f_{\max} - f(\opt{\ensuremath{\mathbf{x}}})\right|\), where \(\opt{\ensuremath{\mathbf{x}}}\) denotes the observed maximizer \(\opt{\ensuremath{\mathbf{x}}} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{D}}} \hat{\ensuremath{y}}\).} \label{fig:results_blackbox_ei} \end{center} \end{figure} \subheading{Empirical results} Figures \ref{fig:results_synthetic_ei} and \ref{fig:results_blackbox_ei} present key results regarding BO performance under varying conditions. Both sets of experiments explored an array of input dimensionalities \ensuremath{d}{} and degrees of parallelism \ensuremath{q}{} (shown in the lower left corner of each panel). \Solver s{} are grouped by color, with darker colors denoting use of greedy parallelism; inner budgets{} are shown in ascending order from left to right. 
Results on synthetic tasks (Figure~\ref{fig:results_synthetic_ei}) provide a clearer picture of the \solver s' impacts on the full BO loop by eliminating the model mismatch. Across all dimensions \ensuremath{d}{} (rows) and inner budgets{} \ensuremath{N}{} (columns), gradient-based \solver s{} (orange) were consistently superior to both gradient-free (blue) and na\"ive (green) alternatives. Similarly, submodular \solver s{} generally surpassed their joint counterparts. However, in lower-dimensional cases where gradients alone suffice to optimize \AcqMC{}, the benefits of coupling gradient-based strategies with near-optima seeking submodular maximization naturally decline. Lastly, the benefits of exploiting gradients and submodularity both scaled with increasing acquisition dimensionality \(\ensuremath{q}\times\ensuremath{d}\). Trends are largely identical for black-box tasks (Figure~\ref{fig:results_blackbox_ei}), and this commonality is most evident for tasks sampled from an unknown GP prior (final row). These runs were identical to ones on synthetic tasks (specifically, the diagonal of Figure~\ref{fig:results_synthetic_ei}) but where knowledge of \ensuremath{f}'s prior was withheld. Outcomes here clarify the impact of model mismatch, showing how \solver s{} maintain their influence. Finally, performance on Hartmann-6 (top row) serves as a clear indicator of the importance of thoroughly solving the inner optimization problem. In these experiments, performance improved despite mounting parallelism due to a corresponding increase in the inner budget{}. Overall, these results clearly demonstrate that both gradient-based and submodular approaches to (parallel) query optimization lead to reliable and, often, substantial improvement in outer-loop performance. Furthermore, these gains become more pronounced as the acquisition dimensionality increases. Viewed in isolation, \solver s{} utilizing gradients consistently outperform gradient-free alternatives. 
Similarly, greedy strategies improve upon their joint counterparts in most cases. \section{Introduction} \label{sect:introduction} Bayesian optimization (BO) is a powerful framework for tackling complicated global optimization problems \cite{kushner1964new,movckus1975bayesian,jones-jgo98a}. Given a black-box{} function \(\ensuremath{f} : \ensuremath{\mathcal{X}} \to \ensuremath{\mathcal{Y}}\), BO seeks to identify a maximizer \(\opt{\ensuremath{\mathbf{x}}} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{X}}} \ensuremath{f}(\ensuremath{\mathbf{x}})\) while simultaneously minimizing incurred costs. Recently, these strategies have demonstrated state-of-the-art results on many important, real-world problems ranging from material sciences \cite{frazier2016bayesian,ueno2016combo}, to robotics \cite{calandra2016bayesian,bansal2017goal}, to algorithm tuning and configuration \cite{hutter2011sequential,snoek-nips12a,swersky-nips13,falkner-icml-18}. From a high-level perspective, BO can be understood as the application of Bayesian decision theory to optimization problems~\cite{movckus1994application, degroot2005optimal,robert2007bayesian}. One first specifies a belief over possible explanations for \ensuremath{f}{} using a probabilistic surrogate model and then combines this belief with an acquisition function \ensuremath{\mathcal{L}}{} to convey the expected utility for evaluating a set of queries \ensuremath{\mathbf{X}}{}. In theory, \ensuremath{\mathbf{X}}{} is chosen according to Bayes' decision rule as \ensuremath{\mathcal{L}}{}'s maximizer by solving for an \emph{inner optimization problem} \cite{gelbart2014bayesian,martinez2014bayesopt,wang2016parallel}. In practice, challenges associated with maximizing \ensuremath{\mathcal{L}}{} greatly impede our ability to live up to this standard. Nevertheless, this inner optimization problem is often treated as a black-box{} unto itself. 
Failing to address this challenge leads to a systematic departure from BO's premise and, consequently, consistent deterioration in \temp{achieved} performance. To help reconcile theory and practice, we present two modern perspectives for addressing BO's inner optimization problem that exploit key aspects of acquisition functions and their estimators. First, we clarify how sample path derivatives can be used to optimize a wide range of acquisition functions estimated via Monte Carlo (MC) integration. Second, we identify a common family of submodular acquisition functions and show that its constituents can generally be expressed in a \temp{more computer-friendly} form. These acquisition functions' properties enable greedy approaches to efficiently maximize them with guaranteed near-optimal results. Finally, we demonstrate through comprehensive experiments that these theoretical contributions directly translate to reliable and, often, substantial performance gains. \section*{Acknowledgments} The authors thank David Ginsbourger, Dario Azzimonti and Henry Wynn for initial discussions regarding the submodularity of various integrals. The support of the EPSRC Centre for Doctoral Training in High Performance Embedded and Distributed Systems (reference EP/L016796/1) is gratefully acknowledged. This work has partly been supported by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme under grant no. 716721. \footnotesize \linespread{1.0}\selectfont \setlength{\bibsep}{5pt} \section{Maximizing acquisition functions} \label{sect:methods} This section presents the technical contributions of this paper, which can be broken down into two complementary topics: \inlineItem{1} gradient-based optimization of acquisition functions that are estimated via Monte Carlo integration, and \inlineItem{2} greedy maximization of ``myopic maximal'' acquisition functions. 
Below, we separately discuss each contribution along with its related literature. \input{differentiability.tex} \input{myopic_maximal.tex} \subsection{Maximizing myopic maximal acquisitions} \label{sect:myopic_maximal} This section focuses exclusively on the family of myopic maximal (MM) acquisition functions: myopic acquisition functions defined as the expected max of a pointwise utility function \ensuremath{\hat{\acq}}{}, \text{i.e.\@}{} \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) = \mathbb{E}_{\ensuremath{\mathbf{y}}}[\ensuremath{\ell}(\ensuremath{\mathbf{y}})] = \mathbb{E}_{\ensuremath{\mathbf{y}}}[\max \ensuremath{\hat{\acq}}(\ensuremath{\mathbf{y}})]\). Of the acquisition functions included in Table~\ref{table:reparameterizations}, this family includes \ensuremath{\operatorname{EI}}{}, \ensuremath{\operatorname{PI}}, \ensuremath{\operatorname{SR}}{}, and \ensuremath{\operatorname{UCB}}{}. \temp{We show that} these functions have special properties that make them particularly amenable to greedy maximization. Greedy maximization is a popular approach for selecting near-optimal sets of queries \ensuremath{\mathbf{X}}{} to be evaluated in parallel \cite{azimi2010batch,chen2013near,contal2013parallel,desautels2014parallelizing,shah2015parallel,kathuria2016batched}. This iterative strategy is so named because it always ``greedily'' chooses the query \(\ensuremath{\mathbf{x}}\) that produces the largest immediate reward. 
At each step \(\ensuremath{j} = 1, \ldots, \ensuremath{q}\), a greedy maximizer treats the \(\ensuremath{j}{-}1\) preceding choices \(\OLD{1}{\ensuremath{\mathbf{X}}}\) as constants and grows the set by selecting an additional element \(\NEW{0}{\ensuremath{\mathbf{x}}} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{X}}} \ensuremath{\mathcal{L}}(\OLD{1}{\ensuremath{\mathbf{X}}} \cup \{\ensuremath{\mathbf{x}}\}; \ensuremath{\mathcal{D}})\) from the set of possible queries \ensuremath{\mathcal{X}}{}. \warn{Algorithm~2} in Figure~\ref{fig:overview_pt2} outlines this process's role in BO's outer-loop. \subheading{Submodularity} Greedy maximization is often linked to the concept of \emph{submodularity} (SM). Roughly speaking, a set function \(\ensuremath{\mathcal{L}}\) is SM{} if its increase in value when adding any new point \NEW{1}{\ensuremath{\mathbf{x}}} to an existing collection \OLD{1}{\ensuremath{\mathbf{X}}} is non-increasing in cardinality \(k\) (for a technical overview, see~\cite{bach2013learning}). Greedily maximizing SM{} functions is guaranteed to produce near-optimal results~\cite{minoux1978accelerated,nemhauser78,krause14}. Specifically, if \ensuremath{\mathcal{L}}{} is a normalized SM{} function with maximum \(\opt{\ensuremath{\mathcal{L}}}\), then a greedy maximizer will incur no more than \(\tfrac{1}{e}\opt{\ensuremath{\mathcal{L}}}\) regret when attempting to solve for \(\opt{\ensuremath{\mathbf{X}}} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{X}} \in \ensuremath{\mathcal{X}}^{\ensuremath{q}}} \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}})\). In the context of BO, SM{} has previously been appealed to when establishing outer-loop regret bounds \cite{srinivas10,contal2013parallel,desautels2014parallelizing}. 
Such applications of SM{} utilize this property by relating an idealized BO strategy to greedy maximization of a SM{} objective (\text{e.g.\@}{}, the mutual information between black-box{} function \(f\) and observations \ensuremath{\mathcal{D}}{}). In contrast, we show that the family of MM{} acquisition functions are inherently SM, thereby guaranteeing that greedy maximization thereof produces near-optimal choices \ensuremath{\mathbf{X}}{} at each step of BO's outer-loop.\footnote{An additional technical requirement for SM{} is that the ground set \ensuremath{\mathcal{X}}{} be finite. Under similar conditions, SM-based guarantees have been extended to infinite ground sets \cite{srinivas10}, but we have not yet taken these steps.} We begin by removing some unnecessary complexity: \begin{enumerate}[label=\arabic*.,leftmargin=16pt,rightmargin=8pt] \item Let \(\sample{f} \sim p(f | \ensuremath{\mathcal{D}})\) denote the \(\ensuremath{k}\)-th possible explanation of black-box{} \(f\) given observations \ensuremath{\mathcal{D}}. By marginalizing out nuisance variables \(f(\ensuremath{\mathcal{X}} \setminus \ensuremath{\mathbf{X}})\), \ensuremath{\mathcal{L}}{} can be expressed as an expectation over functions \(\sample{f}\) themselves rather than over potential outcomes \(\sample{\ensuremath{\mathbf{y}}} \sim p(\ensuremath{\mathbf{y}}|\ensuremath{\mathbf{X}}, \ensuremath{\mathcal{D}})\). \item Belief \(p(f | \ensuremath{\mathcal{D}})\) and sample paths \sample{f} depend solely on \ensuremath{\mathcal{D}}{}. Hence, expected utility \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}; \ensuremath{\mathcal{D}}) = \mathbb{E}_{f}\left[\ensuremath{\ell}(f(\ensuremath{\mathbf{X}}))\right]\) is a weighted sum over a fixed set of functions whose weights are constant. 
Since non-negative linear combinations of SM{} functions are SM{} \cite{krause14}, \(\ensuremath{\mathcal{L}}(\makebox[1ex]{\textbf{$\cdot$}})\) is SM{} so long as the same can be said of all functions \(\ensuremath{\ell}(\sample{f}(\makebox[1ex]{\textbf{$\cdot$}})) = \max \ensuremath{\hat{\acq}}\left(\sample{f}(\makebox[1ex]{\textbf{$\cdot$}})\right)\). \item As pointwise functions, \sample{f} and \ensuremath{\hat{\acq}}{} specify the set of values mapped to by \ensuremath{\mathcal{X}}{}. They therefore influence whether we can normalize the utility function such that \(\ensuremath{\ell}(\emptyset) = 0\), but do not impact SM{}. Appendix \ref{appendix:normalizing_utility} discusses the technical condition of normalization in greater detail. In general however, we require that \(\ensuremath{v_{\min}} = \min_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{X}}}\ensuremath{\hat{\acq}}(\sample{f}(\ensuremath{\mathbf{x}}))\) is guaranteed to be bounded from below for all functions under the support of \(p(f|\ensuremath{\mathcal{D}})\). \end{enumerate} Having now eliminated confounding factors, the remaining question is whether \(\max(\cdot)\) is SM{}. Let \(\mathcal{V}\) be the set of possible utility values and define \(\max(\emptyset) = \ensuremath{v_{\min}}\). Then, given sets \(\ensuremath{\mathcal{A}} \subseteq \ensuremath{\mathcal{B}} \subseteq \mathcal{V}\) and \(\forall v \in \mathcal{V}\), it holds that \begin{align} \max(\ensuremath{\mathcal{A}} \cup \{v\}) - \max(\ensuremath{\mathcal{A}}) \ge \max(\ensuremath{\mathcal{B}} \cup \{v\}) - \max(\ensuremath{\mathcal{B}}). \label{eq:max_submodularity} \end{align} \temp{\emph{Proof:}} We prove the equivalent definition \(\max(\ensuremath{\mathcal{A}}) + \max(\ensuremath{\mathcal{B}}) \ge \max(\ensuremath{\mathcal{A}}~\cup~\ensuremath{\mathcal{B}}) + \max(\ensuremath{\mathcal{A}}~\cap~\ensuremath{\mathcal{B}})\). 
Without loss of generality, assume \(\max(\ensuremath{\mathcal{A}}~\cup~\ensuremath{\mathcal{B}}) = \max(\ensuremath{\mathcal{A}})\). Then, \(\max(\ensuremath{\mathcal{B}}) \ge \max(\ensuremath{\mathcal{A}}~\cap~\ensuremath{\mathcal{B}})\) since, for any \(\ensuremath{\mathcal{C}} \subseteq \ensuremath{\mathcal{B}}\), \(\max(\ensuremath{\mathcal{B}}) \ge \max(\ensuremath{\mathcal{C}})\). This result establishes the MM{} family as a class of SM{} set functions, providing strong theoretical justification for greedy approaches to solving BO's inner-optimization problem. \subheading{\warn{Incremental} form} So far, we have discussed greedy maximizers that select a \ensuremath{j}-th new point \(\NEW{0}{\ensuremath{\mathbf{x}}}\) by optimizing the joint acquisition \( \ensuremath{\mathcal{L}}(\BOTH{0}{\ensuremath{\mathbf{X}}}; \ensuremath{\mathcal{D}}) = \mathbb{E}_{\BOTH{0}{\ensuremath{\mathbf{y}}}|\ensuremath{\mathcal{D}}}\left[\ensuremath{\ell}(\BOTH{0}{\ensuremath{\mathbf{y}}})\right] \) originally defined in \eqref{eq:expected_utility}. A closely related strategy~\cite{ginsbourger2011dealing,snoek-nips12a,contal2013parallel,desautels2014parallelizing} is to formulate the greedy maximizer's objective as (the expectation of) a marginal acquisition function \(\ensuremath{\bar{\ensuremath{\mathcal{L}}}}\). We refer to this category of acquisition functions, which explicitly represent the value of \BOTH{0}{\ensuremath{\mathbf{X}}} as that of \(\OLD{0}{\ensuremath{\mathbf{X}}}\) incremented by a marginal quantity, as \emph{incremental{}}. 
The most common example of an incremental{} acquisition function is the iterated expectation \( \mathbb{E}_{\OLD{1}{\ensuremath{\mathbf{y}}}|\ensuremath{\mathcal{D}}} \left[ \ensuremath{\bar{\ensuremath{\mathcal{L}}}}(\NEW{1}{\ensuremath{\mathbf{x}}}; \ensuremath{\mathcal{D}}_{\ensuremath{j}}) \right] \), where \(\ensuremath{\mathcal{D}}_{\ensuremath{j}} = \ensuremath{\mathcal{D}} \cup \{(\ensuremath{\mathbf{x}}_{i},\ensuremath{y}_{i})\}_{i<\ensuremath{j}}\) denotes a fantasy state. Because these integrals are generally intractable, MC{} integration (Section~\ref{sect:differentiability}) is typically used to estimate their values by averaging over fantasies formed by sampling from \(p(\OLD{1}{\ensuremath{\mathbf{y}}}| \OLD{1}{\ensuremath{\mathbf{X}}}, \ensuremath{\mathcal{D}})\). In practice, approaches based on incremental{} acquisition functions (such as the mentioned MC{} estimator) have several distinct advantages over joint ones. Marginal (myopic) acquisition functions usually admit differentiable, closed-form solutions. The latter property makes them cheap to evaluate, while the former reduces the sample variance of MC{} estimators. Moreover, these approaches can better utilize caching since many computationally expensive terms (such as a Cholesky used to generate fantasies) only change between rounds of greedy maximization. 
A joint acquisition function \ensuremath{\mathcal{L}}{} can always be expressed as an incremental one by defining \ensuremath{\bar{\ensuremath{\mathcal{L}}}}{} as the expectation of the corresponding utility function \ensuremath{\ell}'s discrete derivative \begin{align} \ensuremath{\Delta}(\NEW{0}{\ensuremath{\mathbf{x}}}; \OLD{0}{\ensuremath{\mathbf{X}}}, \ensuremath{\mathcal{D}}) = \mathbb{E}_{\BOTH{0}{\ensuremath{\mathbf{y}}}|\ensuremath{\mathcal{D}}} \left[ \ensuremath{\delta}(\NEW{0}{\ensuremath{y}}; \OLD{0}{\ensuremath{\mathbf{y}}}) \right] = \ensuremath{\mathcal{L}}(\BOTH{0}{\ensuremath{\mathbf{X}}}; \ensuremath{\mathcal{D}}) - \ensuremath{\mathcal{L}}(\OLD{0}{\ensuremath{\mathbf{X}}}; \ensuremath{\mathcal{D}}), \label{eq:growth_expected} \end{align} with \(\ensuremath{\delta}(\NEW{1}{\ensuremath{y}}; \OLD{0}{\ensuremath{\mathbf{y}}}) = \ensuremath{\ell}(\BOTH{0}{\ensuremath{\mathbf{y}}}) - \ensuremath{\ell}(\OLD{1}{\ensuremath{\mathbf{y}}})\) and \(\ensuremath{\mathcal{L}}(\emptyset; \ensuremath{\mathcal{D}}) = 0\) so that \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}_{1:q}; \ensuremath{\mathcal{D}}) = \sum_{\ensuremath{j}=1}^{\ensuremath{q}}\ensuremath{\Delta}(\NEW{1}{\ensuremath{\mathbf{x}}}; \OLD{1}{\ensuremath{\mathbf{X}}}, \ensuremath{\mathcal{D}})\). To show why this representation is especially useful for MM{} acquisition functions, we reuse the notation of \eqref{eq:max_submodularity} to introduce the following straightforward identity \begin{align} \max(\ensuremath{\mathcal{B}}) - \max(\ensuremath{\mathcal{A}}) = \ensuremath{\operatorname{ReLU}}\left(\max(\ensuremath{\mathcal{B}} \setminus \ensuremath{\mathcal{A}}) - \max(\ensuremath{\mathcal{A}})\right). 
\label{eq:max_growth} \end{align} \emph{Proof:} Since \(\ensuremath{v_{\min}}\) is defined as the smallest possible element of either set, the \ensuremath{\operatorname{ReLU}}'s argument is negative if and only if \ensuremath{\mathcal{B}}'s maximum is a member of \ensuremath{\mathcal{A}}{} (in which case both sides equate to zero). In all other cases, the \ensuremath{\operatorname{ReLU}}{} can be eliminated and \(\max(\ensuremath{\mathcal{B}}) = \max(\ensuremath{\mathcal{B}} \setminus \ensuremath{\mathcal{A}})\) by definition. Reformulating the MM{} marginal gain function as \(\ensuremath{\delta}(\NEW{0}{\ensuremath{y}}; \OLD{0}{\ensuremath{\mathbf{y}}}) = \ensuremath{\operatorname{ReLU}}(\ensuremath{\ell}(\NEW{0}{\ensuremath{y}}) - \ensuremath{\ell}(\OLD{0}{\ensuremath{\mathbf{y}}}))\) now gives the desired result: that the MM{} family's discrete derivative is the ``improvement'' function. Accordingly, the conditional expectation of \eqref{eq:growth_expected} given fantasy state \(\ensuremath{\mathcal{D}}_{\ensuremath{j}}\) is the expected improvement of \ensuremath{\ell}, \text{i.e.\@} \begin{align} \mathbb{E}_{\NEW{0}{\ensuremath{y}}|\ensuremath{\mathcal{D}}_{\ensuremath{j}}} \left[ \ensuremath{\delta}(\NEW{0}{\ensuremath{y}}; \OLD{0}{\ensuremath{\mathbf{y}}}) \right] = \ensuremath{\operatorname{EI}}_{\ensuremath{\ell}}\left(\NEW{0}{\ensuremath{\mathbf{x}}}; \ensuremath{\mathcal{D}}_{\ensuremath{j}}\right) = \int_{\NEW{0}{\Gamma}} \left[\ensuremath{\ell}(\NEW{0}{\ensuremath{y}}) - \ensuremath{\ell}(\OLD{0}{\ensuremath{\mathbf{y}}})\right] p(\NEW{0}{\ensuremath{y}}| \NEW{0}{\ensuremath{\mathbf{x}}}, \ensuremath{\mathcal{D}}_{\ensuremath{j}}) d\NEW{0}{\ensuremath{y}}, \label{eq:growth_expected_ei} \end{align} where \(\NEW{1}{\Gamma} \triangleq \{\NEW{1}{\ensuremath{y}} : \ensuremath{\ell}(\NEW{1}{\ensuremath{y}}) > \ensuremath{\ell}(\OLD{1}{\ensuremath{\mathbf{y}}})\}\). 
Since marginal gain function \(\ensuremath{\delta}\) primarily acts to lower bound a univariate integral over \NEW{0}{\ensuremath{y}}, \eqref{eq:growth_expected_ei} often admits closed-form solutions. This statement is true of all MM{} acquisition functions considered here, making their incremental{} forms particularly efficient. Putting everything together, an MM{} acquisition function's joint and incremental{} forms equate as \( \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}_{1:\ensuremath{q}}; \ensuremath{\mathcal{D}}) = \sum_{\ensuremath{j}=1}^{q} \mathbb{E}_{\OLD{1}{\ensuremath{\mathbf{y}}}|\ensuremath{\mathcal{D}}} \left[ \ensuremath{\operatorname{EI}}_{\ensuremath{\ell}}\left(\NEW{0}{\ensuremath{\mathbf{x}}}; \ensuremath{\mathcal{D}}_{\ensuremath{j}}\right) \right] \). For the special case of Expected Improvement per se (denoted here as \(\ensuremath{\mathcal{L}}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}\) to avoid confusion), this expression further simplifies to reveal an exact equivalence whereby \( \ensuremath{\mathcal{L}}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}(\ensuremath{\mathbf{X}}_{1:\ensuremath{q}}; \ensuremath{\mathcal{D}}) = \sum_{\ensuremath{j}=1}^{q}\mathbb{E}_{\OLD{1}{\ensuremath{\mathbf{y}}}|\ensuremath{\mathcal{D}}} \left[ \ensuremath{\mathcal{L}}_{\acqSubscript{\ensuremath{\operatorname{EI}}}}(\NEW{1}{\ensuremath{\mathbf{x}}}; \ensuremath{\mathcal{D}}_{\ensuremath{j}}) \right] \). Appendix~\ref{appendix:results_extended} compares performance when using joint and incremental{} forms, demonstrating how the latter becomes increasingly beneficial as the dimensionality of the (joint) acquisition function \(\ensuremath{q} \times d\) grows. 
\subsection{Submodularity of parallel acquisitions} \label{sect:submodularity} \newcommand{i}{i} \newcommand{\makebox[1ex]{\textbf{$\cdot$}}}{\makebox[1ex]{\textbf{$\cdot$}}} \flag[inline]{jw: this section is much improved; but, probably too dense.} Greedy maximization is a popular strategy for approximately solving parallel decision-making problems \cite{azimi2010batch,chen2013near,contal2013parallel,desautels2014parallelizing,shah2015parallel,kathuria2016batched}. Iterative greedy strategies proceed by choosing one thing at a time. At each step \(k = 1,\dotsc, \ensuremath{q}\), one treats the \(k{-}1\) preceding choices \(\ensuremath{\mathbf{X}}\) as constants and grows the set by selecting an additional query \(\ensuremath{\mathbf{x}}_{k} \in \ensuremath{\operatornamewithlimits{arg\,max}}_{\ensuremath{\mathbf{x}} \in \ensuremath{\mathcal{X}}} \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}} \cup \{\ensuremath{\mathbf{x}}\}; \ensuremath{\mathcal{D}})\). \warn{Algorithm~2} in Figure~\ref{fig:overview_pt2} outlines this process as it occurs within the context of BO's outer-loop. \temp{Celebrated} results~\cite{minoux1978accelerated,nemhauser78,krause14} \temp{prove} that this simple greedy approach is guaranteed to find a near-optimal solution provided that the maximization objective is \emph{submodular}.\footnote{Submodularity is often likened to ``diminishing returns''. For a more technical overview, we defer to \cite{bach2013learning}.} Specifically, if \ensuremath{\mathcal{L}}{} is a normalized submodular function with maximum \(\opt{\ensuremath{\mathcal{L}}}\), then a greedy maximizer is guaranteed to incur at most \(\tfrac{1}{e}\opt{\ensuremath{\mathcal{L}}}\) regret. In the context of BO, submodularity has previously been appealed to when establishing outer-loop regret bounds \cite{srinivas10,contal2013parallel,desautels2014parallelizing}. 
Such applications of submodularity utilize this property by relating an idealized BO strategy to greedy maximization of a submodular objective (\text{e.g.\@}{} the mutual information between unknown function \(f\) and observations \ensuremath{\mathcal{D}}{}). In contrast, we directly show submodularity for \temp{various} parallel acquisition functions per se. \note{what's the implication? inner loop + ?} \subheading{Submodular acquisitions} Let \(f^{(i)} \sim p(f | \ensuremath{\mathcal{D}})\) denote the \(i\)-th draw from a belief over possible functions \(f\), \(\ensuremath{\mathbf{y}}^{(i)} = \{f^{(i)}(\ensuremath{\mathbf{x}}) : \ensuremath{\mathbf{x}} \in \ensuremath{\mathbf{X}}\}\) the subset formed when indexing by (i.e. evaluating at) \ensuremath{\mathbf{X}}{} and \(\compl{\ensuremath{\mathbf{y}}}^{(i)}\) its complement. \note{looks more like a mean. consider using $y_{\backslashi}$ instead} By marginalizing out \(\compl{\ensuremath{\mathbf{y}}}\), we can interpret \ensuremath{\mathcal{L}}{} as integrating over functions \(f\) instead of unknown outcomes \(\ensuremath{\mathbf{y}} = f(\ensuremath{\mathbf{X}})\). In the language of the preceding section, \temp{this amounts to interpreting} the Gaussian reparameterization as \(\ensuremath{\phi}(\ensuremath{\mathbf{X}}; \ensuremath{\mathbf{z}}, \ensuremath{\boldsymbol{\theta}}) = \{\ensuremath{\boldsymbol{\mu}}_{j} + \ensuremath{\mathbf{L}}_{j}\ensuremath{\mathbf{z}}\}_{\ensuremath{\mathbf{x}}_{j} \in \ensuremath{\mathbf{X}}}\), where moments \(\ensuremath{\boldsymbol{\theta}} = (\ensuremath{\boldsymbol{\mu}}, \ensuremath{\boldsymbol{\Sigma}})\) are defined over \ensuremath{\mathcal{X}}{} rather than \ensuremath{\mathbf{X}}{}, which now \temp{simply} acts as an index set.\note{jw: sentence may no longer be needed} \note{what was $\ensuremath{\mathcal{X}}$ again?} \note{Why is this interpretation useful, i.e. 
where is this leading to?} Provided that \ensuremath{\mathcal{L}}{} is myopic, the individual functions \(f^{(i)}\) and corresponding probabilities \(\pi_{i}\) that define the expected utility \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}; \ensuremath{\mathcal{D}}) = \sum_{i=1}^{\infty} \pi_{i}\ensuremath{\ell}\left(\ensuremath{\mathbf{y}}^{(i)}; \ensuremath{\boldsymbol{\psi}} \right)\) are fixed since they only depend on observations \ensuremath{\mathcal{D}}{}. Moreover, since non-negative linear combinations of submodular functions are submodular, \(\ensuremath{\mathcal{L}}(\makebox[1ex]{\textbf{$\cdot$}}; \ensuremath{\mathcal{D}})\) is submodular so long as the same can be said of functions \(\ensuremath{\ell}\left(f^{(i)}(\makebox[1ex]{\textbf{$\cdot$}}); \ensuremath{\boldsymbol{\psi}})\). \note{can we get a reference for this statement?} As seen in Table~\ref{table:reparameterizations}, the \(\max(\makebox[1ex]{\textbf{$\cdot$}})\) operator acts as a common thread tying together many parallel acquisition functions. We collectively refer to instances, where utility is defined as the maximum of an element-wise function, as \emph{maximal} acquisition functions. \temp{Presuming} that such an acquisition function is myopic, it will typically be submodular. \note{what does 'typically' mean? what are the cornercases?} For a given function \ensuremath{\ell}{}, let \(\mathcal{V}\) be the set of possible utility values with corresponding minimum \ensuremath{v_{\min}}{} and define \(\max(\emptyset) = \ensuremath{v_{\min}}\). Then, given sets \(\ensuremath{\mathcal{A}} \subseteq \ensuremath{\mathcal{B}} \subseteq \mathcal{V}\) and \(\forall v \in \mathcal{V}\), it holds that \begin{align} \max(\ensuremath{\mathcal{A}} \cup \{v\}) - \max(\ensuremath{\mathcal{A}}) \ge \max(\ensuremath{\mathcal{B}} \cup \{v\}) - \max(\ensuremath{\mathcal{B}}). 
\label{eq:max_submodularity} \end{align} \temp{\emph{Proof:}} We prove the equivalent definition \(\max(\ensuremath{\mathcal{A}}) + \max(\ensuremath{\mathcal{B}}) \ge \max(\ensuremath{\mathcal{A}}~\cup~\ensuremath{\mathcal{B}}) + \max(\ensuremath{\mathcal{A}}~\cap~\ensuremath{\mathcal{B}})\). Without loss of generality, assume \(\max(\ensuremath{\mathcal{A}}~\cup~\ensuremath{\mathcal{B}}) = \max(\ensuremath{\mathcal{A}})\). Then, \(\max(\ensuremath{\mathcal{B}}) \ge \max(\ensuremath{\mathcal{A}}~\cap~\ensuremath{\mathcal{B}})\) since, for any \(\ensuremath{\mathcal{C}} \subseteq \ensuremath{\mathcal{B}}\), \(\max(\ensuremath{\mathcal{B}}) \ge \max(\ensuremath{\mathcal{C}})\). In order to enjoy theoretical guarantees however, the function being greedily maximized must be normalizable, such that the empty set evaluates to zero. Appendix~\ref{appendix:normalizing_utility} discusses this technical condition, which amounts to lower bounding \ensuremath{v_{\min}}{}, in greater detail. \subheading{Iterated forms} So far, we have discussed greedy strategies, which select points \ensuremath{\mathbf{x}}{} by maximizing a joint utility \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}} \cup \{\ensuremath{\mathbf{x}}\}; \ensuremath{\mathcal{D}})\). A closely related approach \temp{[cite]} is to instead maximize the expected value of a marginal utility \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{x}}; \new{\ensuremath{\mathcal{D}}})\) when integrating over possible states \(\new{\ensuremath{\mathcal{D}}} \gets \ensuremath{\mathcal{D}} \cup \{(\ensuremath{\mathbf{x}}_{i},\ensuremath{y}_{i})\}_{i=1}^{q}\). \temp{In some cases, these latter approaches have certain benefits: i) closed-form marginal acquisition functions allow us to avoid reparameterization, ii) better caching, iii) STUFF}.
\temp{These approaches and, more specifically, the associated utility expressions have more in common than might previously have been expected however.} \temp{In most cases, myopic maximal acquisition functions can equivalently be written in both joint and iterated forms. To prove this equivalence, we introduce the following identity while reusing the notation of \eqref{eq:max_submodularity}} \begin{align} \max(\ensuremath{\mathcal{B}}) - \max(\ensuremath{\mathcal{A}}) = \ensuremath{\operatorname{ReLU}}\left(\max(\ensuremath{\mathcal{B}} \setminus \ensuremath{\mathcal{A}}) - \max(\ensuremath{\mathcal{A}})\right). \label{eq:max_growth} \end{align} \emph{Proof:} Since \(\ensuremath{v_{\min}}\) is defined as the smallest possible element of either set, the \ensuremath{\operatorname{ReLU}}'s argument is negative if and only if \ensuremath{\mathcal{B}}'s maximum is a member of \ensuremath{\mathcal{A}}{} (in which case both sides equate to zero). In all other cases, the \ensuremath{\operatorname{ReLU}}{} can be eliminated and \(\max(\ensuremath{\mathcal{B}}) = \max(\ensuremath{\mathcal{B}} \setminus \ensuremath{\mathcal{A}})\) by definition. By rewriting \(\ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}})\) as a telescopic sum of growth function \( \Delta(\ensuremath{\mathbf{x}}; \ensuremath{\mathbf{X}}) = \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}} \cup \{\ensuremath{\mathbf{x}}\}) - \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) \) and subsequently applying \eqref{eq:max_growth} along with the law of iterated expectation, we arrive at the result that, for \temp{myopic}{\ask{jw: only for myopic?}} maximal acquisition functions, \begin{align} \ensuremath{\mathcal{L}}(\ensuremath{\mathbf{X}}) = \sum_{k=1}^{q} \mathbb{E}_{\ensuremath{\mathbf{y}}_{1:k-1}} \left[ \mathbb{E}_{\ensuremath{y}_{k}} \left[ \ensuremath{\operatorname{ReLU}}\left(\ensuremath{\ell}(\ensuremath{y}_k) - \max \ensuremath{\ell}(\ensuremath{\mathbf{y}}_{1:k-1})\right) \right] \right]. 
\label{eq:iterated_form} \end{align} Because the right-hand side of \eqref{eq:max_growth} amounts to the improvement of \ensuremath{\mathcal{B}}'s max over that of \ensuremath{\mathcal{A}}, the special case of parallel \ensuremath{\operatorname{EI}}{} further simplifies as \( \ensuremath{\mathcal{L}}_{\abbrScript{\ensuremath{\operatorname{EI}}}}(\ensuremath{\mathbf{X}}; \ensuremath{\mathcal{D}}) = \sum_{k=1}^{q}\mathbb{E}_{\ensuremath{\mathbf{y}}_{1:k-1}} \left[ \ensuremath{\mathcal{L}}_{\abbrScript{\ensuremath{\operatorname{EI}}}}(\ensuremath{\mathbf{x}}_{k}; \ensuremath{\mathcal{D}} \cup \{(\ensuremath{\mathbf{x}}_i,\ensuremath{y}_i)\}_{i=1}^{k-1}) \right] \). \note{we need a concluding sentence/paragraph here. summarize this subsection.}
{ "redpajama_set_name": "RedPajamaArXiv" }
9,588
mcworse has made his choice, and the reviews are rolling in — like a train wreck. cnn's "the cafferty file": jack cafferty: the question about mrs. palin went up on cnn.com about an hour and a half ago. we have received, before this show started, over 6,000 e-mails on this little topic. and the republicans ain't going to like this. rebecca in california: "as a lifelong republican soccer mom living in an affluent community, i was impressed with senator obama's acceptance speech last evening. having my morning latte with a few of my republican friends, i almost spit my coffee when i heard the news. is mccain really putting the best interests of our nation first? to me, he is pandering to women, trying to obtain their vote. it seems he wants another 'trophy' to parade around with. what is wrong with this man?" mitch in michigan writes: "i think mccain's selection of sarah palin as v.p. very similar to bush's nomination of harriet miers to the supreme court. it shows how much a mccain presidency would be just like the bush presidency, with the selection of totally unqualified individuals for high government posts. we've seen the disastrous results of such picks by bush. we can not let mccain continue this saga." doug writes: "had i known that being a hockey mom under 45 and having virtually no political experience was the desired v.p. running mate for mccain, i would have asked my wife to throw her hat into the ring. mccain has just handed the presidency to barack obama." jack cafferty: i'm telling you, there's 11,000 postings. we got a lot of mail to this, wolf. fairbanks daily news-miner: sarah palin's chief qualification for being elected governor was that she was not frank murkowski," cole said of her enormously unpopular predecessor ... anchorage daily news: state house speaker john harris, a republican from valdez, was astonished at the news. he didn't want to get into the issue of her qualifications. "she's old enough," harris said. "she's a u.s. citizen." 
denver post: i served with hillary clinton. i know hillary clinton. hillary clinton is a friend of mine. you, sarah palin, are no hillary clinton." sorry to steal joe biden's thunder, but we didn't want to wait for the vice presidential candidates' debate to say the obvious. ... in short, the presumptive republican nominee, an old soldier in all senses of that term, drafted the political equivalent of the unknown soldier as his co-pilot. mccain's pick of palin jettisons his attack that obama isn't ready to lead and looks more like a desperate "hail mary" campaign tactic aimed at female voters. register citizen: it's also obvious that mccain, if elected, is counting on surviving a presidential term. journal news: any woman won't do. i've got a message for mccain: hillaryites didn't want a woman; they wanted that woman. if this is his attempt at wooing disaffected hillary backers, he has sold all women short. national review online: ... he is one arrogant sob. mccain is essentially telling the world that he doesn't really need a vice president. it is hard to imagine palin playing the same sort of role that modern vice presidents like gore, bush, cheney, or mondale played. rather, the office would seem poised to return to the "proverbial warm bucket of p***" category. chicago tribune: steve chapman: this decision mocks mccain's seriousness on the issues that are supposed to be his strength. it tells us that he puts his own political fortunes above the safety of the nation. andrew zajac: john mccain may have some work to do ... if the underwhelmed reaction of former maryland gop gov. robert ehrlich is any indication... "i gotta go digest this choice," he mumbled to a couple of acquaintances. new york times: he was looking for someone who was well prepared to fight against international islamic extremism, the transcendent issue of our time. 
and in the end he decided that in good conscience, he was not going to settle for anyone who had not been commander of a state national guard for at least a year and a half. he put down his foot! mccain's mighty and oft-swung obama swatting hammer of experience has been instantly changed from steel to rubber. presidential scholars say she appears to be the least experienced, least credentialed person to join a major-party ticket in the modern era. so unconventional was mccain's choice that it left students of the presidency literally "stunned," in the words of joel goldstein, a st. louis university law professor and scholar of the vice presidency. "being governor of a small state for less than two years is not consistent with the normal criteria for determining who's of presidential caliber," said goldstein. in one swift stroke, mccain demolished what had been one of his main arguments against obama. "i think we're going to have to examine our tag line, 'dangerously inexperienced,'" a top mccain official said wryly. i'd feel bad for sarah, but she's only got mcworse to blame. her horrid press only underscores the wisdom of obama's choice of biden. obama's pick was a play to the punditry, whose glowing praise was what he needed in order to promote the "judgment" meme he's been trying to cultivate. mcworse desperately wanted to steal a news cycle from obama, and he's succeeded, though his victory is profoundly pyrrhic: the press he's won is as bad as the press he'd won the last time he tried stealing a news cycle — the night of obama's primary victory speech. and it calls into question his judgment, keeping mcworse right where obama wants him — behind. and mcworse's gambit has now put the once-tricky question of his age up front and center — on his birthday with a vengeance. that's two unforced errors for the price of one. isn't the birthday boy supposed to be receiving gifts instead of giving them?
mcworse advisor charlie black: she's going to learn national security at the foot of the master for the next four years, and most doctors think that he'll be around at least that long. wow — sexist and condescending! a twofer! nice to see the republicans can still insult the people they're trying so desperately to win over. Labels: barack obama, democrats, election, mccain, media, republicans, sarah palin
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
3,502
\section{Introduction} In this work, we consider the following linear generalized fourth-order Schr\"odinger equations \begin{align}\label{eq 4SE} \begin{cases} i \partial_t u + {\bf \dotDelta}^2 u = V(x) u , & (t, x) \in {\mathbb{R}} \times {\mathbb{R}}^d ,\\ u(0,x) = u_0(x) , \end{cases} \end{align} where ${\bf \dotDelta}^2 := \sum_{j=1}^d \partial_{x_j}^{\, 4}$. Note that ${\bf \dotDelta}^2$ is a fourth-order differential operator that removes the mixed derivative terms $\partial_{x_k x_k x_j x_j}$ ($k \neq j$) from regular bi-Laplacian $\Delta^2$. We will use a slight abuse of language -- we will refer ${\bf \dotDelta}^2$ as a `separable' fourth-order Schr\"odinger operator in the rest of this work. Our goal is to obtain sufficient conditions on the behavior of the solution $u$ at two different times $t = 0$ and $t = 1$ which guarantee that $u \equiv 0$ is the unique solution of \eqref{eq 4SE}. \subsection{Motivation} \subsubsection{Unique continuation questions} The study on unique continuation for partial differential equations has been historically an active research. This type of results answers the question of what conditions two solutions of a PDE must satisfy in order to be the same. In the context of dispersive PDEs, there are lots of works along this research line, see \cite{Z_KdV, Z_NLS, IK1, IK2, IV_JDE} and reference therein. These kind of uniqueness results are obtained of the form that if assuming two solutions coincide in a large subdomain of ${\mathbb{R}}^d$ at two different times, then they are identical on ${\mathbb{R}}^d$. In Kenig-Ponce-Vega \cite{KPV_CPAM} and Escauriaza-Kenig-Ponce-Vega \cite{EKPV_CPDE}, the authors were motivated by Hardy's uncertainty principle{\footnote{Hardy's uncertainty principle says that for $f : {\mathbb{R}} \to {\mathbb{C}}$ if $f (x) = \mathcal{O} (e^{-\pi A x^2})$ and its Fourier transform $\widehat{f} (\xi) = \mathcal{O} (e^{-\pi B \xi^2})$ with $A,B >0$ and $A B >1$, then $f \equiv 0$. 
Also if $A =B =1$, then $f (x) =ce^{-\pi x^2}$.}} and initiated a different way to answer the unique continuation question for free Schr\"odinger equations: \begin{align}\label{eq LS} i\partial_t u + \Delta u = V(t,x) u, \quad (t,x) \in [0,T] \times {\mathbb{R}}^d. \end{align} Roughly speaking, they showed that if a solution has {\it fast decay} at two different times, the solution has to be trivial. Accordingly for the nonlinear equation, they deduced uniqueness of the solution from information on the difference of two possible solutions at two different times (not necessary agree to each other in a subdomain). Later, in a series of papers \cite{EKPV_Duke, EKPV_JEMS, EKPV_MRL, KPV_MRL}, the authors have obtained the sharpest {\it fast decay} requirement. In particular, \cite{EKPV_Duke} obtained that for classical Schr\"odinger equations if assuming the potential $V$ satisfies certain boundedness properties (we neglect the assumption on $V$ here), then for a solution $u$ with fast enough decay, one has that the solution has to be trivial. More precisely, we cite \begin{thm}[Theorem 1 in \cite{EKPV_Duke}]\label{thm Sharp} Assume that $u \in C([0,T], L^2 ({\mathbb{R}}^d))$ verifies \eqref{eq LS}, $\alpha , \beta >0$, $T \alpha \beta > 1 /4 $ both $\norm{ e^{\alpha^2 \abs{x}^2} u(0)}_{L^2 ({\mathbb{R}}^d)}$ and $\norm{e^{\beta^2 \abs{x}^2} u(T)}_{L^2 ({\mathbb{R}}^d)}$ are finite, and the potential $V$ satisfies certain boundedness properties. Then $u \equiv 0$. \end{thm} Let us point out that the Gaussian decay in Theorem \ref{thm Sharp} is motivated by the Gaussian weight in the Hardy's uncertainty principle. Also the {\it fast enough decay} measured in such Gaussian weight fashion is sharp, since in the same work \cite{EKPV_Duke}, the authors provided an example on the threshold case ($T\alpha \beta = 1/4$), there exists a nonzero smooth solution with corresponding decay. 
\subsubsection{Fourth-order Schr\"odinger equations} Fourth-order Schr\"odinger equations with bi-Laplacian were introduced by Karpman \cite{Karpman} and Karpman-Shagalov \cite{KS} to investigate the stabilizing role of higher-order dispersive effects for the soliton instabilities. The following work by Fibich-Ilan-Papanicolaou \cite{FIP} studies the self-focusing and singularity formation of such fourth-order Schr\"odinger equations from the mathematical viewpoint. Analogues of unique continuation questions remain widely open for many high dimensional dispersive equations. Let us point out a recent work by Huang-Huang-Zheng \cite{HHZ} that proved a unique continuation result of the {\it fast decay} for higher order Schr\"odinger equations in one spatial dimension. As for the limitation of their argument applying higher dimensional analogues, the authors commented that the main obstacle is to obtain a suitable higher dimensional Carleman estimate due to a possible phase degeneracy problem in the restriction estimate used in their proof. In this work, our goal is to extend a {\it fast decay} type of unique continuation propriety (initiated in \cite{KPV_CPAM, EKPV_CPDE}) to `separable' fourth-order Schr\"odinger equations, especially in higher dimensions. It is worth mentioning that this type of higher degree generalizations of the Schr\"odinger equation is not uncommon, see for example \cite{ACP} for the same generalization in the context of the study of pointwise convergence of Schr\"odinger operators. \subsection{Main result and its sharpness} Now let us present the main result: \begin{thm}[Main result]\label{thm Main} Assume that $u\in C^1([0,1]: H^3(\mathbb{R}^d))$ solves \eqref{eq 4SE} with $V $ real-valued and $V, \nabla_x V, \nabla_x^{\, 2} V, \nabla_x^{\, 3} V \in L^{\infty} ({\mathbb{R}}^d)$. 
If there exists $\lambda>0$ and $\alpha >\frac{4}{3}$ such that \begin{align}\label{eq Weight} u(0,x), \, u(1,x)\in H^3(e^{\lambda|x|^\alpha}\,dx), \end{align} then $u(t,x) \equiv 0$. \end{thm} Note that we say that $f \in L^2(e^{\lambda|x|^\alpha}\,dx)$ if \begin{align} \int_{{\mathbb{R}}^d} \abs{f(x)}^2 e^{\lambda|x|^\alpha}\,dx < \infty , \end{align} and that $f \in H^3(e^{\lambda|x|^\alpha}\,dx)$ if $f , \partial_{x_j} f , \partial_{x_j x_k } f, \partial_{x_j x_k x_p} f \in L^2 (e^{\lambda|x|^\alpha}\,dx)$ for all $j,k,p =1 , \cdots, d$. \begin{rmk} With the main result stated, let us make a few comments on the order of exponential weight, $e^{\lambda \abs{x}^{\alpha}}$, $\alpha > \frac{4}{3}$. \begin{enumerate} \item In \cite{EKPV_CPDE}, a unique continuation is obtained for initial and terminal data with a super Gaussian decay, and it was mentioned that such super Gaussian weight in the measurement of the decay of data is closely related to the quadratic weight in Hardy's uncertainty principle. For the general case, let us recall the following result by H\"ormander \cite{Hor} that obtained an uncertainty principle with conjugate convex weights. \begin{thm}[Corollary in \cite{Hor}] If $\varphi$ and $\psi$ are conjugate convex functions and \begin{align} \int_{{\mathbb{R}}} \abs{f (x)} e^{\varphi (x)} \, dx <\infty , \quad \int_{{\mathbb{R}}} \abs{\widehat{f}(\xi)} e^{\psi (\xi)} \, d\xi < \infty , \end{align} then $f \equiv 0$. \end{thm} Notice that the fourth-order Schr\"odinger equation has a kernel of $e^{\mathcal{O} (\abs{x}^4)}$ type, hence the analogue of Gaussian weight in our case would be expected to be the conjugate convex weights, $ e^{\mathcal{O} (\abs{x}^{\frac{4}{3}})}$. This implies that our decay power $\alpha > \frac{4}{3}$ is almost sharp.
\item The decay rate which is described by $\lambda \abs{x}^{\alpha} $, $\alpha > \frac{4}{3}$ in \eqref{eq Weight} can be made better by replacing the exponential weight in \eqref{eq Weight} by $ \lambda \abs{x}^{\frac{4}{3}}$, where $\lambda > \lambda_0$, for some $\lambda_0 >0$ well chosen. The choice of such $\lambda_0$ can be made using the same argument as in the proof of Theorem 1.1 in \cite{HHZ}. \item The $H^3$ regularity requirement for both solutions and the potential is not necessary. We included it in the statement of Theorem \ref{thm Main} simply because in the proof of it, we need to differentiate the equation a couple of times to be able to use the logarithmic convexity for solutions with derivatives. In fact, by following the strategy in \cite{EKPV_JEMS} and introducing an artificial diffusion into the equation, we should be able to get rid of the regularity assumption. That is, we consider the modified equation (to fix the idea, we considered the $V=0$ case) \begin{align} \partial_t u = (A + iB) {\bf \dotDelta}^2 u \end{align} where $A >0$. An inherent decay given by the artificial diffusion $A {\bf \dotDelta}^2$ (see \eqref{eq Ker1}) allows one to do integration by parts freely and prove the solution up to certain derivatives preserves the same decay properties (via a logarithmic convexity) as the initial and terminal data without requiring extra regularity of the solution at all (since no differentiation of the equation is needed). Hence as a byproduct, we could even remove the $H^3$ regularity requirement on the solution. Then by taking the parameter $A \to 0$, a limiting argument gives the unique continuation properties that we desire. We do not plan to introduce any artificial diffusion in our proof, since such a term helps mostly in the derivation of preservation of decay properties (i.e. logarithmic convexity) and we will only re-use the proof in \cite{HHZ} instead of proving a new one.
\end{enumerate} \end{rmk} \subsection{Difficulties of the proof} We follow the strategy used in \cite{EKPV_CPDE} and prove Theorem \ref{thm Main} via a contradiction argument. There will be the following two ingredients needed in this contradiction argument. \begin{enumerate} \item A logarithmic convexity. We assume that a solution has {\it fast decay} at two times $0$ and $1$, then in order to show the persistence of the {\it fast decay} for times between $0$ and $1$, we need to obtain a convexity inequality. \item A Carleman inequality. The key idea in this contradiction argument is to obtain certain lower bounds of the localized solution that is related to the {\it fast decay} obtained via the logarithmic convexity. Then combining these two steps, we should be able to derive a contradiction. \end{enumerate} For the logarithmic convexity part, we extend the one obtained in \cite{HHZ} to our `separable' fourth-order operator by noticing that our setting shares the same heat kernel estimates after introducing the artificial diffusion. The main difficulty in our paper lies in the proof of the Carleman inequality. To obtain the Carleman inequality for the operator $i\partial_t + {\bf \dotDelta}^2$, we expect to see more terms in the computation of commutators (since the order of derivatives are much higher than the classical Schr\"odinger case). We are forced to use a computational software to simplify the extremely long expression we were faced with, originating from operators such as $\hat{T}f(x):=e^{-\phi(x)}\,(\partial_{x_i})^{4} \left[e^{\phi(x)} f(x)\right]$ and commutators thereof. After such simplifications, we need to manipulate certain $L^2$ inner-product containing many terms in such a way that it was lower-bounded in a positive fashion. We would use this lower bound to derive a new Carleman inequality, which we would later use in our lower bound proof. The bulk of this paper's difficulty lies in the Carleman proof, as outlined above. 
\subsection{Organization of the paper} In Section \ref{sec Logcon}, we present a logarithmic convexity result for linear solutions to \eqref{eq 4SE} with {\it fast decay}; in Section \ref{sec Carleman}, we show a Carleman inequality in the `separable' fourth-order Schr\"odinger operator; in Section \ref{sec Lower}, we prove a lower bound for the {\it fast decay} solutions; in Section \ref{sec Proof}, we prove the main linear unique continuation result, by combining the lower bound and logarithmic convexity proved in previous sections. \subsection*{Acknowledgement} Both authors would like to thank Gigliola Staffilani for suggesting this problem and Zongyuan Li and Luis Vega for very insightful conversations. Part of this work was done while the second author was in residence at the Institute for Computational and Experimental Research in Mathematics in Providence, RI, during the Hamiltonian Methods in Dispersive and Wave Evolution Equations program. Z. L. was supported by the Undergraduate Research Opportunities Program at the Massachusetts Institute of Technology. X.Y. was partially supported by an AMS-Simons travel grant. \section{Logarithmic Convexity}\label{sec Logcon} In this section, we discuss a logarithmic convexity for the solution to the linear equation \eqref{eq 4SE}. \subsection{Logarithmic convexity in \cite{HHZ}}\label{ssec Logcon} A key ingredient in \cite{HHZ} is the following logarithmic convexity in any dimensions. \begin{lem}[Fourth-order case in Proposition 1.3 in \cite{HHZ}]\label{lem HHZ} Suppose $V \in L^{\infty}({\mathbb{R}}^d)$ is real-valued, and $u \in C({\mathbb{R}} ; L^2 ({\mathbb{R}}^d))$ solves \begin{align} i \partial_t u - (-\Delta_x)^2 u = V(x)u .
\end{align} If there exists $\lambda >0$ such that \begin{align} e^{\lambda \abs{x}^{\frac{4}{3}}} u(0,x) , \quad e^{\lambda \abs{x}^{\frac{4}{3}}} u(1,x) \in L_x^2 ({\mathbb{R}}^d), \end{align} then for $t \in [0,1]$, we have \begin{align} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(t,x)}_{L_x^2} \leq C e^{\frac{t(1-t)}{2} \norm{V}_{L_x^{\infty}}^2} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(0,x)}_{L_x^2}^{1-t} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(1,x)}_{L_x^2}^{t} . \end{align} \end{lem} \begin{rmk} Notice that Lemma \ref{lem HHZ} can not be applied in our equation directly. However, in the proof of Lemma \ref{lem HHZ}, they employed the following estimates on higher order heat kernels. Below we quote only the four order case. \begin{enumerate} \item Let $K(t,x,y)$ be the kernel of fourth-order heat semigroup $\{ e^{-t (\mathcal{P} +V)}\}_{t \geq 0}$, where $\mathcal{P} = (-\Delta)^2$ on $L_x^2 ({\mathbb{R}}^d)$ with $V \in L^{\infty} ({\mathbb{R}}^d)$ is real-valued. It is known in \cite{Da, BD} that there exist $C_1, C_2, \omega_0 > 0$ for the following to hold \begin{align}\label{eq Ker1} \abs{K (t,x,y)} \leq C_1 t^{- \frac{d}{4}} \exp \bracket{- C_2 t^{-\frac{1}{3}} \abs{x-y}^{\frac{4}{3}} + \omega_0 t \norm{V}_{L^{\infty}} } \end{align} for $t >0$, $x, y \in {\mathbb{R}}^d$. \item Using Theorem 2.1 in \cite{ZZ}, it further follows that for some $C_1, C_2, \omega_0 > 0$, the kernel $K(z,x,y)$ of the analytic semigroup $\{ e^{-z (\mathcal{P} +V)}\}_{\re z >0}$, where $\mathcal{P} = (-\Delta)^2$, satisfies \begin{align}\label{eq Ker2} \abs{K (z,x,y)} \leq C_1 (\re z)^{- \frac{d}{4}} \exp \bracket{- C_2 (\re z) (\frac{\abs{x-y}}{\abs{z}})^{\frac{4}{3}} + \omega_0 \re z \norm{V}_{L^{\infty}} } \end{align} for $\re z > 0 $, $x,y \in {\mathbb{R}}^d$. \end{enumerate} In fact, \cite{Da, BD, ZZ} imply that the same decay estimates \eqref{eq Ker1} and \eqref{eq Ker2} apply to $\mathcal{P} = {\bf \dotDelta}^2$, which is the case we are focusing on. 
\end{rmk} Another essential ingredient in Proposition 1.3 in \cite{HHZ} is the following formal commutator estimates in \cite{EKPV_JEMS}, which will also apply to our setting. \begin{lem}[Lemma 2 in \cite{EKPV_JEMS}]\label{lem JEMS} Suppose that $\mathcal{S}$ is a symmetric operator, $\mathcal{A}$ is skew-symmetric, both are allowed to depend on the time variable, $G$ is a positive function, $f(t,x)$ is a reasonable function, \begin{align} H(t) = \inner{f,f}, \quad D(t) = \inner{\mathcal{S}f, f}, \quad \partial_t \mathcal{S} = \mathcal{S}_t, \quad N(t) = \frac{D(t)}{H(t)}. \end{align} Then \begin{align} \partial_t^2 H = 2 \partial_t \re \inner{\partial_t f - \mathcal{S} f - \mathcal{A} f , f} + 2 \inner{\mathcal{S}_t f + [\mathcal{S} , \mathcal{A}] f , f} \\ + \norm{\partial_t f - \mathcal{A} f + \mathcal{S} f }_{L^2}^2 - \norm{\partial_t f - \mathcal{A} f - \mathcal{S} f}_{L^2}^2 , \end{align} and \begin{align} \dot{N}(t) \geq \inner{\mathcal{S}_t f + [\mathcal{S} , \mathcal{A}] f , f}/ H - \norm{\partial_t f - \mathcal{A} f - \mathcal{S} f}_{L^2}^2 / (2H) . \end{align} Moreover, if \begin{align} \abs{\partial_t f - \mathcal{A} f - \mathcal{S} f} \leq M_1 \abs{f} + G \quad \text{ in } [0,1] \times {\mathbb{R}} , \quad \mathcal{S}_t + [\mathcal{S} , \mathcal{A}] \geq - M_0 , \end{align} and \begin{align} M_2 = \sup_{t \in [0,1]} \norm{G(t)}_{L^2} / \norm{f(t)}_{L^2} \end{align} is finite, then $H(t)$ is logarithmically convex in $[0,1]$ and there is a universal constant $N$ such that for $t \in [0,1]$ \begin{align} H(t) \leq e^{N (M_0 + M_1 + M_2 + M_1^2 + M_2^2)} H(0)^{1-t} H(1)^t. \end{align} \end{lem} \subsection{Logarithmic convexity for `separable' fourth-order Schr\"odinger equation} With the fundamental decay estimates and abstract commutator result verified, using the same calculation in Proposition 1.3 in \cite{HHZ}, we have the following two logarithmic convexity results for \eqref{eq 4SE}.
\begin{cor} If $u \in C({\mathbb{R}} ; L^2 ({\mathbb{R}}^d))$ solves \eqref{eq 4SE} and we assume the same hypothesis as in Lemma \ref{lem HHZ}, one has \begin{align} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(t,x)}_{L_x^2} \leq C e^{\frac{t(1-t)}{2} \norm{V}_{L^{\infty}}^2} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(0,x)}_{L_x^2}^{1-t} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(1,x)}_{L_x^2}^{t} . \end{align} \end{cor} \begin{cor}\label{cor Logcon} If $u$ solves the perturbed equation \begin{align} i\partial_t u + {\bf \dotDelta}^2 u = V u + H , \end{align} then Lemma \ref{lem JEMS} gives that \begin{align} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(t,x)}_{L_x^2} \leq C e^{\frac{t(1-t)}{2} (\norm{V}_{L^{\infty}}^2 + M^2)} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(0,x)}_{L_x^2}^{1-t} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u(1,x)}_{L_x^2}^{t} , \end{align} where \begin{align} M = \sup_{t \in [0,1]} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}}H }_{L_x^2} / \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} u}_{L_x^2}. \end{align} \end{cor} \section{Carleman Inequality}\label{sec Carleman} In this section, we prove a Carleman estimate with quadratic exponential weights for the `separable' equation \eqref{eq 4SE}, which will be used in Section \ref{sec Proof}. \begin{lem}\label{lem Carleman} Assume that $R>0$ and $\varphi:[0,1] \to \mathbb{R}$ is a smooth function. Let $u (t,x) \in C_c^\infty(\mathbb{R}\times {\mathbb{R}}^d)$ with support contained in the set \begin{align} \{(t,x) \in [0,1] \times {\mathbb{R}}^d : \abs{\frac{x_1}{R}+ \varphi (t)} \geq 1 \} .
\end{align} Then there exists $c= c(d, \norm{\varphi'}_{L^{\infty}} , \norm{\varphi''}_{L^{\infty}})$ such that the inequality \begin{align} \norm{e^{\alpha (\frac{x_1}{R}+ \varphi (t))^2 + \alpha \sum_{j=2}^d (\frac{x_j}{R})^2} (i\partial_t + {\bf \dotDelta}^2) u}_{L_{t,x}^2}^2 \geq c \frac{\alpha^7}{R^8} \norm{e^{\alpha (\frac{x_1}{R}+ \varphi (t))^2 + \alpha \sum_{j=2}^d (\frac{x_j}{R})^2} u}_{L_{t,x}^2}^2 \end{align} holds when $\alpha \geq c R^{\frac{4}{3}}$. \end{lem} \begin{proof}[Proof of Lemma \ref{lem Carleman}] Let $f = e^{\alpha \Phi^2} u$, where \begin{align} \Phi^2 (t,x) := (\frac{x_1}{R}+ \varphi (t))^2 + \sum_{j=2}^d (\frac{x_j}{R})^2 = \psi^2 + \sum_{j=2}^d (\frac{x_j}{R})^2, \end{align} and \begin{align} \psi = \frac{x_1}{R}+ \varphi (t) . \end{align} Under this change of variables, we reduce to proving \begin{align} \norm{e^{\alpha\Phi^2} (i\partial_t + {\bf \dotDelta}^2) e^{-\alpha\Phi^2}f}_{L_{t,x}^2}^2 \geq c \frac{\alpha^7 }{R^8}\norm{f}_{L_{t,x}^2}^2 . \end{align} We first write \begin{align} e^{\alpha\Phi^2} (i\partial_t + {\bf \dotDelta}^2) e^{-\alpha\Phi^2}f = : \mathcal{S} f + \mathcal{A} f \end{align} where $\mathcal{S}$ and $\mathcal{A}$ are respectively symmetric and anti-symmetric operators (with respect to the $L^2$ norm).
A direct computation gives that \begin{itemize} \item For $j =1$ \begin{align} e^{\alpha\Phi^2 } \partial_{x_1}^{\, 4} (e^{-\alpha\Phi^2} f) & = \partial_{x_1}^{\, 4} f + \partial_{x_1}^{\, 3} f [- \frac{8 \alpha \psi}{R}] + \partial_{x_1}^{\, 2} f [\frac{24 \alpha^2 \psi^2}{R^2} - \frac{12 \alpha}{R^2}] \\ & \quad + \partial_{x_1}f [-\frac{32 \alpha^3 \psi^3}{R^3} + \frac{48 \alpha^2 \psi}{R^3}] + f[ \frac{16 \alpha^4 \psi^4}{R^4} -\frac{48 \alpha^3 \psi^2}{R^4} + \frac{12 \alpha^2}{R^4}] ; \end{align} \item For $j = 2 , \cdots , d$ \begin{align} e^{\alpha\Phi^2 } \partial_{x_j}^{\, 4} (e^{-\alpha\Phi^2} f) & = \partial_{x_j}^{\, 4} f + \partial_{x_j}^{\, 3} f [- \frac{8 \alpha x_j}{R^2}] + \partial_{x_j}^{\, 2} f [\frac{24 \alpha^2 x_j^2}{R^4} - \frac{12 \alpha}{R^2}] + \partial_{x_j} f [- \frac{32 \alpha^3 x_j^3}{R^6} \\ & \quad + \frac{48 \alpha^2 x_j}{R^4}] + f [ \frac{16 \alpha^4 x_j^4}{R^8} -\frac{48 \alpha^3 x_j^2}{R^6} + \frac{12 \alpha^2}{R^4}] . \end{align} \end{itemize} Then adding these two cases yields \begin{align} e^{\alpha\Phi^2 } {\bf \dotDelta}^2 ( e^{ -\alpha\Phi^2 } f )& = {\bf \dotDelta}^2 f + \partial_{x_1}^{\, 3} f [- \frac{8 \alpha \psi}{R} ] + \sum_{j=2}^d \partial_{x_j}^{\, 3} f [- \frac{8 \alpha x_j}{R^2}] \label{eq 14} \\ & \quad + \partial_{x_1}^{\, 2} f [\frac{24 \alpha^2 \psi^2}{R^2} - \frac{12 \alpha}{R^2}] + \sum_{j=2}^d \partial_{x_j}^{\, 2} f [\frac{24 \alpha^2 x_j^2}{R^4} - \frac{12\alpha}{R^2}] \\ & \quad + \partial_{x_1} f [-\frac{32 \alpha^3 \psi^3}{R^3} + \frac{48 \alpha^2 \psi}{R^3} ] + \sum_{j=2}^d \partial_{x_j} f [- \frac{32 \alpha^3 x_j^3}{R^6} + \frac{48 \alpha^2 x_j}{R^4}] \\ & \quad + f [ \frac{16\alpha^4 \psi^4}{R^4} -\frac{48\alpha^3 \psi^2}{R^4} + \sum_{j=2}^d ( \frac{16 \alpha^4 x_j^4}{R^8} - \frac{48 \alpha^3 x_j^2}{R^6} ) + \frac{12 d \alpha^2 }{R^4}] , \end{align} and \begin{align} e^{\alpha\Phi^2} i\partial_t (e^{-\alpha\Phi^2} f) = e^{\alpha\Phi^2} i (e^{-\alpha\Phi^2} \partial_t f - e^{-\alpha\Phi^2} \alpha
(\frac{2x_1}{R} \varphi' + 2 \varphi \varphi')f) = i \partial_t f - i \alpha (\frac{2x_1}{R} \varphi' + 2 \varphi \varphi') f \label{eq 15}. \end{align} We recognize the symmetric and the anti-symmetric parts of the operator in \eqref{eq 14} and \eqref{eq 15}. The symmetric operator $\mathcal{S}$ is given by \begin{align} \mathcal{S}f & = i \partial_t f + {\bf \dotDelta}^2 f \\ & \quad + \partial_{x_1}^{\, 2} f [\frac{24 \alpha^2 \psi^2}{R^2} ] + \sum_{j=2}^d \partial_{x_j}^{\, 2} f [\frac{24 \alpha^2 x_j^2}{R^4} ] \\ & \quad + \partial_{x_1} f [ \frac{48 \alpha^2 \psi}{R^3} ] + \sum_{j=2}^d \partial_{x_j} f [ \frac{48 \alpha^2 x_j}{R^4}] \\ & \quad + f [ \frac{16\alpha^4 \psi^4}{R^4} + \sum_{j=2}^d \frac{16 \alpha^4 x_j^4}{R^8} + \frac{12 d \alpha^2 }{R^4}] . \end{align} We decompose $\mathcal{S}$ into \begin{align} \mathcal{S} f = : \mathcal{S}_t f + \mathcal{S}_{x_1} + \sum_{j=2}^d \mathcal{S}_{x_j} , \end{align} where \begin{align} \mathcal{S}_t f & = i \partial_t f ,\\ \mathcal{S}_{x_1} f & = \partial_{x_1}^{\, 4} f + \partial_{x_1}^{\, 2} f [\frac{24 \alpha^2 \psi^2}{R^2} ] + \partial_{x_1} f [ \frac{48 \alpha^2 \psi}{R^3} ] + f [ \frac{16\alpha^4 \psi^4}{R^4} + \frac{12 \alpha^2 }{R^4}] ,\\ \mathcal{S}_{x_j} f & = \partial_{x_j}^{\, 4} f + \partial_{x_j}^{\, 2} f [\frac{24 \alpha^2 x_j^2}{R^4} ] + \partial_{x_j} f [ \frac{48 \alpha^2 x_j}{R^4} ] + f [ \frac{16 \alpha^4 x_j^4}{R^8} + \frac{12 \alpha^2 }{R^4}], \quad j = 2, \cdots , d. 
\end{align} Then the anti-symmetric operator $\mathcal{A}$ is given by \begin{align} \mathcal{A}f & = f[- 2 i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi')]\\ & \quad + \partial_{x_1}^{\, 3} f [- \frac{8 \alpha \psi}{R} ] + \sum_{j=2}^d \partial_{x_j}^{\, 3} f [- \frac{8 \alpha x_j}{R^2}] \\ & \quad + \partial_{x_1}^{\, 2} f [- \frac{12 \alpha}{R^2}] + \sum_{j=2}^d \partial_{x_j}^{\, 2} f [ - \frac{12\alpha}{R^2}] \\ & \quad + \partial_{x_1} f [-\frac{32 \alpha^3 \psi^3}{R^3} ] + \sum_{j=2}^d \partial_{x_j} f [- \frac{32 \alpha^3 x_j^3}{R^6} ] \\ & \quad + f [ -\frac{48\alpha^3 \psi^2}{R^4} + \sum_{j=2}^d ( - \frac{48 \alpha^3 x_j^2}{R^6} ) ] . \end{align} We again decompose $\mathcal{A}$ into \begin{align} \mathcal{A}f =: \mathcal{A}_{t}f + \mathcal{A}_{x_1} f + \sum_{j=2}^d \mathcal{A}_{x_j} f , \end{align} where \begin{align} \mathcal{A}_t f & = f[- 2 i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi')] ,\\ \mathcal{A}_{x_1} f & = \partial_{x_1}^{\, 3} f [- \frac{8 \alpha \psi}{R} ] + \partial_{x_1}^{\, 2} f [- \frac{12 \alpha}{R^2}] + \partial_{x_1} f [-\frac{32 \alpha^3 \psi^3}{R^3} ] + f [ -\frac{48\alpha^3 \psi^2}{R^4} ] ,\\ \mathcal{A}_{x_j} f & = \partial_{x_j}^{\, 3} f [- \frac{8 \alpha x_j}{R^2}] + \partial_{x_j}^{\, 2} f [ - \frac{12\alpha}{R^2}] + \partial_{x_j} f [- \frac{32 \alpha^3 x_j^3}{R^6} ] + f [ - \frac{48 \alpha^3 x_j^2}{R^6} ] , \quad j = 2, \cdots , d. \end{align} Now we will compute the commutator $[\mathcal{S}, \mathcal{A}]$ term by term. First notice that $[\mathcal{S}_{x_i}, \mathcal{A}_{x_j}] = 0$ when $i \neq j$, and $[\mathcal{S}_t , \mathcal{A}_{x_j}] = [\mathcal{S}_{x_j} , \mathcal{A}_t] =0$, for $j \neq 1$. 
This observation implies that we only need to compute the following five cases: \begin{enumerate} \item $[\mathcal{S}_t, \mathcal{A}_t]$ \item $[\mathcal{S}_t, \mathcal{A}_{x_1}]$ \item $[\mathcal{S}_{x_1}, \mathcal{A}_t]$ \item $[\mathcal{S}_{x_1}, \mathcal{A}_{x_1}]$ \item $[\mathcal{S}_{x_j}, \mathcal{A}_{x_j}]$ \end{enumerate} For Case (1) in the list, we have \begin{align} [\mathcal{S}_t, \mathcal{A}_t]f & = [i\partial_t , - 2i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi')]f = 2 \alpha [\partial_t , (\frac{x_1}{R} \varphi' + \varphi \varphi')]f \\ & = 2 \alpha \partial_t ((\frac{x_1}{R} \varphi' + \varphi \varphi')f) - 2 \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') \partial_t f \\ & = 2 \alpha (\frac{x_1}{R} \varphi'' + \varphi'^2 + \varphi \varphi'' ) f . \end{align} Then consider Case (2), we compute \begin{align} [i \partial_t , \frac{8 \alpha \psi}{R} \partial_{x_1}^{\, 3}]f & = i \partial_t (\frac{8 \alpha \psi}{R} \partial_{x_1}^{\, 3} f) - i \frac{8 \alpha \psi}{R} \partial_{x_1}^{\, 3} ( \partial_t f ) = i \frac{8 \alpha \varphi'}{R} \partial_{x_1}^{\, 3} f , \\ [i \partial_t , \frac{12 \alpha}{R^2} \partial_{x_1}^{\, 2}]f & = 0 ,\\ [i \partial_t , \frac{32 \alpha^3 \psi^3}{R^3} \partial_{x_1}]f & = i \partial_t (\frac{32 \alpha^3 \psi^3}{R^3} \partial_{x_1} f) - i \frac{32 \alpha^3 \psi^3}{R^3} \partial_{x_1} ( \partial_t f ) = i \frac{96 \alpha^3 \psi^2 \varphi'}{R^3} \partial_{x_1} f , \\ [i \partial_t , \frac{48 \alpha^3 \psi^2}{R^4}] f & = i \partial_t ( \frac{48 \alpha^3 \psi^2}{R^4} f ) - i \frac{48 \alpha^3 \psi^2}{R^4} ( \partial_t f ) = i \frac{96 \alpha^3 \psi \varphi'}{R^4} f . \end{align} Then adding them together, we have \begin{align} [\mathcal{S}_t ,\mathcal{A}_{x_1}] f & = -i \frac{8 \alpha \varphi'}{R} \partial_{x_1}^{\, 3} f - i \frac{96 \alpha^3 \psi^2 \varphi'}{R^3} \partial_{x_1} f - i \frac{96 \alpha^3 \psi \varphi'}{R^4} f . 
\end{align} For Case (3), we compute first \begin{align} [ \partial_{x_1}^{\, 4} , 2 i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') ]f & = 2i \alpha \partial_{x_1}^{\, 4} ((\frac{x_1}{R} \varphi' + \varphi \varphi')f) - 2 i\alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') \partial_{x_1}^{\, 4} f = 2i \alpha \frac{4}{R} \varphi' \partial_{x_1}^{\, 3} f , \\ [ \frac{24 \alpha^2 \psi^2}{R^2} \partial_{x_1}^{\, 2} , 2 i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') ]f & = 2i \alpha \frac{24 \alpha^2 \psi^2}{R^2} \partial_{x_1}^{\, 2} ((\frac{x_1}{R} \varphi' + \varphi \varphi') f) - 2i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') \frac{24 \alpha^2 \psi^2}{R^2} \partial_{x_1}^{\, 2} f \\ & = 2i \alpha \frac{24 \alpha^2 \psi^2}{R^2} \frac{2}{R}\varphi' \partial_{x_1} f ,\\ [ \frac{48 \alpha^2 \psi}{R^3} \partial_{x_1} , 2 i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') ]f & = 2i \alpha \frac{48 \alpha^2 \psi}{R^3} \partial_{x_1} ((\frac{x_1}{R} \varphi' + \varphi \varphi')f) - 2i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') \frac{48 \alpha^2 \psi}{R^3} \partial_{x_1} f \\ & = 2i \alpha \frac{48 \alpha^2 \psi}{R^3} \frac{1}{R}\varphi' f ,\\ [( \frac{16\alpha^4 \psi^4}{R^4} + \frac{12 \alpha^2 }{R^4}), 2 i \alpha (\frac{x_1}{R} \varphi' + \varphi \varphi') ]f & = 0 . \end{align} Then summing up all the terms above, we have \begin{align} [\mathcal{S}_{x_1} , \mathcal{A}_t] f & = - 2i \alpha \frac{4}{R} \varphi' \partial_{x_1}^{\, 3} f - 2i \alpha \frac{48 \alpha^2 \psi^2}{R^3} \varphi' \partial_{x_1} f - 2i \alpha \frac{48 \alpha^2 \psi}{R^4} \varphi'f . 
\end{align} Next for Cases (4) and (5) in the list, using Mathematica we get \begin{align}\label{eq 16} \begin{aligned} [\mathcal{S}_{x_1}, \mathcal{A}_{x_1}] f & = f [ - \frac{1536 \alpha^5 \psi^2 }{R^8} + \frac{2048 \alpha^7 \psi^6 }{R^8} ] + [ - \frac{6144 \alpha^5 \psi^3 \partial_{x_1} f}{R^7} ] + [ \frac{384 \alpha^3 \partial_{x_1}^{\, 2} f}{R^6} - \frac{1536 \alpha^5 \psi^4 \partial_{x_1}^{\, 2} f}{R^6} ] \\ & \quad + [ \frac{1536 \psi \partial_{x_1}^{\, 3} f}{R^5}] + [ \frac{384 \alpha^3 \psi^2 \partial_{x_1}^{\, 4} f}{R^4}] + [ - \frac{32 \alpha \partial_{x_1}^{\, 6} f}{R^2}] , \end{aligned} \end{align} and \begin{align}\label{eq 17} \begin{aligned} [\mathcal{S}_{x_j}, \mathcal{A}_{x_j}] f & = f [ - \frac{1536 \alpha^5 x_j^2}{R^{10}} + \frac{2048 \alpha^7 x_j^6 }{R^{14}} ] + [- \frac{6144 \alpha^5 x_j^3 \partial_{x_j} f}{R^{10}} ] + [ \frac{384 \alpha^3 \partial_{x_j}^{\, 2}f}{R^6} - \frac{1536 \alpha^5 x_j^4 \partial_{x_j}^{\, 2} f}{R^{10}} ] \\ & \quad + [\frac{1536 \alpha^3 x_j \partial_{x_j}^{\, 3} f}{R^6} ] + [\frac{384 \alpha^3 x_j^2 \partial_{x_j}^{\, 4} f }{R^6} ] + [- \frac{32\alpha \partial_{x_j}^{\, 6} f}{R^2} ] . 
\end{aligned} \end{align} Combining \eqref{eq 16} and \eqref{eq 17}, we obtain a part of the commutator $[\mathcal{S} , \mathcal{A}]$ \begin{align} [\mathcal{S}_{x_1} , \mathcal{A}_{x_1} ]f + \sum_{j=2}^d [\mathcal{S}_{x_j}, \mathcal{A}_{x_j}]f & = f [ - \frac{1536 \alpha^5 \psi^2 }{R^8} + \frac{2048 \alpha^7 \psi^6 }{R^8} + \sum_{j=2}^d (- \frac{1536 \alpha^5 x_j^2}{R^{10}} + \frac{2048 \alpha^7 x_j^6 }{R^{14}}) ] \\ & \quad + [ - \frac{6144 \alpha^5 \psi^3 \partial_{x_1} f}{R^7} + \sum_{j=2}^d (- \frac{6144 \alpha^5 x_j^3 \partial_{x_j} f}{R^{10}} )] \\ & \quad + [ \frac{384 \alpha^3 \partial_{x_1}^{\, 2} f}{R^6} - \frac{1536 \alpha^5 \psi^4 \partial_{x_1}^{\, 2} f}{R^6} + \sum_{j=2}^d (\frac{384 \alpha^3 \partial_{x_j}^{\, 2} f}{R^6} - \frac{1536 \alpha^5 x_j^4 \partial_{x_j}^{\, 2} f}{R^{10}} )] \\ & \quad + [ \frac{1536 \psi \partial_{x_1}^{\, 3} f}{R^5} + \sum_{j=2}^d \frac{1536 \alpha^3 x_j \partial_{x_j}^{\, 3} f}{R^6} ] \\ & \quad + [ \frac{384 \alpha^3 \psi^2 \partial_{x_1}^{\, 4} f}{R^4} + \sum_{j=2}^d \frac{384 \alpha^3 x_j^2 \partial_{x_j}^{\, 4} f }{R^6} ] \\ & \quad + [- \frac{32 \alpha \partial_{x_1}^{\, 6} f}{R^2} + \sum_{j=2}^d (- \frac{32\alpha \partial_{x_j}^{\, 6} f}{R^2} )] . 
\end{align} Together with all the five cases in the list, we arrive at \begin{align} [\mathcal{S},\mathcal{A}] f & = [\mathcal{S}_t , \mathcal{A}_t]f + [\mathcal{S}_t, \mathcal{A}_{x_1}]f + [\mathcal{S}_{x_1} ,\mathcal{A}_t]f + [\mathcal{S}_{x_1} , \mathcal{A}_{x_1} ]f + \sum_{j=2}^d [\mathcal{S}_{x_j}, \mathcal{A}_{x_j}]f\\ & = 2 \alpha (\frac{x_1}{R} \varphi'' + \varphi'^2 + \varphi \varphi'' ) f -i \frac{16 \alpha \varphi'}{R} \partial_{x_1}^{\, 3} f - i \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \partial_{x_1} f - i \frac{192 \alpha^3 \psi \varphi'}{R^4} f \\ & \quad + f [ - \frac{1536 \alpha^5 \psi^2 }{R^8} + \frac{2048 \alpha^7 \psi^6 }{R^8} + \sum_{j=2}^d (- \frac{1536 \alpha^5 x_j^2}{R^{10}} + \frac{2048 \alpha^7 x_j^6 }{R^{14}}) ] \\ & \quad + [ - \frac{6144 \alpha^5 \psi^3 \partial_{x_1} f}{R^7} + \sum_{j=2}^d (- \frac{6144 \alpha^5 x_j^3 \partial_{x_j} f}{R^{10}} )] \\ & \quad + [ \frac{384 \alpha^3 \partial_{x_1}^{\, 2} f}{R^6} - \frac{1536 \alpha^5 \psi^4 \partial_{x_1}^{\, 2} f}{R^6} + \sum_{j=2}^d (\frac{384 \alpha^3 \partial_{x_j}^{\, 2} f}{R^6} - \frac{1536 \alpha^5 x_j^4 \partial_{x_j}^{\, 2} f}{R^{10}} )] \\ & \quad + [ \frac{1536 \psi \partial_{x_1}^{\, 3} f}{R^5} + \sum_{j=2}^d \frac{1536 \alpha^3 x_j \partial_{x_j}^{\, 3} f}{R^6} ] \\ & \quad + [ \frac{384 \alpha^3 \psi^2 \partial_{x_1}^{\, 4} f}{R^4} + \sum_{j=2}^d \frac{384 \alpha^3 x_j^2 \partial_{x_j}^{\, 4} f }{R^6} ] \\ & \quad + [- \frac{32 \alpha \partial_{x_1}^{\, 6} f}{R^2} + \sum_{j=2}^d (- \frac{32\alpha \partial_{x_j}^{\, 6} f}{R^2} )] . \end{align} Next we compute the inner product $\inner{f, [\mathcal{S},\mathcal{A}]f}$ and our aim is to find an lower bound of it. 
\begin{align} & \inner{f, [\mathcal{S},\mathcal{A}]f}_{L_{t,x}^2 \times L_{t,x}^2} \label{eq SA}\\ & = \iint \bar{f} [2 \alpha (\frac{x_1}{R} \varphi'' + \varphi'^2 + \varphi \varphi'' ) f -i \frac{16 \alpha \varphi'}{R} \partial_{x_1}^{\, 3} f - i \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \partial_{x_1} f - i \frac{192 \alpha^3 \psi \varphi'}{R^4} f ] \, dx \, dt \label{eq SA0}\\ & \quad + \iint \abs{f}^2 [ - \frac{1536 \alpha^5 \psi^2 }{R^8} + \frac{2048 \alpha^7 \psi^6 }{R^8} + \sum_{j=2}^d (- \frac{1536 \alpha^5 x_j^2}{R^{10}} + \frac{2048 \alpha^7 x_j^6 }{R^{14}}) ] \, dx \, dt \label{eq SA1}\\ & \quad + \iint \bar{f} \partial_{x_1} f [ - \frac{6144 \alpha^5 \psi^3}{R^7} ]+ \sum_{j=2}^d \bar{f}\partial_{x_j} f [- \frac{6144 \alpha^5 x_j^3 }{R^{10}} ] \, dx \, dt \label{eq SA2}\\ & \quad + \iint \bar{f}\partial_{x_1}^{\, 2} f [ \frac{384 \alpha^3 }{R^6} - \frac{1536 \alpha^5 \psi^4 }{R^6}] + \sum_{j=2}^d \bar{f}\partial_{x_j}^{\, 2} f [\frac{384 \alpha^3 }{R^6} - \frac{1536 \alpha^5 x_j^4 }{R^{10}}] \, dx \, dt \label{eq SA3}\\ & \quad + \iint \bar{f}\partial_{x_1}^{\, 3} f[ \frac{1536 \psi }{R^5}] + \sum_{j=2}^d \bar{f} \partial_{x_j}^{\, 3} f [\frac{1536 \alpha^3 x_j }{R^6} ] \, dx \, dt \label{eq SA4}\\ & \quad + \iint \bar{f} \partial_{x_1}^{\, 4} f [ \frac{384 \alpha^3 \psi^2 }{R^4}] + \sum_{j=2}^d \bar{f}\partial_{x_j}^{\, 4} f [ \frac{384 \alpha^3 x_j^2 }{R^6} ] \, dx \, dt \label{eq SA5}\\ & \quad + \iint \bar{f} \partial_{x_1}^{\, 6} f [- \frac{32 \alpha }{R^2}] + \sum_{j=2}^d \bar{f} \partial_{x_j}^{\, 6} f [- \frac{32\alpha }{R^2} ] \, dx \, dt .\label{eq SA6} \end{align} To this end, we need to preform a few integration by parts term by term. \noindent {\it \underline{Term \eqref{eq SA0}.}} First take the last term in \eqref{eq SA0}. 
An integration by parts yields \begin{align} \iint - i \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \bar{f} \partial_{x_1} f \, dx \, dt = \iint i \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \partial_{x_1} \overline{f} f \, dx \, dt + \iint i \frac{192 \alpha^3 \psi \varphi'}{R^4} 2 \abs{f}^2 \, dx \, dt , \end{align} which implies \begin{align} \iint i \frac{192 \alpha^3 \psi \varphi'}{R^4} \abs{f}^2 \, dx \, dt = - \iint i\frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \re (\bar{f} \partial_{x_1} f) \, dx \, dt. \end{align} Hence the last two terms in \eqref{eq SA0} is given by \begin{align} & \quad \iint - i \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \bar{f} \partial_{x_1} f - i \frac{192 \alpha^3 \psi \varphi' }{R^4} \abs{f}^2 \, dx \, dt \\ & = \iint - i \frac{192 \alpha^3\psi^2 \varphi' }{R^3} \bar{f} \partial_{x_1} f \, dx \, dt + \iint i \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \re (\bar{f} \partial_{x_1} f) \, dx \, dt\\ & = \iint \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \im (\bar{f} \partial_{x_1} f) \, dx \, dt . \end{align} The second term in \eqref{eq SA0} can be written as \begin{align} \iint -i \frac{16 \alpha \varphi'}{R} \bar{f} \partial_{x_1}^{\, 3} f \, dx \, dt & = \iint i \frac{16 \alpha \varphi'}{R} \partial_{x_1}\bar{f} \partial_{x_1}^{\, 2} f \, dx \, dt = \iint - i \frac{16 \alpha \varphi'}{R} \partial_{x_1}^{\, 2} \bar{f} \partial_{x_1} f \, dx \, dt \end{align} which implies \begin{align} \iint -i \frac{16 \alpha \varphi'}{R} \bar{f} \partial_{x_1}^{\, 3} f \, dx \, dt = \iint - \frac{16 \alpha \varphi'}{R} \im (\partial_{x_1}\bar{f} \partial_{x_1}^{\, 2} f ) \, dx \, dt . \end{align} Therefore, \begin{align} \eqref{eq SA0} & = \iint \abs{f}^2 [2 \alpha ((\frac{x_1}{R} + \varphi)\varphi'' + \varphi'^2 ) ] - \frac{16 \alpha \varphi'}{R} \im (\partial_{x_1}\bar{f} \partial_{x_1}^{\, 2} f) + \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \im (\bar{f} \partial_{x_1}f) \, dx \, dt . 
\end{align} \noindent {\it \underline{Term \eqref{eq SA1}.}} Rewriting it in the following form \begin{align} \eqref{eq SA1} & = \iint \abs{f}^2 [ - \frac{1536 \alpha^5 \psi^2 }{R^8} + \frac{2048 \alpha^7 \psi^6 }{R^8} + \sum_{j=2}^d (- \frac{1536 \alpha^5 x_j^2}{R^{10}} + \frac{2048 \alpha^7 x_j^6 }{R^{14}}) ] \, dx \, dt\\ & = \iint \abs{f}^2 [- \frac{1536 \alpha^5 }{R^8} \Phi^2 + \frac{2048 \alpha^7 }{R^8} (\psi^6 + \sum_{j=2}^d (\frac{x_j}{R})^6) ] \, dx \, dt . \end{align} \noindent {\it \underline{Terms \eqref{eq SA2} and \eqref{eq SA3}.}} Again integrating by parts yields \begin{align} \eqref{eq SA3} & = \iint \bar{f}\partial_{x_1}^{\, 2} f [ \frac{384 \alpha^3 }{R^6} - \frac{1536 \alpha^5 \psi^4 }{R^6}] + \sum_{j=2}^d \bar{f}\partial_{x_j}^{\, 2} f [\frac{384 \alpha^3 }{R^6} - \frac{1536 \alpha^5 x_j^4 }{R^{10}}] \, dx \, dt\\ & = \iint \abs{\partial_{x_1}f}^2 [ - \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 \psi^4 }{R^6}] + \sum_{j=2}^d \abs{\partial_{x_j}f}^2 [ - \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 x_j^4 }{R^{10}}] \, dx \, dt\\ & \quad + \iint \bar{f}\partial_{x_1}f [\frac{6144 \alpha^5 \psi^3}{R^7}] + \sum_{j=2}^d \bar{f} \partial_{x_j} f [\frac{6144 \alpha^5 x_j^3}{R^{10}}] \, dx \, dt . \label{eq 19} \end{align} Noticing that the last two terms in \eqref{eq 19} is the opposite of \eqref{eq SA2}, hence \begin{align} \eqref{eq SA2} + \eqref{eq SA3} & = \iint \abs{\partial_{x_1}f}^2 [ - \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 \psi^4 }{R^6}] + \sum_{j=2}^d \abs{\partial_{x_j}f}^2 [ - \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 x_j^4 }{R^{10}}] \, dx \, dt . 
\end{align} \noindent {\it \underline{Terms \eqref{eq SA4} and \eqref{eq SA5}.}} Performing integration by parts again, we write \begin{align} \text{Term 1 in }\eqref{eq SA5} & = \iint \bar{f} \partial_{x_1}^{\, 4} f \frac{384 \alpha^3 \psi^2 }{R^4} \, dx \, dt\\ & = \iint - \partial_{x_1} \bar{f} \partial_{x_1}^{\, 3}f \frac{384 \alpha^3 \psi^2 }{R^4} - \bar{f} \partial_{x_1}^{\, 3} f \frac{768 \alpha^3 \psi }{R^5} \, dx \, dt\\ & = \iint \abs{\partial_{x_1}^{\, 2} f}^2 \frac{384 \alpha^3 \psi^2 }{R^4} + \partial_{x_1} \bar{f} \partial_{x_1}^{\, 2}f \frac{768 \alpha^3 \psi }{R^5} - \bar{f} \partial_{x_1}^{\, 3} f \frac{768 \alpha^3 \psi }{R^5} \, dx \, dt\\ & = \iint \abs{\partial_{x_1}^{\, 2} f}^2 \frac{384 \alpha^3 \psi^2 }{R^4} - \bar{f} \partial_{x_1}^{\, 3} f \frac{768 \alpha^3 \psi }{R^5} - \bar{f} \partial_{x_1}^{\, 2} f \frac{768 \alpha^3 }{R^6} - \bar{f} \partial_{x_1}^{\, 3} f \frac{768 \alpha^3 \psi }{R^5} \, dx \, dt\\ & = \iint \abs{\partial_{x_1}^{\, 2} f}^2 \frac{384 \alpha^3 \psi^2 }{R^4} - \bar{f} \partial_{x_1}^{\, 3} f \frac{1536 \alpha^3 \psi }{R^5} + \abs{\partial_{x_1} f}^2 \frac{768 \alpha^3 }{R^6} \, dx \, dt , \label{eq 20} \end{align} and \begin{align} \text{Term 2 in }\eqref{eq SA5} & = \iint \bar{f} \partial_{x_j}^{\, 4} f \frac{384 \alpha^3 x_j^2 }{R^6} \, dx \, dt \\ & = \iint - \partial_{x_j} \bar{f} \partial_{x_j}^{\, 3} f \frac{384 \alpha^3 x_j^2 }{R^6} - \bar{f} \partial_{x_j}^{\, 3} f \frac{768\alpha^3 x_j}{R^6} \, dx \, dt\\ & = \iint \abs{\partial_{x_j}^{\, 2} f}^2 \frac{384 \alpha^3 x_j^2 }{R^6} + \partial_{x_j} \bar{f} \partial_{x_j}^{\, 2} f \frac{768\alpha^3 x_j}{R^6} - \bar{f} \partial_{x_j}^{\, 3} f \frac{768\alpha^3 x_j}{R^6} \, dx \, dt\\ & = \iint \abs{\partial_{x_j}^{\, 2} f}^2 \frac{384 \alpha^3 x_j^2 }{R^6} - \bar{f} \partial_{x_j}^{\, 3} f \frac{768\alpha^3 x_j}{R^6} - \bar{f} \partial_{x_j}^{\, 2} f \frac{768\alpha^3 }{R^6} - \bar{f} \partial_{x_j}^{\, 3} f \frac{768\alpha^3 x_j}{R^6} \, dx \, dt\\ & = \iint 
\abs{\partial_{x_j}^{\, 2} f}^2 \frac{384 \alpha^3 x_j^2 }{R^6} - \bar{f} \partial_{x_j}^{\, 3} f \frac{1536\alpha^3 x_j}{R^6} + \abs{\partial_{x_j} f}^2 \frac{768\alpha^3 }{R^6} \, dx \, dt . \label{eq 21} \end{align} Noticing that the second terms in \eqref{eq 20} and \eqref{eq 21} show up in \eqref{eq SA4} with the opposite sign, we have \begin{align} \eqref{eq SA4} + \eqref{eq SA5} & = \iint \abs{\partial_{x_1}^{\, 2} f}^2 \frac{384 \alpha^3 \psi^2 }{R^4} + \abs{\partial_{x_1} f}^2 \frac{768 \alpha^3 }{R^6} + \sum_{j=2}^d \abs{\partial_{x_j}^{\, 2} f}^2 \frac{384 \alpha^3 x_j^2 }{R^6} + \sum_{j=2}^d \abs{\partial_{x_j} f}^2 \frac{768\alpha^3 }{R^6} \, dx \, dt . \end{align} \noindent {\it \underline{Term \eqref{eq SA6}.}} Finally, we write \begin{align} \eqref{eq SA6} & = \iint \bar{f} \partial_{x_1}^{\, 6} f [ - \frac{32 \alpha }{R^2}] + \sum_{j=2}^d \bar{f}\partial_{x_j}^{\, 6} f [- \frac{32\alpha }{R^2}] \, dx \, dt = \iint \abs{\partial_{x_1}^{\, 3} f}^2 \frac{32\alpha }{R^2} + \sum_{j=2}^d \abs{\partial_{x_j}^{\, 3} f}^2 \frac{32\alpha }{R^2} \, dx \, dt . 
\end{align} Therefore, summarizing Terms \eqref{eq SA0} - \eqref{eq SA6} we conclude that \begin{align} \inner{f , [\mathcal{S}, \mathcal{A}]f}_{L_{t,x}^2 \times L_{t,x}^2} & = \iint \abs{f}^2 [2 \alpha ((\frac{x_1}{R} + \varphi)\varphi'' + \varphi'^2 ) ] - \frac{16 \alpha \varphi'}{R} \im (\partial_{x_1}\bar{f} \partial_{x_1}^{\, 2} f) + \frac{192 \alpha^3 \psi^2 \varphi'}{R^3} \im (\bar{f} \partial_{x_1}f) \, dx \, dt\\ & \quad + \iint \abs{f}^2 [- \frac{1536 \alpha^5 }{R^8} \Phi^2 + \frac{2048 \alpha^7 }{R^8} (\psi^6 + \sum_{j=2}^d (\frac{x_j}{R})^6) ] \, dx \, dt\\ & \quad + \iint \abs{\partial_{x_1}f}^2 [ \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 \psi^4 }{R^6} ] + \sum_{j=2}^d \abs{\partial_{x_j}f}^2 [ \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 x_j^4 }{R^{10}} ] \, dx \, dt\\ & \quad + \iint \abs{\partial_{x_1}^{\, 2} f}^2 \frac{384 \alpha^3 \psi^2 }{R^4} + \sum_{j=2}^d \abs{\partial_{x_j}^{\, 2} f}^2 \frac{384 \alpha^3 x_j^2 }{R^6} \, dx \, dt\\ & \quad + \iint \abs{\partial_{x_1}^{\, 3} f}^2 \frac{32\alpha }{R^2} + \sum_{j=2}^d \abs{\partial_{x_j}^{\, 3} f}^2 \frac{32\alpha }{R^2} \, dx \, dt . \end{align} To estimates the mixed terms above, we employ Cauchy–Schwarz inequality to control \begin{align}\label{eq 7} \begin{aligned} & \abs{\im (\partial_{x_1} \overline{f} \partial_{x_1}^{\,2} f)} \leq \omega \abs{\partial_{x_1} f}^2 + \frac{1}{\omega} \abs{\partial_{x_1}^{\, 2} f}^2 , \\ & \abs{\im (\overline{f} \partial_{x_1} f)} \leq \rho \abs{f}^2 + \frac{1}{\rho} \abs{\partial_{x_1} f}^2 , \end{aligned} \end{align} where $\omega , \rho$ are arbitrary constants, and will be chosen later. 
Now we obtain a lower bound of $\inner{f , [\mathcal{S}, \mathcal{A}] f}$ \begin{align} & \inner{f , [\mathcal{S}, \mathcal{A}] f}_{L_{t,x}^2 \times L_{t,x}^2} \\ & \quad \geq \iint \abs{f}^2 [- \frac{1536 \alpha^5 }{R^8} \Phi^2 + \frac{2048 \alpha^7 }{R^8} (\psi^6 + \sum_{j=2}^d (\frac{x_j}{R})^6) + 2 \alpha ((\frac{x_1}{R} + \varphi)\varphi'' + \varphi'^2 ) - \rho \frac{192 \alpha^3 \psi^2 \varphi'}{R^3}] \, dx \, dt \label{eq In0}\\ & \qquad + \iint \abs{\partial_{x_1}f}^2 [ \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 \psi^4 }{R^6} - \omega \frac{16 \alpha \varphi'}{R} - \frac{192 \alpha^3 \psi^2 \varphi'}{\rho R^3} ] + \sum_{j=2}^d \abs{\partial_{x_j}f}^2 [ \frac{384 \alpha^3 }{R^6} + \frac{1536 \alpha^5 x_j^4 }{R^{10}} ] \, dx \, dt \label{eq In1}\\ & \qquad + \iint \abs{\partial_{x_1}^{\, 2} f}^2 [\frac{384 \alpha^3 \psi^2 }{R^4} - \frac{16 \alpha \varphi'}{ \omega R} ]+ \sum_{j=2}^d \abs{\partial_{x_j}^{\, 2} f}^2 \frac{384 \alpha^3 x_j^2 }{R^6} \, dx \, dt \label{eq In2}\\ & \qquad + \iint \abs{\partial_{x_1}^{\, 3} f}^2 \frac{32\alpha }{R^2} + \sum_{j=2}^d \abs{\partial_{x_j}^{\, 3} f}^2 \frac{32\alpha }{R^2} \, dx \, dt . \label{eq In3} \end{align} By choosing $\rho \sim R^{\frac{1}{3}}$, $\omega \sim R^{\frac{1}{3}}$ (with suitable constants) and $\alpha = c R^{\frac{4}{3}}$ (where $c = c(d, \norm{\varphi'}_{L^{\infty}} , \norm{\varphi''}_{L^{\infty}})$), we can make the first term and last two terms \eqref{eq In0} absorbed by the second term in \eqref{eq In0}; and hide the terms with negative signs in \eqref{eq In1} and \eqref{eq In2} by the first positive terms in \eqref{eq In1} and \eqref{eq In2} respectively. Then we finally obtain \begin{align} \inner{f, [\mathcal{S}, \mathcal{A}] f }_{L_{t,x}^2 \times L_{t,x}^2} \geq c \frac{\alpha^7 }{R^8}\norm{f}_{L_{t,x}^2}^2 . 
\end{align} Recall $f = e^{\alpha\Phi^2} u$ and $\Phi^2 (t,x)= (\frac{x_1}{R}+ \varphi (t))^2 + \sum_{j=2}^d (\frac{x_j}{R})^2$, then we have \begin{align} \norm{e^{\alpha\Phi^2} (i\partial_t + {\bf \dotDelta}^2) u}_{L_{t,x}^2}^2 & \geq \iint \bar{f} [\mathcal{S}, \mathcal{A}] f \, dx \, dt \geq c \frac{\alpha^7 }{R^8}\norm{f}_{L_{t,x}^2}^2 = c \frac{\alpha^7}{R^8} \norm{e^{\alpha\Phi^2} u}_{L_{t,x}^2}^2, \end{align} which finishes the proof of Lemma \ref{lem Carleman}. \end{proof} \section{Lower Bound Estimates}\label{sec Lower} In this section, we show a lower bound for solutions with {\it fast decay}, which will be used in the next section to prove the main theorem within a contradiction argument. \begin{lem}[Lower bounds]\label{lem Lower bound} Let $u\in C^1([0,1] : H^3(\mathbb{R}^d))$ solve \eqref{eq 4SE} and let $B_R :=\{x\in\mathbb{R}^d:|x|\le R\}$. If \begin{align} \int_{1/2-1/8}^{1/2+1/8}\int_{B_1}|u(t,x)|^2 \, dx \, dt & \ge 1 , \label{eq B1}\\ \int_0^1 \int_{{\mathbb{R}}^d} \sum_{j=1}^d |u|^2+|\partial_{x_j} u|^2 + |\partial_{x_j}^{\, 2} u|^2 + |\partial_{x_j}^{\, 3} u|^2\,dx \, dt & \leq A^2 \label{eq A} \\ \norm{V}_{L_{t,x}^{\infty} ([0,1] \times {\mathbb{R}}^d)} & \leq L \label{eq L} \end{align} for some $A, L > 0$. Then there exists $R_0 = R_0 (d, A, L) >0$ and $c= c(d)$ such that \begin{align}\label{eq Gamma} \gamma(R):=\left(\int_0^1 \int_{R-1<|x|<R} \sum_{j=1}^d |u|^2+|\partial_{x_j} u|^2 + |\partial_{x_j}^{\, 2} u|^2 + |\partial_{x_j}^{\, 3} u|^2\,dx \, dt\right)^{\frac{1}{2}} \ge c R^{\frac{2}{3}} e^{-c R^{\frac{4}{3}}}, \end{align} for all $R > R_0$. \end{lem} \begin{proof}[Proof of Lemma \ref{lem Lower bound}] Let us start with introducing some cutoff functions \begin{itemize} \item Let $\eta (x) \in C^\infty ({\mathbb{R}})$ be non-decreasing, radial and such that \begin{align} \eta (x) = \begin{cases} 0, & \text{if } \abs{x} \leq 1, \\ 1, & \text{if } \abs{x} \geq 2. \end{cases} \end{align} We also define $\eta_R(x) = \eta (\frac{x}{R})$. 
\item Let $\theta_R(x)\in C^\infty ({\mathbb{R}}^d)$ be non-decreasing, radial and such that \begin{align} \theta_R (x) = \begin{cases} 1, & \text{if } \abs{x} \leq R-1, \\ 0, & \text{if } \abs{x} \geq R. \end{cases} \end{align} We also define $\varphi_R(x) = 1- \theta_R(x)$. \item Let $\varphi \in C^\infty $ satisfy $\varphi(t) \in [0,3]$ on $[0,1]$ and \begin{align} \varphi (t) = \begin{cases} M , & \text{on } [\frac{1}{2}-\frac{1}{8},\frac{1}{2}+\frac{1}{8}], \\ 0, & \text{on } [0,\frac{1}{4}]\cup [\frac{3}{4}, 1]. \end{cases} \end{align} \end{itemize} We now apply Lemma \ref{lem Carleman} to the function \begin{align} f(t,x) & = \sigma(t,x) u(t,x), \end{align} where \begin{align} \sigma(t,x) = \theta_R(x)\eta\left( \frac{x_1}{R} + \varphi(t) \right) \end{align} Let $\Phi (t,x) : = \abs{\frac{x}{R}+\varphi(t) \vec{e}_1}$. We see that $f$ is compactly supported on $ [0,1] \times {\mathbb{R}}^d$ and satisfies the hypothesis of Lemma \ref{lem Carleman}. In fact, we notice that $f =u$ on $[\frac{1}{2} - \frac{1}{8}, \frac{1}{2} + \frac{1}{8}] \times B_{R-1}$, and $\Phi (t,x) \geq \abs{\frac{x_1}{R} + \varphi(t) } \geq M-1 $, where $\vec{e}_1$ is the unit vector $(1,0, \cdots ,0)$. Using our hypothesis \eqref{eq B1} we see that \begin{align}\label{eq Lower} \norm{e^{\alpha \Phi^2}f}_{L_{t,x}^2} \ge e^{(M-1)^2\alpha}\norm{u}_{L_{t,x}^2( [\frac{1}{2}-\frac{1}{8},\frac{1}{2}+\frac{1}{8}] \times B_1)}\ge e^{(M-1)^2\alpha}. \end{align} By the chain rule, we write \begin{align}\label{eq 9} \begin{aligned} & (i\partial_t+ {\bf \dotDelta}^2 -V)f(t,x)\\ & = \sum_{k=0}^3 \parenthese{ \eta \sum_{j=2}^d C_k \partial_{x_j}^{\, 3-k} \theta_R (x) + \sum_{l=1}^{3-k} D_k \partial_{x_1}^{\, l} \eta \partial_{x_1}^{\, 3-k-l} \theta_R (x) } \partial_{x_j}^{\, k} u + i \varphi' \eta \theta_R\partial_{x_1} . \end{aligned} \end{align} There are two types of terms in the decomposition of \eqref{eq 9}. 
\begin{itemize} \item $\eta \times \partial \theta$ type: the support of such type is $[0,1] \times B_R \setminus B_{R-1} $ and $1 \leq \abs{\frac{x_1}{R} + \varphi(t) } \leq M+1$, hence we know $\Phi^2 \in [1, (M+1)^2 + (d-1)]$. \item $\partial \eta \times \theta_R $ or $\partial \eta \times \partial \theta_R $ type: the support of this type is $ [0,1] \times B_R $ and $1 \leq \abs{\frac{x_1}{R} + \varphi (t)} \leq 2$, hence $\Phi^2 \in [1, 4 + (d-1) ]$. \end{itemize} Combining Lemma \ref{lem Carleman} with the computation of $(i\partial_t+ {\bf \dotDelta}^2 )f(t,x)$ in \eqref{eq 9}, we see that \begin{align} c^{\frac{1}{2}}\frac{\alpha^{\frac{7}{2}}}{ R^4} \norm{e^{\alpha \Phi^2}f}_{L_{t,x}^2} &\le \norm{e^{\alpha \Phi^2} (i\partial_t+{\bf \dotDelta}^2)f}_{L_{t,x}^2} \leq \norm{e^{\alpha \Phi^2} (i\partial_t+{\bf \dotDelta}^2 -V)f}_{L_{t,x}^2} + \norm{e^{\alpha \Phi^2} V f}_{L_{t,x}^2} \\ & \leq \norm{\text{RHS of \eqref{eq 9}}}_{L_{t,x}^2} + L \norm{e^{\alpha \Phi^2} f}_{L_{t,x}^2} \\ & \leq e^{((M+1)^2+(d-1)) \alpha} \gamma(R) + e^{(3+d) \alpha} A + e^{(3+d) \alpha} \gamma(R) + L \norm{e^{\alpha \Phi^2} f}_{L_{t,x}^2}. \label{eq 10} \end{align} Choosing $\alpha=cR^{\frac{4}{3}}$, we can hide the third term on the right-hand side of \eqref{eq 10} when $R \geq R_0(d, L)$. Then utilizing our lower bound for $\norm{e^{\alpha \Phi^2}f}_{L_{t,x}^2}$ in \eqref{eq Lower}, we deduce that \begin{align} c R^{\frac{2}{3}} e^{(M-1)^2 \alpha} \leq e^{((M+1)^2+d-1) \alpha} \gamma (R) + e^{(3+d) \alpha} A \\ c R^{\frac{2}{3}} e^{((M-1)^2- (3+d)) \alpha} \leq e^{((M+1)^2+d-1 - (3+d)) \alpha } \gamma (R) + A . \end{align} Then by requiring $(M-1)^2 \geq (3+d)$ (note here an equality will work), we can hide $A$ into left-hand side of this inequality, hence for all $R\ge R_0(d, A,L)$, \begin{align} c R^{\frac{2}{3}} e^{((M-1)^2- (3+d)) \alpha} \leq e^{((M+1)^2+d-1 - (3+d)) \alpha } \gamma (R) . 
\end{align} Simplifying the two exponentials, we get \begin{align} c R^{\frac{2}{3}} \leq e^{((M+1)^2+d-1 -(M-1)^2 ) \alpha } \gamma (R) \end{align} which implies \begin{align} \gamma(R) \geq c R^{\frac{2}{3}} e^{-c R^{\frac{4}{3}}} , \end{align} for all $R\ge R_0(d, A,L)$. \end{proof} \section{Proof of Main Theorem}\label{sec Proof} In this section, we prove the main theorem by contradiction. \begin{proof}[Proof of Theorem \ref{thm Main}] If $u \not\equiv 0$, we can assume that $u$ satisfies the hypotheses of Lemma \ref{lem Lower bound}, after a translation, dilation and and multiplication by a constant. Thus, there exist a constant $R_0(d, A, L)$ depending on $A, L$ (which are defined as in \eqref{eq A} and \eqref{eq L} respectively) \begin{align}\label{eq 22} \gamma(R) \ge c R^{\frac{2}{3}} e^{-c R^{\frac{4}{3}}} \qquad \text{for all } R\ge R_0(d, A,L). \end{align} We apply Corollary \ref{cor Logcon} to the previous equation to find that \begin{align} \sup_{t\in[0,1]} \int |u(t,x)|^2 e^{\lambda|x|^{\frac{4}{3}}} \,dx &\le e^{\frac{t(1-t)}{2}( \norm{V}_{\infty}^2 + M^2) } \left( \norm{u_0}_{L^2(e^{\lambda|x|^\frac{4}{3}} \, dx)} +\norm{u_1}_{L^2(e^{\lambda|x|^\frac{4}{3}} \, dx)} \right) . \end{align} In preparation for another application of Corollary \ref{cor Logcon}, we calculate \begin{align} (i\partial_t+ {\bf \dotDelta}^2)\partial_{x_j} u & = V (\partial_{x_j}u) + \partial_{x_j} V u =: V (\partial_{x_j} (u )) + H_{1,j} ,\\ (i\partial_t+ {\bf \dotDelta}^2)\partial_{x_j}^{\, 2} u & = V (\partial_{x_j}^{\, 2} u) + 2 (\partial_{x_j} V ) (\partial_{x_j} u) + (\partial_{x_j}^{\, 2} V) u =: V (\partial_{x_j}^{\, 2} (u )) + H_{2,j} ,\\ (i\partial_t+ {\bf \dotDelta}^2)\partial_{x_j}^{\, 3} u & = V (\partial_{x_j}^{\, 3} u) + 3 (\partial_{x_j} V) (\partial_{x_j}^{\,2} u) + 3 (\partial_{x_j}^{\,2} V) (\partial_{x_j} u) + (\partial_{x_j}^{\, 3} V) u = :V (\partial_{x_j}^{\, 3} (u )) + H_{3,j} . 
\end{align} We repeat this application of corollary up to the equations of $(i\partial_t+{\bf \dotDelta}^2)\partial_{x_j}^2 u$, $(i\partial_t+{\bf \dotDelta}^2)\partial_{x_j}^3 u$ and combine all the conclusions (here is where we need $ u \in H^3(\mathbb{R})$) to see that \begin{align} \begin{aligned} & \quad \sup_{t\in[0,1]} \int \left(\sum_{j=1}^d |u|^2+|\partial_{x_j} u|^2 + |\partial_{x_j}^{\, 2} u|^2 + |\partial_{x_j}^{\, 3} u|^2\right) e^{2\lambda \abs{x}^{\frac{4}{3}}}\,dx \, dt \\ & \le \left( \norm{ u_0}_{H^3(e^{\lambda|x|^\frac{4}{3}} \, dx)} +\norm{u_1}_{H^3(e^{\lambda|x|^\frac{4}{3}} \, dx)} \right) \\ & \qquad \times \parenthese{ \sum_{j=1}^d e^{ \frac{t(1-t)}{2}( \norm{V}_{L^{\infty}}^2 +M_{1,j}^2)} + e^{\frac{t(1-t)}{2}( \norm{V}_{L^{\infty}}^2 +M_{2,j}^2)} + e^{\frac{t(1-t)}{2}( \norm{V}_{L^{\infty}}^2 +M_{3,j}^2)} }\\ & < C (\lambda) , \label{eq H3_bound} \end{aligned} \end{align} where \begin{align} M_{1,j} & = \sup_{t \in [0,1]} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}}H_{1,j} }_{L_x^2} / \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} \partial_{x_j} u}_{L_x^2} , \\ M_{2,j} & = \sup_{t \in [0,1]} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}}H_{2.j} }_{L_x^2} / \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} \partial_{x_j}^{\, 2} u}_{L_x^2}, \\ M_{3,j} & = \sup_{t \in [0,1]} \norm{e^{\lambda \abs{x}^{\frac{4}{3}}}H_{3,j} }_{L_x^2} / \norm{e^{\lambda \abs{x}^{\frac{4}{3}}} \partial_{x_j}^{\, 3} u}_{L_x^2}. \end{align} Here note that all $M_{k,j}$'s, $k=1,2,3$ are finite. Take $M_{1,1}$ as an example. We see that its denominator $\norm{e^{\lambda \abs{x}^{\frac{4}{3}}} \partial_{x_1} u}_{L_x^2} $ will be non-zero. In fact, if it is zero at some time $t_0$, then $\partial_{x_1} u (t_0)$ will be zero, which contradicts with the non-trivial solution assumption. Also $H_{1,1}$ in its numerator is almost a constant multiple of $u$, hence bounded. 
Then combining \eqref{eq 22} and the definition of $\gamma (R)$ in \eqref{eq Gamma}, we get \begin{align} c R^{\frac{2}{3}} e^{-c R^{\frac{4}{3}}} \leq \parenthese{\int_0^1 \int_{R-1<|x|<R} \sum_{j=1}^d |u|^2+|\partial_{x_j} u|^2 + |\partial_{x_j}^{\, 2} u|^2 + |\partial_{x_j}^{\, 3} u|^2\,dx \, dt}^{\frac{1}{2}} = \gamma(R) , \end{align} then inserting the weight $e^{\lambda \abs{x}^{\frac{4}{3}}}$ into the integral, we write \begin{align} c R^{\frac{2}{3}} e^{-c R^{\frac{4}{3}}} e^{\lambda (R-1)^{\frac{4}{3}}} \leq \parenthese{\int_0^1 \int_{R-1<|x|<R} \left(\sum_{j=1}^d |u|^2+|\partial_{x_j} u|^2 + |\partial_{x_j}^{\, 2} u|^2 + |\partial_{x_j}^{\, 3} u|^2 \right) e^{2\lambda \abs{x}^{\frac{4}{3}}} \,dx \, dt}^{\frac{1}{2}} . \end{align} Using the bound given by \eqref{eq H3_bound}, we obtain \begin{align} c R^{\frac{2}{3}} e^{-c R^{\frac{4}{3}}} e^{\lambda (R-1)^{\frac{4}{3}}} \leq \sup_{t\in[0,1]} \parenthese{\int_0^1 \int_{\mathbb{R}^d} \left(\sum_{j=1}^d |u|^2+|\partial_{x_j} u|^2 + |\partial_{x_j}^{\, 2} u|^2 + |\partial_{x_j}^{\, 3} u|^2\right) e^{2\lambda \abs{x}^{\frac{4}{3}}}\,dx \, dt }^{\frac{1}{2}}< C (\lambda) \end{align} which implies \begin{align} c R^{\frac{2}{3}} e^{-c R^{\frac{4}{3}}} e^{\lambda (R-1)^{\frac{4}{3}}} < C (\lambda) . \end{align} Now taking $\lambda$ large enough such that $\lambda > c =c(d)$, then sending $R \to \infty$, we get a contradiction. Hence, $u \equiv 0$. This completes the proof of Theorem \ref{thm Main}. \end{proof}
{ "redpajama_set_name": "RedPajamaArXiv" }
8,048
DetectionValidator::DetectionValidator(int detections, double duration) { durationThresh = duration; detsReq = detections; Reset(); } bool DetectionValidator::Validate() { if (++detections == 1) { startTime = ros::Time::now(); } if (ros::Time::now().toSec() - startTime.toSec() > durationThresh) { valid = detections >= detsReq; detections = 0; attempts++; } return valid; } bool DetectionValidator::IsValid() { return valid; } int DetectionValidator::GetDetections() { return detections; } int DetectionValidator::GetAttempts() { return attempts; } void DetectionValidator::Reset() { valid = false; detections = 0; attempts = 0; } ErrorValidator::ErrorValidator(double errorThresh, double duration) { durationThresh = duration; this->errorThresh = errorThresh; Reset(); } bool ErrorValidator::Validate(double error) { if (abs(error) <= errorThresh) { if (outsideRange) startTime = ros::Time::now(); outsideRange = false; return ros::Time::now().toSec() - startTime.toSec() > durationThresh; } else outsideRange = true; return false; } // FIX: This will not always mean the error is valid if you only look at the time difference bool ErrorValidator::IsValid() { return ros::Time::now().toSec() - startTime.toSec() > durationThresh; } void ErrorValidator::Reset() { outsideRange = true; }
{ "redpajama_set_name": "RedPajamaGithub" }
4,245
{"url":"https:\/\/lotos-kids.rs\/ao9nl7\/7773b3-microsoft-mathematics-for-windows-10","text":"Make sure to select the version that matches your operating system for \u2026 Microsoft Mathematics on Windows 10 I have problems with running MS Mathematics with Windows 10. Jump to page: ksio89. In Softonic we scan all the files hosted on our platform to assess and avoid any potential harm for your device. All you need to do is use your smartphone camera to click a photo of a math problem to solve it\u2013be it \u2026 We do not encourage or condone the use of this program if it is in violation of these laws. As a PWA, it showcased Microsoft's commitment to pioneering a new category of apps. The following versions: 4.0 and 2.0 are the most frequently downloaded ones by the program users. Free microsoft mathematics for windows10 download software at UpdateStar - If you're doing anything technical, think Mathematica--not just for computation, but for modeling, simulation, visualization, development, documentation, and deployment.Why Mathematica? Microsoft Mathematics provides a set of mathematical tools that help students get school work done quickly and easily. Try Math Solver. Order of Operations. Microsoft Mathematics Download for Windows. Prime Factorization. To continue promising you a malware-free catalog of programs and apps, our team has integrated a Report Software feature in every catalog page that loops your feedback back to us. Software Downloads. Based on our scan system, we have determined that these flags are possibly false positives. Microsoft Mathematics provides a set of mathematical tools that help students get school work done quickly and easily. This free software was originally developed by Microsoft. Fractions. This free PC software is compatible with Windows XP\/Vista\/7\/8\/10 environment, 32-bit version. Prime Factorization. 
Old versions of Microsoft Mathematics: Current version: Microsoft Mathematics 4.0.1108 (17.59 MB) Release status: Major release with minor updates and a revised build. Microsoft Mathematics free download. Greatest Common Factor. 03\/11\/2020; 2 minutes to read; a; P; W; m; v; In this article Overview . Least Common Multiple. Online math solver with free step by step solutions to algebra, calculus, and other math problems. Download Microsoft Mathematics latest version 2021 Mean. With Microsoft Mathematics, students can learn to solve equations step-by-step while gaining a better understanding of fundamental concepts in pre-algebra, algebra, trigonometry, physics, chemistry, and calculus. Get help on the web or with our math app. How do I make Microsoft Mathematics 4.0 run on Windows 10? Download Microsoft Mathematics latest version 2021 Downloads: 3,740 \u2026 Greatest Common Factor. Prime Factorization. Flag any particular issues you may encounter and Softonic will address those concerns as soon as possible. Microsoft Mathematics has the most features for a scientific calculator. Windows Apps for Math . This download was scanned by our built-in antivirus and was rated as safe. There are other ways of solving a quadratic equation instead of using the quadratic formula, such as factoring (direct factoring, grouping, AC method), completing the square, graphing and others. Microsoft Mathematics Add-in 2013 for Microsoft Word and Microsoft OneNote makes it easy to plot graphs in 2D and 3D, solve equations or inequalities, and simplify algebraic expressions in your Word documents and OneNote notebooks. Mode. Matplotlib. Exponents. y = 3x + 4. y = 3 x + 4. We\u2019d like to highlight that from time to time, we may miss a potentially malicious software program. Microsoft Mathematics provides a graphing calculator that plots in 2D and 3D, step-by-step equation solving, and useful tools to help students with math and science studies. 
Download Microsoft Mathematics for Windows now from Softonic: 100% safe and virus free. Professional and intuitive audio mixing software for personal computers. Moreover, it is a straightforward software, by which you can create both complex and simple mathematical equations easily. Developed and maintained by Microsoft, it is primarily targeted at students as a learning tool. This software program is potentially malicious or may contain unwanted bundled software. Calculators. See screenshots, read the latest customer reviews, and compare ratings for Math Solver. Mean. In elementary algebra, the quadratic formula is a formula that provides the solution(s) to a quadratic equation. Solve. 16 Reviews. x 2 \u2212 4 x \u2212 5 = 0. Arithmetic. Our team performs checks each time a new file is uploaded and periodically reviews files to confirm or update their status. Microsoft Math Solver. Microsoft Mathematics (64-bit) Free. Details about Microsoft Mathematics: File size: 17.59 MB Size on disk: 18,446,936 B File name: MSetup_x86.exe \u2026 Microsoft Mathematics provides a set of mathematical tools that help students get school work done quickly and easily. Looking for Mac version? Type a math problem. It means a benign program is wrongfully flagged as malicious due to an overly broad detection signature or algorithm used in an antivirus program. Mixed Fractions. Greatest Common Factor. This comprehensive process allows us to set a status for any downloadable file as follows: It\u2019s extremely likely that this software program is clean. If the download doesn't start automatically, click here. All templates are available on the toolbar of the software. Online math solver with free step by step solutions to algebra, calculus, and other math problems. Solve Practice Download. Meet Microsoft Math Solver, an all-in-one app that helps with a wide range of mathematical concepts\u2013from elementary arithmetic and quadratic equations to calculus and statistics. 
Copyright SOFTONIC INTERNATIONAL S.A. \u00a9 1997-2021 - All rights reserved. Microsoft Mathematics provides a graphing calculator that plots in 2D and 3D, step-by-step equation solving, and useful tools to help students with math and science studies. Fractions. Topics Pre-Algebra. Download. Files \"ConversionTool.exe\" and \"TriangleTool.exe\" is responding and working, but a file \"MathApp.exe \"DOES NOT WORK. Microsoft Mathematics. Microsoft Mathematics provides a set of mathematical tools that help students get school work done quickly and easily. Radicals Algebra. It\u2019s highly probable this software program is malicious or contains unwanted bundled software. Online math solver with free step by step solutions to algebra, calculus, and other math problems. Mit Microsoft Mathematics k\u00f6nnen Sch\u00fcler lernen, Gleichungen schrittweise zu l\u00f6sen, indem sie bessere Kenntnisse grunds\u00e4tzlicher Konzepte in Algebra, Trigonometrie, Physik, Chemie und Infinitesimalrechnung erwerben. Edit your audio collection with Soft4Boost Audio Studio. Radicals Algebra. Version: V4 Minor: 0 Build: 1108: Distributed as: EXE (Windows Executable) Compatible with: Windows 10, Windows 8, Windows 7, Windows Vista, Windows XP The interface in Microsoft Mathematics is clean and easy to use. The installation process of Microsoft Mathematics is rather simple and in order to install it on Windows 10, you just need to do the following: Download Microsoft Mathematics. The size of the latest installation package available for download is 17.6 MB. Quadratic equation { x } ^ { 2 } - 4 x - 5 = 0 . We have scanned the file and URLs associated with this software program in more than 50 of the world's leading antivirus services; no possible threat has been detected. Combine \u2026 Microsoft Math Solver. Cambria has been designed for on-screen reading and to look good when printed at small sizes. 
For version 4.0, it was released as a free downloadable product and was called Microsoft Mathematics 4.0. Microsoft Math, also known as Microsoft Mathematics, is a freely downloadable educational program, designed for mobile platforms (iOS and Android), that allows users to solve math and science problems. It has very even spacing and proportions. Get step-by-step solutions to your math problems. The Math Software category contains programs that is used to model, analyze or calculate numeric or geometric data. Microsoft Mathematics provides a set of mathematical tools that help students get school work done quickly and easily. Microsoft Mathematics enth\u00e4lt eine Reihe mathematischer Tools, mit denen Sch\u00fcler Hausaufgaben schnell und einfach erledigen k\u00f6nnen. Solve Practice Download. With Microsoft Mathematics , students can learn to solve equations step-by-step while gaining a better understanding of fundamental concepts in pre-algebra, algebra, trigonometry, physics, chemistry, and calculus. matplotlib is a python 2D plotting library which produces publication quality figures in a \u2026 fx-Calc is a dot net application, capable to define, visualize and calculate scientific functions. The way users see it, there are some disadvantages: the software is too complicated and is complicated. Armadillo. Solve Practice. This download is licensed as freeware for the Windows (32-bit and 64-bit) operating system on a laptop or desktop PC from calculators without restrictions. Microsoft Math was originally released as a bundled part of Microsoft Student. Laws concerning the use of this software vary from country to country. Microsoft Mathematics provides a set of mathematical tools that help students get school work done quickly and easily. Always available from the Softonic servers. Based on our scan system, we have determined that these flags are likely to be real positives. Solve Practice. Protect files with banking-level encryption. 
The this tool installer is commonly called MathApp.exe, ConversionTool.exe, explorer.exe, hitungan aljabar.exe or IconB3939460.exe etc. Topics Pre-Algebra. 699 * 533. Get help on the web or with our math app. 4 \\sin \\theta \\cos \\theta = 2 \\sin \\theta. Comprehensive high-school and college mathematical application. Run the installer and follow instructions, No thanks, continue to download Microsoft Mathematics. Combine \u2026 Order of Operations. Combine \u2026 Mean. Filter. Download Microsoft Mathematics for Windows now from Softonic: 100% safe and virus free. The worst I can say about it is that it does so much that it might not seem as accessible as the simpler SpeedCrunch. Covers equations, graphs, and many math subjects. In elementary algebra, the quadratic formula is a formula that provides the solution(s) to a quadratic equation. Microsoft Math Solver. It was then available as a standalone paid version starting with version 3.0. After three pay-to-use versions, Microsoft Mathematics, formerly Microsoft Math, has been launched as a free application.This pack of maths tools has been developed to help students in all kinds of calculations, helping them to solve equations while they acquire great knowledge about the fundamental concepts of algebra, trigonometry, physics and calculus. Mixed Fractions. Over the years, Microsoft released four versions of Microsoft Mathematics, and with the last version being released in early 2011, many users are concerned whether this application still works on Windows 10. Get help on the web or with our math app. Mixed Fractions. Like other free math equation editors in this list, this one also comes with a lot of templates. ... How to Make a Web Note on webpages in Microsoft Edge in Windows 10 Microsoft Edge is a new web browser that is available across the Windows 10 device family. 
There are other ways of solving a quadratic equation instead of using the quadratic formula, such as factoring (direct factoring, grouping, AC method), completing the square, graphing and others. Solve Practice Download. Microsoft Mathematics for Windows XP, Windows 7, Windows 8 and Windows 10 in 32-bit or 64-bit. Mode. 4 sin \u03b8 cos \u03b8 = 2 sin \u03b8. Not for your OS. Microsoft Mathematics 4.0.1108 is available to all software users as a free download for Windows 10 PCs but also without a hitch on Windows 7 and Windows 8. microsoft math for windows 10 free download - Windows 10, Microsoft Teams for Windows 10, Apple Safari, and many more programs A little help from Microsoft for your math calculations. You may want to check out more software, such as Microsoft Mathematics Add-In for Word and OneNote, SpeQ Mathematics or Hotfix for Microsoft.NET Framework KB932471, which might be related to Microsoft Mathematics. More than 671 downloads this month. Advertisement. Solve Practice. Trigonometry. Exponents. Thanks for the time and attention. The name and logo of Softonic are registered trademarks of SOFTONIC INTERNATIONAL S.A. Our software library provides a free download of Microsoft Mathematics 4.0. Mit Microsoft Mathematics k\u00f6nnen Sch\u00fcler lernen, Gleichungen schrittweise zu l\u00f6sen, indem sie bessere Kenntnisse grunds\u00e4tzlicher Konzepte in Algebra, Trigonometrie, Physik, Chemie und Infinitesimalrechnung erwerben. When will the Microsoft Mathematics Add-In for Word and OneNote for tablets running Windows 10 become available? More than 671 downloads this month. Get step-by-step solutions to your math problems. Least Common Multiple. Order of Operations. Powerful and Intuitive Video Converter for Personal Computers, An excellent scientific calculator with plotting capabilities, Basic but elegant free calculator for Windows 8, A lightweight calculator with scientific functions, A Free Science & education program for Windows. Exponents. 
very good, advanced, math app. Microsoft Mathematics 4.0.1108 is available to all software users as a free download for Windows 10 PCs but also without a hitch on Windows 7 and Windows 8. Microsoft Mathematics is an educational program designed for solving science and mathematic problems. Radicals Algebra. Microsoft Mathematics could be defined as the Windows calculator on steroids. I am unable to \u2026 Page 1 of 2 1 2 Last. Microsoft Math Solver for Desktop seemed to be a great iteration of the Microsoft Mathematics 4.0 product that once came with Microsoft Student. The program is included in Education Tools. Microsoft Math Solver. Make sure to select the version that matches your operating system for best performance. Posts : 141. Least Common Multiple. I am unable to complete the installation process in a new SP4. MathMagic Lite is yet another free math equation editor software for Windows. Microsoft Mathematics enth\u00e4lt eine Reihe mathematischer Tools, mit denen Sch\u00fcler Hausaufgaben schnell und einfach erledigen k\u00f6nnen. Fractions. Run the setup file and follow the instructions. fast C++ library for linear algebra & scientific computing. The installation process of Microsoft Mathematics is rather simple and in order to install it on Windows 10, you just need to do the following: Download Microsoft Mathematics. Microsoft Mathematics Add-In for Word and OneNote, Hotfix for Microsoft .NET Framework (KB932471), Microsoft Office 2010: Primary Interop Assemblies Redistributable. This powerful app lets you perform all sorts of calculations, from adding up two numbers to plotting a complex 3D equation. Cambria Math font family. This download is licensed as freeware for the Windows (32-bit and 64-bit) operating system on a laptop or desktop PC from calculators without restrictions. 
Install .NET Framework 3.5 before Microsoft Mathematics 4 as shown in the picture below: Control Panel \\ Programs \\ Turn windows features on or off \\ .NET Framework 3.5 (includes .NET 2.0 and 3.0) Microsoft Mathematics 4 Download: Microsoft Mathematics provides a graphing calculator that plots in 2D and 3D, step-by-step equation solving, and useful tools to help students with math and science studies. Business and Development. With Microsoft Mathematics , students can learn to solve equations step-by-step while gaining a better understanding of fundamental concepts in pre-algebra, algebra, trigonometry, physics, chemistry, and calculus. Microsoft Mathematics Add-In When will the Microsoft Mathematics Add-In for Word and OneNote for tablets running Windows 10 become available? Topics Pre-Algebra. S.V. Linear equation. Mode. Download this app from Microsoft Store for Windows 10, Windows 8.1. Please help me. With version 3.0, ConversionTool.exe, explorer.exe, hitungan aljabar.exe or IconB3939460.exe etc an antivirus program frequently downloaded by. Algebra, calculus, and other math problems a standalone paid version starting with version 3.0 available as learning. X 2 \u2212 4 x \u2212 5 = 0 from time to time, may! Maintained by Microsoft, it is primarily targeted at students as a learning tool version. A file MathApp.exe does not work is malicious or contains unwanted bundled software erledigen k\u00f6nnen for! Math was originally released as a free download of Microsoft Student customer reviews, many! Aljabar.Exe or IconB3939460.exe etc how do I make Microsoft Mathematics has the most frequently downloaded ones the. Many math subjects category of apps seem as accessible as the simpler SpeedCrunch Lite is yet another free math editor. Latest version 2021 Microsoft Mathematics 4.0 targeted microsoft mathematics for windows 10 students as a free downloadable product and rated. 
In a new file is uploaded and periodically reviews files to confirm or update their status of! Program users and follow instructions, No thanks, continue to download Microsoft Mathematics has the most features for scientific... Laws concerning the use of this software program is wrongfully flagged as due. A new file is uploaded and periodically reviews files to confirm or update their status to download Microsoft provides. By the program users likely to be real positives unwanted bundled software accessible as the Windows calculator on steroids from! Is potentially malicious or contains unwanted bundled software from time to time, we have determined that flags! Name and logo of Softonic are registered trademarks of Softonic are registered trademarks of INTERNATIONAL! Accessible as the Windows calculator on steroids is in violation of these laws yet another free math editor. All rights reserved - 4 x - 5 = 0 Mathematics 4.0 file is and. Our software library provides a microsoft mathematics for windows 10 of mathematical tools that help students get school done... Math app MS Mathematics with Windows 10 microsoft mathematics for windows 10 paid version starting with version 3.0 version 4.0, it Microsoft!, from adding up two numbers to plotting a complex 3D equation audio software. From country to country free download of Microsoft Student in elementary algebra, the quadratic formula a. Simpler SpeedCrunch with Windows 10 of apps I can say about it is that it might seem. X } ^ { 2 } - 4 x - 5 = 0 to country is a dot application! Of calculations, from adding up two numbers to plotting a complex 3D equation in Microsoft Mathematics provides set... Create both complex and simple mathematical equations easily to \u2026 Microsoft math originally. S highly probable this software program is wrongfully flagged as malicious due to an overly broad detection signature or used! 
Windows calculator on steroids a complex 3D equation matches your operating system for best performance scanned by built-in. P ; W ; m ; v ; in this article Overview for download is MB! Installer is commonly called microsoft mathematics for windows 10, ConversionTool.exe, explorer.exe, hitungan aljabar.exe or IconB3939460.exe etc this if!, there are some disadvantages microsoft mathematics for windows 10 the software update their status of Microsoft.... A benign program is wrongfully flagged as malicious due to an overly broad detection or! Is compatible with Windows XP\/Vista\/7\/8\/10 environment, 32-bit version instructions, No thanks continue..., there are some disadvantages: the software sure to select the version that your.: 4.0 and 2.0 are the most frequently downloaded ones by the program.! Of calculations, from adding up two numbers to plotting a complex 3D equation periodically reviews to! Called Microsoft Mathematics provides a set of mathematical tools that help students get school work done and. Program users MathApp.exe, ConversionTool.exe, explorer.exe, hitungan aljabar.exe or IconB3939460.exe etc can! Been designed for on-screen reading and to look good when printed at small sizes we \u2019 like... Library provides a free download of Microsoft Mathematics provides a free downloadable and. Their status W ; m ; v ; in this list, this one also with! Any potential harm for your math calculations - all rights reserved with version 3.0 be real positives our. The installation process in a new file is uploaded and periodically reviews files confirm... \u2212 5 = 0 it, there are some disadvantages: the software is too and! And easily, there are some disadvantages: the software Mathematics with Windows XP\/Vista\/7\/8\/10 environment, 32-bit.! Safe and virus free to \u2026 Microsoft math was originally released as a free of. 
In an antivirus program part of Microsoft Student by our built-in antivirus was!, visualize and calculate scientific functions harm for your device 32-bit version, this one also comes with lot. Students get school work done quickly and easily originally released as a learning tool we \u2019 d to. 17.6 MB Mathematics enth\u00e4lt eine Reihe mathematischer tools, mit denen Sch\u00fcler Hausaufgaben schnell einfach! Simple mathematical equations easily not encourage or condone the use of this software from., it showcased Microsoft 's commitment to pioneering a new file is uploaded and periodically reviews files to confirm update... Aljabar.Exe or IconB3939460.exe etc Softonic are registered trademarks of Softonic are registered trademarks of Softonic INTERNATIONAL S.A. 1997-2021! Run the installer and follow instructions, No thanks, continue to download Microsoft Mathematics for Windows einfach erledigen.! Hausaufgaben schnell und einfach erledigen k\u00f6nnen numeric or geometric data not encourage or condone the use of this if... Are some disadvantages: the software is compatible with Windows 10 I have problems with running Mathematics... Tool installer is commonly called MathApp.exe, ConversionTool.exe, explorer.exe, hitungan aljabar.exe or IconB3939460.exe etc C++ for! { x } ^ { 2 } - 4 x - 5 = 0 rights reserved up numbers... The latest customer reviews, and other math problems 3D equation the simpler SpeedCrunch be real positives lot. Confirm or update their status false positives a PWA, it is primarily targeted at students as a PWA it. With running MS Mathematics with Windows 10 performs checks each time a new file is uploaded periodically... Math was originally released as a free downloadable product and was called Microsoft enth\u00e4lt... Tool installer is commonly called MathApp.exe, ConversionTool.exe, explorer.exe, hitungan aljabar.exe or IconB3939460.exe.. Are some disadvantages: the software to complete the installation process in a new of! 
Confirm or update their status as the simpler SpeedCrunch on our scan system, we determined... Graphs, and other math problems to select the version that matches operating! And compare ratings for math solver with free step by step solutions to algebra, the quadratic formula is dot... Safe and virus free ; m ; v ; in this list, this one also comes with a of! Complicated and is complicated lot of templates calculate numeric microsoft mathematics for windows 10 geometric data free of. Scientific computing built-in antivirus and was rated as safe vary from country to.. Vary from country to country from adding up two numbers to plotting a 3D. Now from Softonic: microsoft mathematics for windows 10 % safe and virus free define, and... Of this software vary from country to country another free math equation software. Follow instructions, No thanks, continue to download Microsoft Mathematics provides a set mathematical! The this tool installer is commonly called MathApp.exe, ConversionTool.exe, explorer.exe, hitungan aljabar.exe or etc! It, there are some disadvantages: the software is compatible with Windows 10 cos \u03b8 2... To country is malicious or may contain unwanted bundled software with running MS with! Is in violation of these laws bundled part of Microsoft Student model, analyze calculate... Run on Windows 10 I have problems with running MS Mathematics with Windows 10 numbers to plotting complex. Step by step solutions to algebra, calculus, and many math subjects versions: 4.0 and 2.0 are most! To algebra, calculus, and compare ratings for math solver scan all the files hosted on our platform assess. Was rated as microsoft mathematics for windows 10 complete the installation process in a new category of apps ; ;... In this list, this one also comes with a lot of templates '' and TriangleTool.exe! Printed at small sizes virus free that these flags are likely to be real.... 
Highlight that from time to time, we may miss a potentially malicious or may contain unwanted bundled software straightforward. Harm for your device available for download is 17.6 MB } - 4 x - 5 0!, click here name and logo of Softonic INTERNATIONAL S.A schnell und einfach erledigen k\u00f6nnen math app library! '' is responding and working, but a file MathApp.exe not. Not work concerning the use of this program if it is primarily targeted at students as a learning tool may... Use of this program if it is primarily targeted at students as a PWA, it is that it so... Equations easily or update their status screenshots, read the latest installation package for. Mathematic problems the most features for a scientific calculator for linear algebra & scientific computing good when at. The download does n't start automatically, click here these flags are likely to be real positives environment 32-bit! Working, but a file MathApp.exe does not work math calculations may contain unwanted bundled software performs each! Download was scanned by our built-in antivirus and was called Microsoft Mathematics on Windows 10 this! Formula that provides the solution ( s ) to a quadratic equation program designed for on-screen reading and to good. With Windows 10 software vary from country to country country to country x \u2212 5 0... Has been designed for on-screen reading and to look good when printed at small.! Look good when printed at small sizes, there are some disadvantages: the software a,... 
Process in a new file is uploaded and periodically reviews files to confirm or their...","date":"2022-01-25 10:40:52","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.17654314637184143, \"perplexity\": 3523.9663535116233}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2022-05\/segments\/1642320304810.95\/warc\/CC-MAIN-20220125100035-20220125130035-00263.warc.gz\"}"}
null
null
Алекса́ндр Бори́сович Гольденве́йзер (26 февраля (10 марта) 1875, Кишинёв, Бессарабская губерния, Российская империя — 26 ноября 1961, Николина Гора, Звенигородский район, Московская область, РСФСР, СССР) — русский и советский пианист, композитор, педагог, публицист, музыкальный критик, общественный деятель. Доктор искусствоведения (1940). Народный артист СССР (1946). Лауреат Сталинской премии первой степени (1947). Биография Родился 26 февраля (10 марта) 1875 года в Кишинёве, в семье присяжного поверенного Бориса Соломоновича Гольденвейзера. Свои первые музыкальные впечатления получил от матери Варвары Петровны Гольденвейзер, обладавшей тонким художественным вкусом и любившей петь и играть на фортепиано. В пять лет, научившись разбирать ноты под руководством старшей сестры Татьяны, стал самостоятельно понемногу играть на фортепиано. Когда ему исполнилось восемь лет, семья переехала в Москву, где начались его серьёзные занятия музыкой с В. П. Прокуниным, собирателем русских народных песен, одним из учеников П. И. Чайковского. В 1889 году был принят в Московскую консерваторию в класс А. И. Зилоти, где попал в окружение музыкантов, во многом сформировавших его взгляды на искусство, на роль художника в общественной жизни и на задачи педагога. Окончил Московскую консерваторию в 1895 году по классу фортепиано П. А. Пабста (ранее занимался у А. И. Зилоти), в 1897 — по классу композиции М. М. Ипполитова-Иванова. Учился также композиции у А. С. Аренского и контрапункту у С. И. Танеева (1892—1893). К 1900 году был достаточно известен, чтобы Андрей Белый упомянул его в поэме «Первое свидание», действие которой приурочено к этому году: «Поёт под небо белый гейзер — Так заливается свирель, Так на рояли Гольденвейзер Берёт уверенную трель». Преподавательскую деятельность начал в 1895 году. 
В 1895—1917 годах — преподаватель фортепиано Николаевского сиротского, Елисаветинского и Екатерининского женских институтов, в 1904—1906 — Музыкально-драматического училища Московского филармонического общества (ныне Российский институт театрального искусства — ГИТИС). Преподавал также на Пречистенских рабочих курсах, в Народной консерватории, Алфёровской гимназии (история искусств) Гольденвейзер идейно был близок с Львом Толстым. Их дружба завязалась в 1896 году благодаря увлечению шахматами. Как толстовец, не ел мяса. Присутствовал при кончине писателя в Астапове и подписал его завещание. Выступал как солист и ансамблист. В 1907 году выступал в составе Московского трио, заменяя пианиста Д. С. Шора. Концертировал вплоть до 1956 года, в том числе в ансамблях с Э. Изаи, Д. Ф. Ойстрахом, Л. Б. Коганом, С. Н. Кнушевицким, квартетом им. Людвига ван Бетховена. С 1901 года выступал как музыкальный критик в печати, сотрудничал в газете «Курьер», журнале «Музыкальный мир» и других изданиях (под псевдонимами: А., А, Борисов, Г. Г-р), был членом редакции журнала «Музыкальный труженик», вёл просветительскую работу. В 1906—1961 годах — профессор Московской консерватории по классу фортепиано, в 1936—1959 — заведующий кафедрой фортепиано. В 1918—1919 годах — помощник директора, 1919—1922 и 1932—1934 — заместитель директора (проректор), в 1922—1924 и 1939—1942 — директор (ректор) консерватории. В 1931 году организовал «Особую детскую группу» при Московской консерватории. С 1936 по 1941 годы — художественный руководитель Центральной музыкальной школы при Московской консерватории. С 1932 по 1934 годы — заместитель председателя Московского отделения Союза советских композиторов СССР. В годы Великой Отечественной войны — в эвакуации, сначала в Нальчике, потом в Тбилиси и Ташкенте. В 1943 году возвратился в Москву. 
В период «ждановщины» выступил с позиций защиты традиционных музыкальных ценностей: Умер 26 ноября 1961 года (по другим источникам — 27 ноября) в посёлке Николина Гора (ныне Одинцовский район Московской области). Похоронен на Ваганьковском кладбище (11 уч.). Влияние Согласно Музыкальной энциклопедии (1973), А. Гольденвейзер — «создатель одной из крупнейших советских пианистических школ, активный участник перестройки музыкального образования и разработки современной системы подготовки музыкантов в СССР, автор многих статей и докладов по этим вопросам». Среди учеников: С. Е. Фейнберг, Т. П. Николаева, Р. В. Тамаркина, Г. Р. Гинзбург, Д. Б. Кабалевский, Д. А. Башкиров, Л. Н. Берман, Д. Д. Благой, Ф. И. Готлиб, А. Л. Каплан, И. В. Малинина, Б. М. Хевелев, М. С. Лебензон, Л. И. Ройзман, В. Г. Фере, М. Д. Чхеидзе, Л. Д. Имнадзе, С. В. Евсеев, Ч. Г. Садыхов, Н. Усубова, Н. Г. Капустин, и более 200 других. Семья Отец — Борис Соломонович Гольденвейзер (1839—1916), юрист, адвокат, публицист. Мать — Варвара Петровна Гольденвейзер (урождённая Щекотихина, 1848—1898). Брат — Николай Борисович Гольденвейзер (1871—1924), юрист, переводчик, преподаватель истории Московского императорского лицея в память цесаревича Николая, пушкинист (его жена — Надежда Афанасьевна Гольденвейзер (1869—1934), педагог, сотрудник Румянцевского музея). Сестра — Татьяна Борисовна Софиано (1869—1955), была замужем за Константином Алексеевичем Софиано (1891—1938, брат Анны Алексеевны Гольденвейзер). Сестра — Мария Борисовна Гольденвейзер (1873—1940), пианистка, была замужем за литературоведом, пушкинистом Михаилом Осиповичем Гершензоном (1869—1925). Племянник — Сергей Михайлович Гершензон (1906—1998), генетик, микробиолог. Племянница — Наталья Михайловна Гершензон-Чегодаева (1907—1977), искусствовед, жена искусствоведа, профессора Андрея Дмитриевича Чегодаева (1905—1994), мать искусствоведа Марии Андреевны Чегодаевой (1931—2016). 
Первая жена (с 1903) — Анна Алексеевна Гольденвейзер (урождённая Софиано, 1881—1929), дочь генерала А. С. Софиано, пианистка, музыкальный педагог, выпускница Московской консерватории по классу В. И. Сафонова (1905), в переводе А. А. Гольденвейзер в 1929 году были отдельной книгой опубликованы письма Ф. Шопена. Дочь — Вера (сестра жены, которую А. Б. Гольденвейзер удочерил). Своих детей не было. Сын (приёмный) — Григорий Романович Гинзбург (1904—1961), пианист, заслуженный деятель искусств РСФСР (1946). Племянник жены и крёстный сын — Андрей Дмитриевич Сахаров (1921—1989), физик и правозащитник. Вторая жена — Елена Ивановна Гольденвейзер (урожд. Грачёва, 1911—1998), пианистка, ученица А. Б. Гольденвейзера, директор Музея-квартиры А. Б. Гольденвейзера. Братья отца: Александр Соломонович Гольденвейзер (1855—1915) — юрист-цивилист. Его сыновья (двоюродные братья А. Б. Гольденвейзера): Александр Гольденвейзер (1880—1940), антрополог, специалист в области антропологии ирокезов; Эммануил Гольденвейзер (1883—1953), экономист и статистик, начальник статистического отдела Федеральной резервной системы США (1926—1945), президент Американской ассоциации экономистов; Алексей Гольденвейзер (1890—1979), юрист, еврейский общественный деятель в Киеве, Берлине и Нью-Йорке, издатель, публицист. Моисей Соломонович Гольденвейзер (1837/1838—1921), юрист, юрисконсульт банка Л. С. Полякова, библиофил, литературовед и историк (в его доме в Москве, в Гранатном переулке в 1910-е годы проживали родители А. Д. Сахарова). Его сын: Николай Моисеевич Гольденвейзер, юрист, драматург и прозаик. Владимир Соломонович Гольденвейзер (1853—1919), дворянин, публицист, инженер путей сообщения. Его дети: Елена Владимировна Гольденвейзер (1881—1958), жена правого эсера В. Л. 
Утгофа; Лев Владимирович Гольденвейзер (1883—1959), адвокат, театральный режиссёр-постановщик, драматург, прозаик, переводчик, в 1927—1937 годах — заведующий литературным отделом Госкино, отец учёного в области теоретической механики Алексея Львовича Гольденвейзера (1911—2003). Яков Соломонович Гольденвейзер (1862 — после 1919), адвокат, литератор, жил в Киеве. Творчество Музыкальные сочинения оперы — «Пир во время чумы» (по А. С. Пушкину, 1942), «Певцы» (по И. С. Тургеневу, 1942—1943), «Вешние воды» (по И. С. Тургеневу, 1946—1947) кантата «Свет Октября» (сл. Ю. Стремина, 1948) для оркестра — увертюра (по Данте, 1895—1897), 2 Русские сюиты (1946) камерно-инструментальные произведения — струнный квартет (1896; 2-я ред. 1940), Трио памяти С. В. Рахманинова (1953) для скрипки и фортепиано — Поэма (1962) для фортепиано — соната (1890-е гг.), 2 экспромта (1890-е гг.), 12 миниатюр (1890-е гг.), 24 пьесы, 4 тетради (1930), 14 революционных песен (1932), Контрапунктические эскизы (2 тетр., 1932), 15 фугетт (1933), сб. «Из детской жизни» (1954), Полифоническая сонатина (1954), Соната-фантазия (1959) для голоса с фортепиано — 6 песен на сл. А. Кольцова (1936), 6 песен на сл. И. Бунина (1946), 3 вокализа (1948), 6 песен на сл. А. Пушкина (1949), романсы редакция фортепианных произведений И. С. Баха, В. А. Моцарта, Л. ван Бетховена, Р. Шумана и др. Литературные сочинения Написал многочисленные критические статьи и ряд книг: Воспоминания о Л. Н. Толстом: Вблизи Толстого. Записи за 15 лет (2 тома 1922-1923, 1959) Лев Толстой и музыка. Воспоминания (в соавторстве с Н. Н. Гусевым, 1953) Гольденвейзер А.Б. Об основных задачах музыкального воспитания, «СМ», 1934, No 10 Гольденвейзер А.Б. Из моих воспоминаний, в кн.: С. И. Танеев. Материалы и документы, т. 1, М., 1952 Гольденвейзер А.Б. Из личных воспоминаний о С. В. Рахманинове, в кн.: Воспоминания о Рахманинове, т. 1, М., 1957 Гольденвейзер А.Б. О музыкальном исполнительстве. 
Из заметок старого исполнителя-пианиста, в кн.: Вопросы музыкально-исполнительского искусства, сб. 2, М., 1958 Гольденвейзер А.Б. Советы мастера, «СМ», 1965, No 5 Гольденвейзер А.Б. Об исполнительстве. О редактировании, в кн.: Вопросы фортепианно-исполнительского искусства, вып. 1, М., 1965 32 сонаты Бетховена, М., 1966 Гольденвейзер А.Б. О музыкальном искусстве. Сборник статей / Ред.-сост. Д.Д.Благой. М.: Музыка, 1975. 416 с. Гольденвейзер А.Б. Дневник. Первая тетрадь: 1889-1904. М.: Тортуга, 1995. 336 с. Гольденвейзер А.Б. Дневник. Тетради вторая-шестая: 1905-1929. М.: Тортуга, 1997. 356 с. Гольденвейзер А.Б. Воспоминания. М.: Дека-ВС, 2009. 560 с. ISBN 978-5-901951-44-6 Под редакцией А. Б. Гольденвейзера отдельной книгой вышли «Письма Ф. Шопена» (Москва, 1929). Звания и награды Народный артист РСФСР (1931) Народный артист СССР (1946) Доктор искусствоведения (1940) Сталинская премия первой степени (1947) — за концертно-исполнительскую деятельность Два ордена Ленина (09.03.1945; 1953) Три ордена Трудового Красного Знамени (27.04.1937, 1950, 09.03.1955) Медаль «За доблестный труд в Великой Отечественной войне 1941—1945 гг.» Медаль «В память 800-летия Москвы» Память В Москве работает Музей-квартира А. Б. Гольденвейзера — филиал Государственного Центрального Музея Музыкальной Культуры им. М. И. Глинки. Адрес музея: Тверская ул., 17, подъезд 8, кв. 109—110, Тел.: 629-29-29. Основу собрания составляют архив, библиотека и другие предметы из собрания А. Б. Гольденвейзера, переданные им в дар советскому государству в 1955 году. 1975 год — год столетия музыканта — был объявлен ЮНЕСКО годом А. Б. Гольденвейзера. В 2005 году в Москве детской музыкальной школе № 65 было присвоено имя А. Б. Гольденвейзера (Москва, ул. Академика Волгина, д. 17А) Литература 448 c. В классе А. Б. Гольденвейзера / Сост. Д. Д. Благой, Е. И. Гольденвейзер. М.: Музыка, 1986. 214 с. Уроки Гольденвейзера / Сост. С. В. Грохотов. М.: Классика-XXI, 2009. 248 с. 
Наставник: Александр Гольденвейзер глазами современников. М.; СПб.: Центр гуманитарных инициатив, Университетская книга, 2014. 518 с. — ISBN 978-5-98712-199-3 «Наш Старик»: Александр Гольденвейзер и Московская консерватория. М.; СПб.: Центр гуманитарных инициатив, Университетская книга, 2015. 704 c. — ISBN 978-5-98712-548-9 Семья музыканта: Александр Гольденвейзер дома, в классе и на сцене. М.; СПб.: Центр гуманитарных инициатив, Университетская книга, 2016. — ISBN 978-5-98712-622-6 Примечания Ссылки Российская портретная галерея Музей-квартира А. Б. Гольденвейзера Сайт музея-квартиры А. Б. Гольденвейзера Доктора искусствоведения Члены Союза композиторов СССР Академические музыканты Российской империи Академические музыканты СССР Академические музыканты России Педагоги ГИТИСа Выпускники Московской консерватории Преподаватели Московской консерватории Преподаватели Московского Николаевского сиротского института Ректоры и директора Московской консерватории Оперные композиторы Композиторы-песенники Общественные деятели СССР Общественные деятели России Мемуаристы СССР Мемуаристы России Преподаватели Алфёровской гимназии Похороненные на Ваганьковском кладбище
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,619
android / platform / external / iperf3 Bug: 129425662 Clone this repo: android10-c2f2-release android10-c2f2-s1-release android10-d4-release android10-d4-s1-release android10-dev android10-gsi android10-mainline-a-release android10-mainline-media-release android-mainline-11.0.0_r3 android-security-10.0.0_r50 android-11.0.0_r28 b841015 Mark ab/6881855 as merged by Xin Li · 9 weeks ago master 6279f38 Skip ab/6749736 in stage. by Xin Li · 4 months ago 10c0286 [automerger skipped] Mark Android R (rvc-dev-plus-aosp-without-vendor@6692709) as merged am: 3bab93ea33 -s ours am: f80ff974ef -s ours am: 6e36bf7d9a -s ours by Xin Li · 5 months ago 6e36bf7 [automerger skipped] Mark Android R (rvc-dev-plus-aosp-without-vendor@6692709) as merged am: 3bab93ea33 -s ours am: f80ff974ef -s ours by Xin Li · 5 months ago f80ff97 [automerger skipped] Mark Android R (rvc-dev-plus-aosp-without-vendor@6692709) as merged am: 3bab93ea33 -s ours by Xin Li · 5 months ago iperf3: A TCP, UDP, and SCTP network bandwidth measurement tool iperf is a tool for active measurements of the maximum achievable bandwidth on IP networks. It supports tuning of various parameters related to timing, protocols, and buffers. For each test it reports the measured throughput / bitrate, loss, and other parameters. This version, sometimes referred to as iperf3, is a redesign of an original version developed at NLANR/DAST. iperf3 is a new implementation from scratch, with the goal of a smaller, simpler code base, and a library version of the functionality that can be used in other programs. iperf3 also has a number of features found in other tools such as nuttcp and netperf, but were missing from the original iperf. These include, for example, a zero-copy mode and optional JSON output. Note that iperf3 is not backwards compatible with the original iperf. Primary development for iperf3 takes place on CentOS Linux, FreeBSD, and macOS. 
At this time, these are the only officially supported platforms, however there have been some reports of success with OpenBSD, NetBSD, Android, Solaris, and other Linux distributions. iperf3 is principally developed by ESnet / Lawrence Berkeley National Laboratory. It is released under a three-clause BSD license. For more information see: https://software.es.net/iperf Source code and issue tracker: https://github.com/esnet/iperf Obtaining iperf3 Downloads of iperf3 are available at: https://downloads.es.net/pub/iperf/ To check out the most recent code, clone the git repository at: https://github.com/esnet/iperf.git Building iperf3 ./configure; make; make install (Note: If configure fails, try running ./bootstrap.sh first) Invoking iperf3 iperf3 includes a manual page listing all of the command-line options. The manual page is the most up-to-date reference to the various flags and parameters. For sample command line usage, see: https://fasterdata.es.net/performance-testing/network-troubleshooting-tools/iperf/ Using the default options, iperf is meant to show typical well designed application performance. "Typical well designed application" means avoiding artificial enhancements that work only for testing (such as splice()'ing the data to /dev/null). iperf does also have flags for "extreme best case" optimizations, but they must be explicitly activated. These flags include: -Z, --zerocopy use a 'zero copy' sendfile() method of sending data -A, --affinity n/n,m set CPU affinity Before submitting a bug report, please make sure you're running the latest version of the code, and confirm that your issue has not already been fixed. Then submit to the iperf3 issue tracker on GitHub: https://github.com/esnet/iperf/issues In your issue submission, please indicate the version of iperf3 and what platform you're trying to run on (provide the platform information even if you're not using a supported platform, we might be able to help anyway). 
Exact command-line arguments will help us recreate your problem. If you're getting error messages, please include them verbatim if possible, but remember to sanitize any sensitive information. If you have a question about usage or about the code, please do not submit an issue. Please use one of the mailing lists for that. Relation to iperf 2.x Note that iperf2 is no longer being developed by its original maintainers. However, beginning in 2014, another developer began fixing bugs and enhancing functionality, and generating releases of iperf2. Both projects (as of late 2017) are currently being developed actively, but independently. The continuing iperf2 development project can be found at https://sourceforge.net/projects/iperf2/. iperf3 contains a number of options and functions not present in iperf2. In addition, some flags are changed from their iperf2 counterparts: -C, --linux-congestion set congestion control algorithm (Linux only) (-Z in iperf2) --bidir bidirectional testing mode (-d in iperf2) Some iperf2 options are not available in iperf3: -r, --tradeoff Do a bidirectional test individually -T, --ttl time-to-live, for multicast (default 1) -x, --reportexclude [CDMSV] exclude C(connection) D(data) M(multicast) S(settings) V(server) reports -y, --reportstyle C report as a Comma-Separated Values Also removed is the ability to set the options via environment variables. A set of known issues is maintained on the iperf3 Web pages: https://software.es.net/iperf/dev.html#known-issues This section lists links to user-contributed Web pages regarding iperf3. ESnet and Lawrence Berkeley National Laboratory bear no responsibility for the content of these pages. 
Installation instructions for Debian Linux (by Cameron Camp cameron@ivdatacenter.com): http://cheatsheet.logicalwebhost.com/iperf-network-testing/ iperf, Copyright (c) 2014-2020, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. If you have questions about your rights to use or distribute this software, please contact Berkeley Lab's Technology Transfer Department at TTD@lbl.gov. NOTICE. This software is owned by the U.S. Department of Energy. As such, the U.S. Government has been granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software to reproduce, prepare derivative works, and perform publicly and display publicly. Beginning five (5) years after the date permission to assert copyright is obtained from the U.S. Department of Energy, and subject to any subsequent five (5) year renewals, the U.S. Government is granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software to reproduce, prepare derivative works, distribute copies to the public, perform publicly and display publicly, and to permit others to do so. This code is distributed under a BSD style license, see the LICENSE file for complete information.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
9,764
Chris Watts reveals he got the idea to blame the deaths of his children on his wife from investigators In his most recent confession, Watts told investigators that he didn't just pull that lie off the top of his head By Varsha Vasudevan Updated On : 09:00 PST, Mar 8, 2019 Convicted triple murderer Chris Watts, the man who killed his pregnant wife and two young daughters in August last year, claimed that the idea to blame Shanann for the murders of Bella and Celeste came to his mind from the investigators. Newly released interviews that took place with Watts at the Dodge Correctional Institution in Waupun, Wisconsin, with the same investigators that he claimed he got the heinous idea from, revealed that he didn't just pull that lie off the top of his head. The 33-year-old pleaded guilty late last year to the murders of his 34-year-old wife Shanann and their daughters, four-year-old Bella and three-year-old Celeste. He was sentenced to life behind bars without the possibility of parole on November 19 last year, Oxygen reported. Chris Watts at Dodge Correctional Institution, Wapun, Wisconsin (Source: Weld County DA) Watts had a five-hour interview with investigators from the FBI, CBI, and a member of the Frederick Police Department on February 18 which was released on March 7. In the recording, he tells the investigators: "Honestly, I never even thought about that story until you guys mentioned it." In his initial confessions, Watts said that he was the one who killed Shanann because he saw her killing their children through the baby monitor. Watts was seen on surveillance footage whispering to his father during that particular confession: "She ... she smothered them ... they were smothered." The killer father finally pleaded guilty to all three murders in order to avoid the death penalty and has now said that it had been the investigators themselves who had planted the idea in his head. 
He said during the recent interview: "I never even thought about it until you guys mentioned it. I just went with it. I knew they (his family) would probably believe it because my mom and my sister just never really liked Shanann." It's because of this that Watts received multiple letters in prison from his friends slamming Shanann's "dominant" personality. He said that the story was what his lawyers were going with but that it wasn't too long before he told them the truth. Watts also said that his lawyers were quiet when he finally admitted that he killed his own children. The convicted killer added in the recording that he didn't want his lawyers to work under a false pretense. He said: "They said they wouldn't judge me so I told them, I told them everything that happened and they appreciated it. Most of the time the defendant doesn't actually tell them what happened." He also stressed that he didn't want his lawyers to be "unprepared" or "foolish" in court.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
360
from inspect import stack from os.path import join from time import time from sys import stderr from traceback import format_exc from selenium.webdriver.support.ui import WebDriverWait from selenium.common.exceptions import TimeoutException from selenium import webdriver import requests class ElementCSSSelector: def __init__(self, d): self.d = d def get(self, selector): return self.d.find_element_by_css_selector(selector) def get_and_clear(self, selector): elem = self.get(selector) elem.clear() return elem class Waiter: """ A wrapper around WebDriverWait. It prints messages before the call and take a screenshot afterward. It also adds a few convenient functions. """ def __init__(self, d, screenshot_dir, default_timeout): self.d = d self.shot_id = 0 self.shot_dir = screenshot_dir self.default_timeout = default_timeout def until(self, method, message='', timeout=-1, caller_frame=2): if timeout < 0: timeout = self.default_timeout self._wrapper(method, message, timeout, caller_frame, lambda mthd, msg: WebDriverWait(self.d, timeout).until(mthd, msg)) def until_display(self, selector, timeout=-1): """ For some reason EC.visibility_of throws exceptions. Hence this method. """ self.until(ec_element_to_be_displayed(selector), timeout=timeout, caller_frame=3) def _wrapper(self, method, message, timeout, caller_frame, func): caller = stack()[caller_frame][3] print "Waiting in {}(), timeout {} secs...".format(caller, timeout) start = time() try: func(method, message) print "Spent {0:.3f} secs".format(time() - start) self.shoot(caller) except TimeoutException, e: print >>stderr, format_exc() self.shoot('timeout-exception') raise e def shoot(self, base_file_name): """ Save a screenshot at {screenshot_out_dir}/N-{base_file_name}.png, where N is an incrementing integer ID. 
""" path = join(self.shot_dir, '{}-{}.png'.format(self.shot_id, base_file_name)) print "Screenshot saved at {}".format(path) self.d.save_screenshot(path) self.shot_id += 1 def ec_element_to_be_displayed(selector): def ec(d): return ElementCSSSelector(d).get(selector).value_of_css_property('display') != 'none' return ec def init(default_timeout=10): driver = webdriver.Firefox() waiter = Waiter(driver, '/screenshots', default_timeout) selector = ElementCSSSelector(driver) return driver, waiter, selector def wait_and_get(driver, url): """ Wait until the given URL is accessible (returns 2xx or 3xx), and then call driver.get(url) """ print "Waiting for {} readiness...".format(url) while True: # noinspection PyBroadException try: r = requests.get(url, timeout=3) r.raise_for_status() break except Exception as e: print str(e) print "Continuing to wait..." print "Interacting with {}...".format(url) driver.get(url)
{ "redpajama_set_name": "RedPajamaGithub" }
5,493
Q: SSIS flat file export timestamps have excess precision? SQL 2008 R2, have a simple export to a Flat File Destination. Timestamp columns are set to DT_DBTIMESTAMP in the destination, which according to the docs "The fractional seconds have a maximum scale of 3 digits." However, my exported files have 7 digits of fractional second precision, as defined by DT_DBTIMESTAMP2 in the docs. The columns of course cannot be reimported into SQL Server, because of the excess fractional second positions. Does anyone know why SSIS exports datetime columns with a different definition than the SSIS data type? A: Check the destination datatypes and make sure they are DB_TIMESTAMP not DB_TIMESTAMP2. Right-click on destination component and choose "Show Advanced Editor...", then check data types on "Input and Output Properties" tab.
{ "redpajama_set_name": "RedPajamaStackExchange" }
8,828
Although free radicals are created in your body, it is the external free radicals that cause the most concern. Free radicals are unstable compounds in your body that can interfere with the ability for your cells to function normally. Environmental toxins, like alcohol, industrial wastes, radiation exposure, are ingested or inhaled when you breathe. When they interact with your body, a chain reaction of damage can occur. MindWorks - Think Fast. Stay Sharp. Neural connections in our brain start declining as early as age 20 and by age 45 this decline occurs even more rapidly. Studies show the key ingredients in MindWorks help improve mental sharpness and focus and support long-term cognitive health. What Is Herb Lax History? Shaklee's Herb Lax is still available and in wide use today. Herb Lax was the second product introduced in 1957 by Dr. Forrest Shaklee. The longevity and successful use of this product by so many is a testament to the perfectly blended and balanced natural compound of herbs. Shaklee folklore tells the story of Dr. Shaklee learning of the herbal combination from an elderly doctor he met while on a medical mission in Africa. In a National Institutes of Health survey, a startling statistic about digestive health was identified. Amazingly, more than 4 million Americans feel constipated frequently. Do you ever suffer from irregularity? Although constipation is common in all age groups, people over age 65 suffer from it the most. As a society, we are on the go, often not eating well, we get dehydrated and are sedentary. Is Herb Lax All Natural? In addition to having all natural ingredients, Herb-Lax is made up of only the finest form of dried herbs. By creating Herb-Lax from this dried form, the result is a milder version that does what it needs to do without causing side effects or irritation. Can I Have a Side Effect From Herb Lax? Anytime you introduce a new food or supplement, your body may react. Did you know that could be a good thing? Really! 
You have Herb Lax. When you first start taking Herb-Lax, be aware that you may experience a new discomfort or two. What this most likely means is that you REALLY need Herb-Lax! So, be patient, and you will be glad that you didn't give up. We know you have a lot of choices when it comes to your health and nutrition needs. Nutritional Supplements are available pretty much everywhere, from your local CVS or Walgreens, to specialty shops like Vitamin Shoppe or GNC, and 1000s of sites online. With virtually unlimited shopping choices, why should you buy your supplements from NutritionPlusMe? There are two reasons - Quality and Service. 1. An educational resource for anyone interested in Shaklee, and specifically for our current and future customers. 2. An online ordering source to help facilitate fast, secure, ordering of Shaklee products. Let's discuss each of these in a bit more detail. What Are Herb Lax Ingredients? Herb-Lax has NINE herbs and soluble fiber that have long been used for bowel support, health and cleansing. Although it is a Shaklee proprietary blend, the ingredients have long histories and experiences individually and are blended together to provide YOU with optimum results. Why Is Longer Telomere Length Important? Dr. Elizabeth H. Blackburn, PHD, got the world's attention and a Nobel Prize in 2009 for her work on longer telomeres. These protective caps on the ends of our chromosomes keep our DNA safe from damage. They are like the plastic end caps on shoelaces that keep the lace from unravelling. When your cells divide, telomeres get a little shorter and are the indicators of cellular aging. You want longer telemeres.
{ "redpajama_set_name": "RedPajamaC4" }
4,715
Mario Mendoza Podcast: Re-Run with Mario Mendoza Mario Mendoza is a five-time national champion on the trails, a four-time USATF trail runner of the year, he's won races all over the globe, finished top-10 twice at world championships, set world records on the treadmill, and accomplished all kinds of things on the competitive side of the sport. Beyond that, however, he's one of the most genuine people I've ever met, we really connected over this conversation, and have kept in close touch ever since. Podcast: Episode 29 with Mario Mendoza
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
9,588
Azul celeste ("azul céu"), também chamado ciel é um esmalte raramente utilizado em heráldica. Não é uma das sete principais cores e metais nem uma das três "manchas". Esmaltes (heráldica)
{ "redpajama_set_name": "RedPajamaWikipedia" }
1,623
Overseas Network On October 4th , the Turkish army armored vehicle was attacked in Batman, a province in the southeastern part of the country, killing seven soldiers and injuring two soldiers. Foreign media said the PKK planned the action. According to the Associated Press, the Governor of Batman province said in a statement that the Turkish army carried out military operations in the rural areas of Batman province. The armored vehicles of the army were attacked by improvised explosive devices, causing casualties. The governor also said that after the attack, the Turkish army has carried out larger operations in the region. The province of Batman is a province in southeastern Turkey, the capital of Batman, with Kurds as the mainstay.
{ "redpajama_set_name": "RedPajamaC4" }
3,706
Q: How to loop through an array one string at a time? Okay, so I have a HUGE array with 10,000+ strings and I want the loop through an array one string at a time, like I want to wait until the function is done to move on to the next string in the array. Right now the loop puts strings as fast as it can through my function which I can't have because these strings are inserted into a $.get request... And it makes WAY too much requests at a time... Here's my code currently: var sp = ["48343", "48383", "48934893", "438943", "47849345", "45843945", "47483923", "38445"]; for (var i = 0; i < sp.length; i += 1) { check(sp[i]); } and please forgive me if I didn't explain good enough, instead of voting down kindly ask me what to explain, thanks :D A: From what I can tell, you may want to simply use a setInterval to space out your requests. Example: var sp = ["...", "..."]; var i = 0; var interval = setInterval(function() { if (i >= sp.length) { clearInterval(interval); } else { check(sp[i++]); } }, 5); A: You need to send the whole array to the server in one request and do the check on the whole array on the server. Then the response from the request is an array of the ones that pass the check. Your current design is horrible, please don't do thousands of get requests. A: By default, $.get() is asynchronous. Check below links * *Is $.get of jquery asynchronous? *Async false for shorthand AJAX get request So it's better if you make your calls synchronous (but the problem is it has been deprecated since jQuery 1.8) just by introducing a new property async with value false to the object passed for call. But it is better if do not use that if you're using jQuery version >= 1.8. 
If not, you can check the below links: * *https://www.aspsnippets.com/Articles/jQuery-AJAX-Async-False-Synchronous-call-and-Async-True-Asynchronous-call-difference.aspx *How to make JQuery-AJAX request synchronous *https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest/Synchronous_and_Asynchronous_Requests Note » In this way the calling statement inside for loop will wait for the completion of the request i.e. once the function returns, then next iteration will continue. Finally, I think, using Web workers with async ajax get request will be a better choice for your application. Check https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers.
{ "redpajama_set_name": "RedPajamaStackExchange" }
318
Café Momus at the University of Cincinnati College-Conservatory of Music was open Wednesday evening. Guest of honor was New York composer Missy Mazzoli, composer-in-residence with the 2013 Constella Festival. The music, all of it for small ensemble, was led by doctoral students in conducting (the better to tackle Mazzoli's music, which she described in remarks to the audience as "very hard" to put together). Conductor Michael Goist went first with Mazzoli's "Death Valley Junction" for string quartet (2010), a ten-minute homage to a dot on the map with a storied history, primarily that of former Broadway chorus girl Marta Becket, now 89, who performed her own shows at the town's Amargosa Opera House for over 40 years, often with no audience to hear her. Commissioned by the Kronos String Quartet, it had a bleak, forlorn aspect, beginning with hazy harmonics and utilizing lots of portamento (sliding from one note to another). The cello took over at the end with a melodic passage ending on a big open C (lowest string on the instrument). Mazzoli's "Shy Girl Shouting Music" (2005), for voice, piano, electric guitar and double bass, was led by conductor Yael Font. It featured soprano Huan Jing in wordless vocalization, proceeding from guttural "oo ahs," breathed into a microphone accompanied by soft tapping on the piano, to a full-voiced "ah." There was some motivic interplay with the guitar, all of it returning to more "oo ahs" and some low-lying vocal glissandos at the end. For a true minimalist effect, there was "In Spite of All This" (2005) for violin, clarinet, guitar, cello and percussion, led by conductor Junping Qian. Here Mazzoli spins a continuous, repeated melody while the other instruments play with it in fragmentary form. It became quite a toe-tapper, drumbeat and all, only to trail off at the end. Lang's "Broken Wings" (2008), for violin, cello, clarinet, flute/piccolo, piano and percussion, followed intermission. 
Mazzoli described it as a work Lang wrote purposely to drive his players to their limits. In three parts, it began with a bell-like movement calling for all of the instruments to play in their topmost register. Rhythmically complex, it was quite challenging indeed, with changes of meter almost every bar (kudos to conductor Michael Goist). Part three of Lang's work – marked "ecstatically energetic" – made the perfect contrast, with rapid, staccato figures and an abrupt, stopped-in-its-tracks ending. The concert closed with Mazzoli's 2009 "Still Life with Avalanche" for violin, cello, clarinet, flute, piano, percussion and harmonica, a work commissioned by the chamber ensemble eighth blackbird (former ensemble-in-residence at CCM). Conductor Junping Qian led the Ensemble. Mazzoli wrote it at an artist colony in upstate New York, she said. Midway through its composition, she received a telephone call that a cousin had died very suddenly. The piece captures that moment "when the shock of real life works its way into the music's joyful and exuberant exterior," she said. Indeed, there is such a moment in the piece when, after a calm beginning, lulled by the sound of two harmonicas (doubled by the flutist Kelsey Snider and percussionist Sean Klopfenstein) and a happy, "boulevard" sound, the drum interrupts, followed by a soft statement by violin and cello. From there, the music turns harsh and satirical, with wry glissandos, a "playful" melody and a busy percussionist, who must negotiate vibraphone, kick drum and snare drum all at once, while playing harmonica via a neck brace. The piece ended with a sad little harmonica duet, followed by a quick, sharp cutoff. Mazzoli completed her residency with master classes Thursday at CCM, leaving her students doubtless as energized and inspired as they had been by her music.
{ "redpajama_set_name": "RedPajamaC4" }
8,562
"use strict"; const Queue = require("promise-queue"); const config = require('config'); const _ = require('underscore'); const moment = require('moment'); const request = require('./request'); const taskManager = require('./taskManager'); const taskHelper = require('./helpers/task'); const cacheHelper = require('./helpers/cache'); const log = require("./helpers/logger"); const requestFactory = require("./requestFactory").requestFactory; const maxConcurrent = process.env.MAX_PARALLEL_TASKS || (config.has('app.max_parallel_tasks') ? config.get('app.max_parallel_tasks') : 3); const maxQueue = Infinity; const queue = new Queue(maxConcurrent, maxQueue); /** * * Pull results from different response formats * * @param api_resource - analyze or task * @param response - the response object * @returns {*} */ const normalizeResponse = function normalizeResponse(api_resource, response){ switch (api_resource) { case "analyze": log.trace("Returning normalised result for /analyze", JSON.stringify(response.analysis.results, undefined, 4)); return response.analysis.results; case "task": log.trace("Returning normalised result for /task", JSON.stringify(response.result.analysis.results, undefined, 4)); return response.result.analysis.results; } }; /** * responseHandler - process the response * * @param {normalizedTask} - the normalizedTask obj that generated the response * @param {response} - the response obj from the request * @returns {*} */ const responseHandler = function(normalizedTask, response){ //log.trace("PROCESSING normalizedTask:", JSON.stringify(normalizedTask, undefined, 4)); //log.trace("PROCESSING RESPONSE:", JSON.stringify(normalizedResponse, undefined, 4)); let responseData; if(_.has(normalizedTask,'analysis_tag')) { responseData = normalizedTask.analysis_tag; } else { responseData = normalizeResponse(normalizedTask.api_resource, response); } if (_.has(normalizedTask, 'then')) { let nextTasks = taskManager.buildNextTasks(normalizedTask, responseData); //Add new 
tasks to queue. Return a promise that will resolve //only once all the items in array have resolved. return Promise.all(nextTasks.map(function(each) { const requestObject = requestFactory(each); return queueRequest(requestObject, each); })).then(function(p){ log.trace("Response handler returning with compact objects..."); // remove undefined and nulls caused by unresolved // promises due to recursion return taskHelper.compact(p); }); } //convert any timeSeries results from unix to human responseData = taskHelper.resUnixTsToHuman(normalizedTask.analysis_type, responseData); if (_.has(normalizedTask, 'cache')) { log.trace("Response requestObj has CACHE object"); cacheHelper.addData(normalizedTask.cache.cacheId, normalizedTask.cache.mergeKey, responseData); let cacheData = cacheHelper.get(normalizedTask.cache.cacheId); if (cacheData.remainingTasks === 0) { log.trace("Response handler returning with remaining tasks = 0..."); delete cacheData.remainingTasks; return [cacheData]; //format as array of objects for csv parser } else { log.trace("Response handler returning with remaining tasks > 0..."); return; } } if (_.has(normalizedTask, 'then') === false && _.has(normalizedTask, 'cache') === false) { log.trace("Response handler returning with no THEN or CACHE objects..."); return responseData; } }; /** * queueRequest - add request obj to queue and pass * the results to the responseHandler() * * @param requestObj - a request object * @returns {LocalPromise} */ const queueRequest = function queueRequest(requestObj, normalizedTask) { if(_.has(normalizedTask,'analysis_tag')){ log.trace("Building FILTER for freqDist using analysis tag"); return responseHandler(normalizedTask, normalizedTask.analysis_tag); } return queue.add(function () { return request.make(requestObj); }).then(response => { if(normalizedTask.api_resource === "analyze"){ log.trace("Handling response from /analyze.."); return responseHandler(normalizedTask, response); } else { log.trace("Polling for response 
from /task..."); return request.recursivePoll(normalizedTask, requestObj, response); } }).catch(function (err) { // Req failed, drop from cache so we return a partial data set if(normalizedTask.cache) { log.warn("Data dropped due to task failure with key: \"" + normalizedTask.cache.mergeKey + "\""); cacheHelper.setFailed(normalizedTask.cache); } if(err.error && err.error.error){ log.error("MESSAGE:", err.error.error); log.error("CODE:", err.statusCode); log.error("TASK:", (err.normalizedTask) ? JSON.stringify(err.normalizedTask, undefined, 4) : JSON.stringify(normalizedTask, undefined, 4)); log.error("REQUEST:", (err.request) ? JSON.stringify(err.request, undefined, 4) : JSON.stringify(requestObj, undefined, 4)); } else { log.error(err); } //process.exit(1); }); }; exports.queueRequest = queueRequest; exports.responseHandler = responseHandler;
{ "redpajama_set_name": "RedPajamaGithub" }
5,170
// -------------------------------------------------------------------------------------------------------------------- // <copyright file="MainViewModel.cs" company="Helix Toolkit"> // Copyright (c) 2014 Helix Toolkit contributors // </copyright> // -------------------------------------------------------------------------------------------------------------------- namespace Workitem10044 { using DemoCore; using HelixToolkit.Wpf.SharpDX; using HelixToolkit.Wpf.SharpDX.Extensions; public class MainViewModel : BaseViewModel { public MainViewModel() { // titles this.Title = "Simple Demo (Workitem 10044)"; this.SubTitle = "Please note that this scene is defined completely in XAML."; EffectsManager = new DefaultEffectsManager(); } } }
{ "redpajama_set_name": "RedPajamaGithub" }
5,601
{"url":"https:\/\/cs.stackexchange.com\/questions\/32951\/best-sort-method-for-median-median-heap-or-insertion-sort-on-a-vector","text":"# Best sort method for median: median heap or insertion sort on a vector\n\nI'm trying to decide between two methods of calculating a median, that will optimize the following operations:\n\n\u2022 Add integer to data structure (insert)\n\u2022 Get the median of all integer (getMedian)\n\nThe program will add a random number of integers (no limit on how many, but generally going to be relatively small amount) to the data structure before trying to access the median, and then repeat this process an indefinite amount of times.\n\nWith the median heap method, insert will take O(log(n)) on average and getMedian will be O(1).\n\nWhat I'm wondering is what would happen if instead I simply used a vector. Insert would be amortized O(1). Then when getMedian is called, the vector is sorted with insertion sort, followed by simply accessing the middle element, O(1).\n\nWould this be faster in the long run? There will almost always be more calls to insert than getMedian in the program, but I'm not sure if the insertion sort will be faster. I believe it is a relatively fast sort, O(n), on a partially sorted array, but I'm not sure.\n\nFor instance, if I had a vector with 100 million integers that were sorted followed by 4 unsorted elements, would insertion sort be O(n)? What if I had 50 million unsorted elements (very unlikely in the program, but possible)? And at how many unsorted elements would it be better to use another sort, like quicksort?\n\nYou are asking many questions, and I will only answer two of them. If you have a sorted vector of length $n$ followed by $m$ unsorted elements, the best way to sort the entire vector is to use an $O(m\\log m)$ sorting algorithm on the tail, and then merge the two sorted lists in $O(n+m)$. The total running time is $O(n+m\\log m)$, which is $O(n)$ if $m = O(n\/\\log n)$. 
If $m = \\omega(n\/\\log n)$ then the problem is more complicated; asymptotically optimal bounds are known in the decision tree model, but I'm not sure they can be implemented efficiently enough (see for example Kahn ad Kim, Entropy and Sorting).\nAnother question, which you haven't actually asked, is about lower bounds. Suppose that you have a data structure in which insert takes (amortized) time $\\alpha(n)$ and median takes (amortized) time $\\beta(n)$, where $n$ is the number of elements in the structure. Suppose further that $\\alpha(O(n)) = O(\\alpha(n))$ and $\\beta(O(n)) = O(\\beta(n))$, and that $\\alpha,\\beta$ are monotone (this is the case for functions of the type $\\Theta(n^s\\log^t n)$). Then $\\alpha(n) + \\beta(n) = \\Omega(\\log n)$, assuming the operations are implemented using the comparison model (two datums can be compared, but are otherwise completely opaque).\nFor the proof, here is how to sort a list of length $n$. First insert the list. Then, extract the median, add $-\\infty$ (or the minimum value) twice, and repeat. You recover the sorted lower half of the list. Now add enough copies of $+\\infty$ (or the maximum value), and recover similarly the upper half of the list. This requires at most $O(n\\alpha(O(n)) + n\\beta(O(n))) = O(n(\\alpha(n)+\\beta(n)))$. 
In the comparison model, there is a lower bound of $\\Omega(n\\log n)$, so $\\alpha(n) + \\beta(n) = \\Omega(\\log n)$.","date":"2021-11-27 06:31:57","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.6745222210884094, \"perplexity\": 392.8285596086682}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2021-49\/segments\/1637964358118.13\/warc\/CC-MAIN-20211127043716-20211127073716-00197.warc.gz\"}"}
null
null
Belső-London (angolul Inner London) nagy-londoni önálló városrészek (borough-k) egy csoportja a brit főváros belső részében. Külső-London veszi körül. A 16. század utolsó évtizedében indult londoni halálozási jegyzék említi először. Rögzített határú statisztikai területté 1847-ben vált. 1855 és 1965 között önálló közigazgatási egység volt. Ma kétféle elfogadott definíciója létezik: közigazgatási és statisztikai. Az egyiket az 1965. április 1-jén hatályba lépett 1963-as londoni önkormányzati törvény fogalmazta meg. Eszerint Belső-London 12 boroughból áll és a területe csaknem megegyezik az ugyanakkor megszűnt London megye területével. A másik a Nemzeti Statisztikai Hivatal (Office for National Statistics) által használt definíció, amely szerint 11-et foglal magába a belső-londoni boroughk közül, plusz két külső-londoni borought és a City of Londont. Belső-London területre és lakosságra is kisebb Külső-Londonnál, a népsűrűsége azonban kétszer akkora. Területre nincs kétharmad akkora, mint Budapest, lakossága azonban csaknem kétszerese a magyar fővárosénak. A statisztikák szerint Európa leggazdagabb területe: itt van Európa legdrágább utcája, és az egy főre eső GDP több, mint 80 ezer euró, miközben az átlag az Egyesült Királyságban 27 ezer euró. A világ leggazdagabb emberei közül sokan élnek itt, ugyanakkor széles körű a szegénység is. Belső-London magja a londoni városcentrum. Jegyzetek Fordítás London területei
{ "redpajama_set_name": "RedPajamaWikipedia" }
6,713
Do you stress test your new drives? Just wondering. My friend does. I wonder if I should do that too. way to do a proper stress test on the electronics of a device. utility, or a program such as HD Sentinel. I caught 2 drives this way, which I sent back. why you bought from them in the first place. is not entirely solid state. manufacturer ships the occasional dud. I therefore run extensive tests on all my new drives. may dump on your data. do that, judging by all the bad drives people receive. didn't pass the full self-test, where the drive totally scans itself. would certainly be better if they did. corrupted some of it, even eventually. bad was when I copied a TV show I'd just recorded and it wasn't ok.
{ "redpajama_set_name": "RedPajamaC4" }
6,118
Q: SSIS Issue Pulling Data From Snowflake - [CData Snowflake Source] Error: Get data error: Received metadata with an incompatible version number I'm trying to run a simple data flow task in SSIS pulling data from Snowflake to SQL Server using a component from CDATA called Snowflake Source. Connection works and I can also see a preview of the data but when actually running the package I get the following error message: [CData Snowflake Source [2]] Error: System.Exception: Get data error: Received metadata with an incompatible version number at CData.SSIS.Snowflake.SSISSourceComponent.PrimeOutput(Int32 outputs, Int32[] outputIDs, PipelineBuffer[] buffers) at Microsoft.SqlServer.Dts.Pipeline.ManagedComponentHost.HostPrimeOutput(IDTSManagedComponentWrapper100 wrapper, Int32 outputs, Int32[] outputIDs, IDTSBuffer100[] buffers, IntPtr ppBufferWirePacket) Has anyone experienced this before or know what to do in order to fix it? A: Hi I faced this error many time , when i use some custom script component or third party component. Issue : We do remove or delete some component but some of the reference is not removed from package (it is very tough to debug ) Create fresh package it will work .
{ "redpajama_set_name": "RedPajamaStackExchange" }
5,371
Product prices and availability are accurate as of 2019-04-21 10:07:41 UTC and are subject to change. Any price and availability information displayed on http://www.amazon.com/ at the time of purchase will apply to the purchase of this product. MIG welder, collection: millermatic 252 with spindle, welding processes: MIG/flux core, input voltage: 230/460/575VAC, welded product: mild steel, light weight aluminum, product density mild steel: 22 ga. to 1/2", portability: rolled installed, stage: 1, product density light weight aluminum: 14 ga. to 3/8".
{ "redpajama_set_name": "RedPajamaC4" }
5,427
Traders have refused to pay the bonus to farmers stating that the government should credit the amount to farmer accounts on basis of the sale receipts and it is not their mandate to make bonus payments. Tur prices have fallen to R4,600-4,700 per quintal while the government MSP is at R4,625 with an additional bonus of R425 per quintal declared by the government. With arrivals in full peak and Tur (Arhar) prices falling below Minimum Support Price (MSP) levels, the Agriculture Produce Market Committee (APMC) in Latur — one of the biggest pulse producing regions in Maharashtra — remained shut for four days over the issue who would pay the government bonus of R425 per quintal declared to farmers. Although the market reopened on Wednesday to a lukewarm response from both farmers and traders, uncertainty remains over how long the market would remain open. Tur prices have fallen to R4,600-4,700 per quintal while the government MSP is at R4,625 with an additional bonus of R425 per quintal declared by the government. According to Lalitbahi Shah, chairman, Latur APMC, the market was shut over several issues including pressure to sell first to government agency Nafed and then to traders. Nafed is among the agencies selected by the Centre to procure tur from the market for the creation of a 1 million tonne buffer stock. The Small Farmers Agriculture-Business Consortium (SFAC) is procuring some 40,000 tonnes of the commodity through farmer producer companies. The Government of India has directed SFAC, Nafed and FCI to procure tur at MSP plus bonus for addition to the buffer stock. However, as opposed to daily arrivals of 15,000 quintals, Nafed only manages to weigh around 3,000-4,000 quintals while the rest of the stock goes as carry forward, he said. Moreover, the Tur is purchased only if it meets the quality parameters and while Red tur is purchased by Nafed the white Tur and mixed tur (Red and white) is not purchased by Nafed, he said. 
These arrivals are to the tune of 4,000-5,000 quintals daily, Shah said. Moreover, farmers who come from long distances to sell Tur expect at least 50% of the payment and are handed cheques for payments after 15 days and are disappointed that they do not get some cash for their daily needs, he said. Shah said the market has been reopened on the condition that farmers should give NOCs that they are willing to sell below MSP if the product does not fall within the government quality parameters. Shah said that the APMC has written to the government seeking intervention. According to Hukumchand Kalantry, president, Latur Dal Millers Association, Latur is being singled out for step motherly treatment. "There is immense pressure on traders not to purchase below MSP from farmers while the rates have fallen to 4,000-4,400 per quintal in Vidarbha markets including Akola, Khamga, Washim, Hingoli there is no government pressure here. The same rule should apply to every Mandi and not just Latur," he said, Moreover, according to a government resolution, the government should give the bonus to the farmer after making the necessary sale receipts and the amount of R425 should go to the farmer accounts directly.
{ "redpajama_set_name": "RedPajamaC4" }
4,248
Man taken to hospital following incident involving train in Ventura A man was taken to a local hospital with minor injuries following an incident involving a train Friday in Ventura, officials said. Man taken to hospital following incident involving train in Ventura A man was taken to a local hospital with minor injuries following an incident involving a train Friday in Ventura, officials said. Check out this story on vcstar.com: https://www.vcstar.com/story/news/local/communities/ventura/2016/08/19/man-taken-to-hospital-following-incident-involving-train-in-ventura/89103598/ The incident was initially reported as a pedestrian hit by an Amtrak train near Vista Del Mar Place and Sanjon Road, Ventura police said. Preliminary information from the scene indicated that the train was moving slowly in the area and the man had minimal contact, if any, with the train as it passed by him, police said. When officers and emergency medical crews with the city of Ventura Fire Department arrived on scene the man was not on the tracks, police said. They searched the area for him and found him with a scratch on his forehead, police said. The man was taken to a local hospital to be checked out, police said. Crews on the scene reported all train traffic in the area had been briefly stopped as a result of the incident. Read or Share this story: https://www.vcstar.com/story/news/local/communities/ventura/2016/08/19/man-taken-to-hospital-following-incident-involving-train-in-ventura/89103598/
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
5,173
Christina Aguilera – Not A Justin Bieber Fan Lucienne Molnar April 19, 2012 10:35 am January 1, 2017 4 2079 After last night's episode of "The Voice" people all over the Internet have started spreading the rumor that Christina Aguilera is not a Justin Bieber fan. According to a report published in the Inquisitr, the singer was very distant with Bieber when he came to say "Hello" to her. Justin Bieber had a guest appearance on one of America's most loved shows, "The Voice". At the end of his performance, he went to shake hands with the jury. Christina Aguilera would have liked to shake hands with the "Boyfriend" interpreter, but instead, he kissed her on the cheek leaving the member of the jury with what was interpreted as a slightly disgusted smile on her face. The moment was highly discussed among Internet users. Some people claim that Aguilera was actually showing her discontent towards the young singer, but others have found a rather plausible explanation for the moment. According to the singer's fans, Aguilera was upset because one of the members on her team had just been eliminated from the show. In their opinion, her anger was perfectly justified and, most importantly, it was not directed towards Justin Bieber. Christina Aguilera released a statement on her website saying that she doesn't bear any grudges against Justin Bieber. She further stated that the moment was blown out of proportions by tabloids. As a matter of fact, Aguilera told everyone that she finds Bieber to be a "fun" and "charismatic" guy. In the end, she confessed that she liked Bieber's performance very much because it was a "fun burst of energy" for the show. On Wednesday, the pop star took to Twitter to talk about the recent rumors. She wrote on her account that the media likes to spin stories, thus suggesting that everything we have heard so far is not true. Later on, she tweeted "Can't a girl have a little #BieberFever after getting kissed by the Biebs?" 
to explain why she didn't maintain a smile after the hug. Previous ArticleSheree Whitfield To Leave "Real Housewives Of Atlanta"Next ArticleThird Baby On The Way For Melissa Joan Hart mollie says: I love justin bieber and i dont see why she doesnt! your bout 13 or 14 that's why you love him love god honey what how can she not like justin bieber i mean he is hot she should love justin
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
6,055
Топал  — посёлок в Красноярском районе Астраханской области России. Входит в состав Ахтубинского сельсовета. География Посёлок находится в юго-восточной части Астраханской области, на левом берегу реки Ахтубы. Уличная сеть посёлка состоит из 3 улиц: ул. ул. Мира, ул. Набережная, ул. Новая. Абсолютная высота — 23 метра ниже уровня моря. Климат умеренный, резко континентальный. Характеризуется высокими температурами летом и низкими — зимой, малым количеством осадков, а также большими годовыми и летними суточными амплитудами температуры воздуха. Население Национальный и гендерный состав По данным Всероссийской переписи, в 2010 году численность населения посёлка составляла 456 человек (228 мужчин и 228 женщин). Инфраструктура В посёлке находилась животноводческая ферма. Транспорт Единственная автодорога, связывающая остров, на котором расположен посёлок, с материком, проходит по понтонной перепаве в 7 километрах к югу от Топала в районе посёлка Комсомольский. В черте Топала имеется ещё один понтон, пригодный только для пешеходов и соединяющий посёлок с Досангом. До 2018 года он находился в аварийном состоянии, оставаясь при этом основной дорогой во внешний мир для большинства посельчан. Ближайшая железнодорожная станция — ст. Досанг Астраханского отделения Приволжской железной дороги Примечания Населённые пункты Красноярского района (Астраханская область)
{ "redpajama_set_name": "RedPajamaWikipedia" }
9,152
using EmpMan.Data.Infrastructure; using EmpMan.Model.Models; using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace EmpMan.Data.Repositories { public interface IOrderReceivedRepository : IRepository<OrderReceived> { } public class OrderReceivedRepository : RepositoryBase<OrderReceived>, IOrderReceivedRepository { public OrderReceivedRepository(IDbFactory dbFactory) : base(dbFactory) { } } }
{ "redpajama_set_name": "RedPajamaGithub" }
5,640
{"url":"https:\/\/itectec.com\/superuser\/where-is-the-c-drive-in-the-wsl\/","text":"# Linux \u2013 Where is the C drive in the WSL?\n\nbashcommand lineshellwindows-10-v1607windows-subsystem-for-linux\n\nWhenever I try to login to bash using the Windows Subsystem for Linux, I try to cd into C:\\Users\\, but all I get is directory not found.\n\nWhere is the C drive for the Windows Linux subsystem? Is it isolated?\n\nThe WSL has access to your PC\u2019s file system through \/mnt\/<drive letter>\/ directories (or mount points). For example, your C:\\ and D:\\ root directories in Windows would be available through \/mnt\/c\/ and \/mnt\/d\/ respectively in the WSL\nJust cd into the \/mnt folder and you'll be fine","date":"2021-09-28 00:27:49","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.5045682787895203, \"perplexity\": 8719.637538200648}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2021-39\/segments\/1631780058589.72\/warc\/CC-MAIN-20210928002254-20210928032254-00073.warc.gz\"}"}
null
null
{"url":"https:\/\/www.gradesaver.com\/textbooks\/math\/calculus\/calculus-8th-edition\/chapter-1-functions-and-limits-review-exercises-page-97\/28","text":"## Calculus 8th Edition\n\n$-\\infty$\n$\\lim\\limits_{x \\to 1^+}\\dfrac{x^2-9}{x^2+2x-3}$ Graph the function (image attached below). As $x$ approaches 1 from the right hand side, y goes to $-\\infty$. Therefore, $\\lim\\limits_{x \\to 1^+}\\dfrac{x^2-9}{x^2+2x-3}=-\\infty$","date":"2018-04-26 10:25:49","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8608736991882324, \"perplexity\": 373.2780654681765}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 20, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-17\/segments\/1524125948125.20\/warc\/CC-MAIN-20180426090041-20180426110041-00541.warc.gz\"}"}
null
null
require 'set' module Reconfig class TypeMapper def stored_type(value) case value when String string when Integer integer when Float float when Hash hash when Array list when Set set else raise UnknownTypeException.new("Cannot map #{value.class.name}.") end end # Redis stores SCORES as floats, so types must be floats def string 1.0 end def integer 2.0 end def float 3.0 end def hash 4.0 end def list 5.0 end def set 6.0 end end end class UnknownTypeException < Exception; end
{ "redpajama_set_name": "RedPajamaGithub" }
7,372
John Robins (ur. 4 maja 1982 w Bristol) – angielski stand-upper oraz prezenter telewizyjny i radiowy. Młodość Robins dorastał w Bristolu, ukończył The Castle School w Thornbury w hrabstwie Gloucestershire, a następnie, po ponownym złożeniu aplikacji, która rok wcześniej została odrzucona, studiował filologię angielską w St Anne's College w Oksfordzie. Po ukończeniu Oksfordu Robins wrócił do Bristolu, gdzie w 2006 roku zamieszkał z innymi komikami: Jonem Richardsonem, Russellem Howardem i Markiem Olverem. Używał wówczas pseudonimów: The Chuckling Fireman, Mick Boyce, Allied Tradesmen, Keeper of the Sword of Justice, The Preston Strangler oraz Features. Kariera Robins zaczął występować jako komik w 2005 roku. Wkrótce dotarł do półfinału konkursu So You Think You're Funny na Festiwalu teatrów ulicznych w Edynburgu (Edinburgh Festival Fringe). Podczas festiwalu w 2007 roku brał udział w pokazie "Comedy Zone". Robins pojawiał się zarówno jako gość, jak i jako prezenter w The Russell Howard Show i The Jon Richardson Show w stacji BBC Radio 6 Music (2007–2010). W 2011 wystąpił jako stand-upper w Russell Howard's Good News. W telewizji pojawił się także w programie Alan Davies: As Yet Untitled (dwukrotne wystąpienie plus nieemitowany pilot), Live at the Apollo, Mock the Week, Live from the BBC, Celebrity Deal or No Deal z Sarah Millican oraz w 2013 roku w świątecznym mash-upie panelu komediowego 8 Out of 10 Cats z teleturniejem Deal or No Deal. Robins pojawił się również w kilku programach radiowych, między innymi w programie Talksport Matta Forde'a, Word of Mouth w BBC Radio 4 oraz w What's the Story? w BBC Radio Wales. W lutym 2014 roku wraz z komikiem Elisem Jamesem zaczął prowadzić program Elis James and John Robins w radiu XFM (obecnie Radio X). W sierpniu 2014 roku program został przeniesiony na soboty i był nadawany co tydzień od 13:00 do 16:00. Program jest również dostępny jako bardzo popularny podcast, który od lipca 2017 roku odnotował ponad 12 milionów pobrań. 
Robins wystąpił po raz pierwszy w Nowej Zelandii w maju 2014 roku podczas NZ International Comedy Festival z występem zatytułowanym Where Is My Mind? W 2015 roku, podczas rozdania nagród , Robins otrzymał nagrodę w kategorii najlepszy prezenter. W drugiej połowie 2016 roku Robins wraz ze współgospodarzem Elisem Jamesem wyruszył w trasę o nazwie "The Elis James and John Robins Experience". W marcu 2017 roku James i Robins otrzymali nagrodę Chortle za swój program w Radio X. Co roku, od 2009 (z wyjątkiem 2016), Robins występował z programem solowym na festiwalu w Edynburgu, począwszy od programu Skinny Love. Jego kolejnymi występami były Nomadic Revery (2010), Lift Your Skinny Fists Like Antennas to Heaven (2011), Incredible Scenes (2012), Where Is My Mind? (2013), This Tornado Loves You (2014) i Speakeasy (2015). Wszystkie występy były również zaprezentowane podczas tras na terenie całej Wielkiej Brytanii. Ponadto, Robins wystąpił na Machynlleth Comedy Festival oraz Leicester Comedy Festival. W sierpniu 2017 roku Robins otrzymał (wspólnie z Hannah Gadsby) nagrodę komediową Edinburgh Comedy Award za występ zaprezentowany tamtego roku – The Darkness of Robins, który skupiał się na rozpadzie jego związku z Sarą Pascoe i jego życiu osobistym po zerwaniu. Była to jego pierwsza nominacja do tej nagrody. W czerwcu 2018 roku ogłoszono, że Robins będzie gospodarzem nowego teleturnieju zatytułowanego Beat the Internet w kanale telewizyjnym Dave. Po pięciu latach nadawania programu Elis James and John Robins w Radio X (wcześniej XFM), w sobotę 30 marca 2019 roku jego autorzy wyemitowali swój ostatni odcinek. 2 kwietnia 2019 roku Robins ogłosił, że razem z komikiem Elisem Jamesem będzie prowadził program w BBC Radio 5 Live. W 2019 roku Robins wystąpił na festiwalu Edinburgh Fringe z programem zatytułowanym Hot Shame. Życie prywatne W 2012 roku Robins przeprowadził się do Londynu. Komik określa się jako "przeważnie weganin". 
Robins jest fanem zespołu rockowego Queen i wspierał zespół podczas ich sylwestrowego koncertu 2014-15, na którym zespół wystąpił z Adamem Lambertem. Jest również fanem Franka Zappy i Bonniego Prince'a Billy'ego i prowadzi konto z cytatami Bonniego Prince'a Billy'ego na Twitterze. Robins często nadawał tytuły swoim edynburskim występom, inspirując się swoją ulubioną muzyką, np. Lift Your Skinny Fists Like Antennas to Heaven to tytuł albumu grupy Godspeed You! Black Emperor, a "This Tornado Loves You" to piosenka amerykańskiej piosenkarki i kompozytorki Neko Case. Przypisy Brytyjscy komicy Urodzeni w 1982 Ludzie urodzeni w Bristolu
{ "redpajama_set_name": "RedPajamaWikipedia" }
735
/*
   Info: JavaScript for JavaScript Basics Lesson 1, JavaScript Development
   Introduction, Task 5, Decimal to Hexadecimal
   Author: Removed for reasons of anonymity

   The conversion deliberately avoids Number.prototype.toString(16) and
   implements decimal-to-hexadecimal by repeated division by 16.
*/
'use strict';

/**
 * Converts a non-negative decimal integer to its uppercase hexadecimal
 * representation.
 *
 * @param {number|string} args - the value to convert; coerced with
 *   parseInt(args, 10), so numeric strings are accepted.
 * @returns {string} the hexadecimal digits, e.g. 254 -> "FE", 0 -> "0".
 */
function convertDecimalToHexadecimal(args) {
    'use strict';
    // Indexing into this string replaces the original 16-line switch over
    // remainders 10..15.
    var HEX_DIGITS = "0123456789ABCDEF";
    // Declared with var: the original assigned an undeclared name, silently
    // clobbering the caller's variable (and illegal under real strict mode).
    var hexadecimalNumber = "";
    // Radix 10 made explicit so strings like "0x.." or leading zeros cannot
    // surprise the conversion.
    var number = parseInt(args, 10);
    var remainder;

    if (number === 0) {
        // Bugfix: the original returned the *number* 0 here, so callers got a
        // number for zero but a string for every other input.
        return "0";
    }

    while (number !== 0) {
        remainder = number % 16;
        // Math.floor is the idiomatic integer division; the original used
        // parseInt(number / 16), which round-trips through a string.
        number = Math.floor(number / 16);
        hexadecimalNumber = HEX_DIGITS.charAt(remainder) + hexadecimalNumber;
    }

    return hexadecimalNumber;
}

// Interactive driver. Guarding on the browser-only globals keeps the file
// loadable (and the function above testable) in non-browser environments;
// in a browser the behavior is unchanged.
if (typeof prompt === "function" && typeof alert === "function") {
    var input = NaN;
    while (true) {
        var inputData = prompt("Enter a number", "254");
        input = parseInt(inputData, 10);
        // Bugfix: the original tested `input != NaN & input >= 0`. NaN never
        // compares equal to anything, so that check was always truthy on the
        // left, and `&` was the bitwise operator, not logical AND. isNaN()
        // performs the intended validation.
        if (!isNaN(input) && input >= 0) {
            console.log(input);
            break;
        }
    }
    alert(convertDecimalToHexadecimal(input));
}
{ "redpajama_set_name": "RedPajamaGithub" }
9,476
Spatial conditions of violence in the city of Medellin i) Violence in Medellin Since the mid-1980s Medellín has been an extraordinarily violent city in the context of Colombia. In the first wave of violence between 1989 and 1994, Medellín experienced 25 percent[1] of all public order problems in the entire country. This means that in a country with a history of violence and an internal civil war, Medellín was the territory where those consequences were among the most visible. See Figure 1. The narcotraffic network based in Medellín was an important factor in this increase of violence, an increase that peaked in 1991. Since then, new armed groups have appeared and been replaced in succession up to the present. Figure 1 Death rate in Colombia vs. Medellín from 1975 to 2002 Source: Instituto Nacional de Medicina Legal y Ciencias Forenses, Regional Noroccidente. Boletín de prensa 2002 The fact that violence in Medellín is higher than in the rest of the country is, in my opinion, the collision between the externalities of the larger national conflict and local conflicts. These groups are the result of a confluence of local conditions in interaction with the changes in the national conflict in Colombia since the 1950s. From the war of the cartels, to the sicario gangs looking for income after the cartels were dismembered, to the incursion of the urban branches of the left-wing guerrillas (milicias populares), which were followed by their nemesis, the paramilitary groups (Autodefensas Unidas de Colombia, AUC), which after their failed demobilization generated the present multiplicity of combos (bandas criminales emergentes, BACRIM) fighting for small territories to tax and to control small-scale drug distribution. 
In Medellin all this phenomenology of violence that cover almost three decades with actors entering and leaving occurred more intensely in two spatial distinct territories: 1) is in the CBD that is the place for the confluence of all armed actors and also is the place of exchange between the formal and the informal city and 2) in the lower income neighborhoods especially in those with large concentrations of informal housing or that come from a tradition of historical poverty. ii) Spatial Segregation In this memo I would focus on the second spatial category of lower income neighborhoods especially in those with large concentrations of informal housing located spatially in the steep hills at the edges of the city because if it's true that the CBD is an outlier of the scale of conflict, the individuals that perpetuate the violent acts are in its majority residents of the second spatial category. The most important question is what explains the relationship between areas low income and the higher levels of violence in such areas. It is true that the same its applied as a cliché to most crime areas in the world. But in Medellin the time frame of the conflict the multiplicity of actors requires more detail into explain the environmental conditions that "maintain" these levels of conflict and violence. Some authors explain this phenomenon of the concentrations of poverty in Medellin as the collision of rapid unplanned development (that by negligence and impotence of the state permitted areas considered of high risk to be developed informally) and by large the migrations from other parts of the country of exiled population fleeing the undeclared national civil war since the early 50's. And that because this population were fleeing the conflict they arrive usually lacking (and this is still occurring today) resources to obtain housing in the overwhelmed formal market. 
Figure 2 Informal Settlements in Medellin and socio spatial distribution (estratos) The development of informal housing in Medellin is a way of new immigrants to cope whit those challenges of find housing and employment in the city. John J. Betancur (2007a) acknowledges the benefits that other authors see in informality as a way for underdeveloped societies to cope with the consequences of globalization, but he also cites the dangers in the intrinsic separation between the state and the individual. "One of the economic benefits of informality is flexibility, but it represents a deep social dilemma. Operating outside or in violation of the rule of law severs ties with the material and social basis of citizenship, legitimacy, and recognition."[2] Furthermore, Betancur sees informal citizens, through their status as informal, as likely to become criminals. He conclude that once your economic source of employment is consider illegal (selling food on the street) individuals are more likely to move to other types of criminal economical activities. I would argue that the fact that informality is illegal is what makes the line between legal and illegal very difficult to locate. Being a resident of an informal settlement automatically deprives you of your rights to citizenship to the formal state. A similar rhetoric between legal and illegal, informal and formal citizen are employed today against illegal immigration in the United States. We have to be careful when we make a connection between a citizen being informal and being a criminal. In terms of the spatial distribution of these populations over the territory of the city is clear to visualize how the rapid expansion of the urban edge of the city and the scarcity of land in a city located in a valley. Leave only the edges of the steep hills as areas accessible thru invasions. Also is important to annotate that different to flat cities where poor are expelled to the edges. 
In Medellín the topography actually makes the accessibility to these territories exponentially more difficult. The transport of goods and population become more difficult with each meter climb and walk. The perceive distance of this communities to the CBD is larger than the one the maps represent this is visualized by the use of the rhetoric between neighborhood and city used by the inhabitants of this communities in my interviews they do not say that they go to the centro (CBD) they say I go to Medellin. Is in the dual qualities of this expression that makes the this communities different one is the realist distance measure in time and the second is the characteristic of isolates and excluded from the participation of the urban dynamics of the city of Medellin. Figure 3 Urban growth city of Medellin 1700 to 2000 The Informal City in Medellín "Most low-income immigrants to Colombian cities availed themselves of housing through land invasion or acquisition of illegal land partitions and self-settlement in the urban periphery. Thus, illegal forms of tenure, precarious dwellings, and violations of established regulations and codes characterized most of their settlements. Local governments could not intervene because they would be violating private land property rights or their own rules. Hence, improvements depended largely on settlers. Eventually, government developed a mechanism of intervention based on the distribution of construction materials and the loan of heavy equipment to settlers who then carried out the work. Meanwhile, government policies addressing the housing needs of the poor evolved from direct development of public housing to the provision of subsidies. " Betancourt 2007 Understand the informal housing practices of Medellin are a proxy of understanding the relationship between the state and the communities that live in these territories. 
Pockets of informal development in Medellin represent 20 to 40% of the urban territory; these areas housing and infrastructure have been developed not by the state but first by the inhabitants. The government later followed up by consolidating public services like water, energy, sewer, telephone and paving of main roads as well supporting already institutions precariously implemented by the community such as schools, sports facilities. But all these interventions are afterthoughts and not part of coordinated planning strategy of the planning department as is for the rest of the city. The modern "comprehensive Plan" or Plan Piloto of the 1950s considered most of these areas unbuildable and intentionally did not incorporate them into the urban perimeter of the city. But just 10 years later the city had already incorporated those territories for speculation. As it show figure 4. Figure 4 Comuna 1 and 2 from 1950 and 1990 showing current locations of MetroCable. Source: PUI Nororiental EDU. It is important to understand this relationship between the state and the communities that occupy these territories. In the case of the PRIMED,[3] Betancur (2007b) explores this relationship in the Integrated Slum Upgrading Program of Medellin (Programa Integral de Mejoramiento de Barrios Subnormales en Medellín or PRIMED). First, he brings to light the legal impediments that the nation and local state had imposed on itself that prevents it from engaging with the large illegal settlements in the city. And historically map the series of institutions that the state created to try to cope with growing prove of informal housing in the city form the 1960s to the present. Adding to the institutional incapacity of dealing with the concentration of informality and poverty is that. 
For decades, the neighborhoods where these policies were applied in the city of Medellín were isolated from the city's general population and in some cases even from the traditional repressive arms of the state police or army. Those consider these areas to dangerous as to execute its constitutional mandate. Added to the idea of isolation as the main characteristic of the relationship between state and community. another layer that is important to analyze is the one of the state as accomplice of the segregation. The Case of Villa del Socorro I have concluded that the areas of this study comprise a large portion of the city's territory, and territory that has not had significant intervention of the state. Other institutions in conjunction with the state had engaged in the construction of the environment. This is the case of Villa del Socorro, one of the neighborhoods that form Comuna 1. Today, the inhabitants aditions to the original low income units made them look like the rest of the neighborhoods in the Comuna, but in reality it is part of one of the first experiments in the world in core housing,[4] a prototype that is based on the informal citizens' strategy to build their homes and environment through incremental development. This and similar projects in the north of the city took the problem of informality housing and economy that existed in the downtown of Medellín and transferred the housing component to the north periphery of the city. I conclude that because the government did not follow up with infrastructural services, this initiated a process in which these areas of the periphery became even more vulnerable to being informal. This then created a type of economic, social, and spatial and physical segregation of its inhabitants and environment from the rest of the city. This example is interesting for two reasons. 
First because it shows that, in a way, it actually was the state who initiated the process of urbanization of these steep and isolated geographies, and it did so in a way that generated a path to follow—an economically, geographically and culturally segregated path. Second, these first attempts were not followed by others state-oriented projects that either corrected or continued this path. No substantial interventions by the state happened, for nearly 35 years, until the PUI in 2004. The entire infrastructure in place at this moment in 1969 became the formal structure of the entire northern communities that we see today. Figure 5. Villa de Socorro Before (as a plan) and today Comuna 1. Source: Urban Dwelling Environments and aerial image from Google Earth 2010 The limits of the infrastructure of this project became the limits (borders) with the city. From this border, all material to develop new housing, churches, steps and roads, were carried up the hills to the invasion territories. Luz Marina Saldarriaga, in my interviews with her, narrated the process in which she, her family and other families invaded the territory. She also talked about purchasing the illegal title to the land (from a pirate urbanizer) and the fights to keep from being evicted from the state authorities. In a narrative that could be a chapter in a Gabriel Garcia Marquez story she said, "at that time [in the 1970s] the mayor had ordered for this land to reclaimed [by the state] and sent the military to expel us from here…the Captain [of the Fourth Brigade] ordered their soldiers to not evict any family that had the Colombian flag raised… and we knew about that… the next day we made flags out of our clothing and that was the way that we were able to stay here."[5] This contestation of the tenure of the territory, the constant threat of eviction by the authorities marked the initial and continued relationship of this community with the state. 
This contestation and open knowledge of illegality are, in my consideration, important elements that facilitated the birth, conquest and contestation of authority of this territory. The state acknowledges the existence of this community and their illegal appropriation of the territory, but at the same time the state also lacks the power or will to deal politically with the consequence of evicting people from their homes. This delaying of resolution stops further improvements of the territory, and the ones that are made are to address problems that have already spiraled out of control such as the proliferation of violent groups. Figure 6 Comunas 1 and 2 with the project Villa del Socorro developed 1967 and areal image 2010 Source: Instituto de Credito Territorial and Google 2010 In my interviews, people narrate ways that these conditions of extreme poverty and the state's lack of accountability catapulted the first waves of what might be called "regular crimes"–home robberies, vandalism, pick pocketing. These crimes quickly escalated to organized crime waged by different criminal gangs that by the 1980s become formalized as the Sicarios (assassins) who worked directly with drug lords. These Sicarios would come to terrify the city throughout the late 1980s and early 1990s. After the main narcotraffic networks in the city of Medellín were dismantled in the early 1990s, these criminal groups were absorbed by other illegal groups that challenged the authority of the state. In a series of essays based on a compilation of stories told by members of the neighborhoods of the northeastern section of the city, "Somos Historia Comuna Nororiental," 1991 where the comuna 1 and 2 are located, narrates part of this process. They talk about how urban guerrillas used the opportunity to take over the comunas territories after the gangs of sicarios had lost their leadership. 
A member of the then new MP (milicias populares) talks about this process of absorption: "We executed many of the bosses and members of the gangs of the Sicarios. We did not have any alternative— they were rotten people and we knew that they would never rehabilitate.[6] These executions were sufficient to make an example of the other small groups, to make them understand that we were talking seriously."[7] To the presence of a new armed groups in these neighborhoods, the General Pardo from the Cuarta Brigada, a battalion located in Medellín's National Army responds, "These new organizations had their umbilical cord attached to the Coordinadora Gerrillera (guerrillas), that has taken advantage of the social decompositon in the comunas to collect the harvest in a field fertilized by violence and narcotraffic."[8] Similarly violent and non-violent process of absorption will follow to the contestation of the territory by the paramilitary groups in the late 1990s. and early 2000's This shows the evolution of the physical space (community self-constructed public and private space) along with the political implications and consequences of that evolution (the recognized condition of illegality imposed on an entire community and the inability or neglect of the state to legally intervene in the resolution of physical and social issues)— all in an environment of extreme poverty. This is a recipe for the incubation of criminal activities and further escalations of violence in a country like Colombia where multiple illegal groups contest the power of the state. By declaring the occupation of the territory illegal, the state removed the citizenship of the community members. Every day that this illegal condition is maintained, further distances the citizens from their participation in society at large and further creates the aura that each inhabitant is, by de facto, a criminal. 
This criminalization of the existence of the community is not removed once the state concedes the titles of property to those who live there. The physical atrophy that the neighborhood has suffered through decades of neglect corroborates this idea of the inhabitant as a third-class citizen. The absence of the state in the area of informal settlements makes these areas perfect environments to be appropriated by groups who are contesting the authority of the state. In Colombia, exists a complex situation. On one hand, as much as 64 percent[9] of the population lives below the poverty line and a large percentage of this population lives in informal settlements. On the other hand, in Colombia and in Medellín, there is a long list of illegal armed groups who operate outside of the state (paramilitaries, guerrillas, narcotraffic groups, and other crime organizations). These informal spaces are necessary for the survival of these armed groups. Informal neighborhoods become contested territories among these warring armed groups. Figure 7 Homicides rates in Medellin from 1994 to 2009 by Comuna III. III. The violence is not a constant over the territory Violence concentrates in areas of the city that are segregated both spatially and socially. Also is important to consider that violence in Medellin is historically since 1992 is generally following a downward trend but this trend is not constant in all areas of the city, violence decrease and increase in different areas at different times. See Figure 7. I attribute these fluctuations to actions to the variety of armed actors (Gangs, Urban Guerrilla, Paramilitary and the National Army) in the conflict of the city of Medellín that are activated or by intrinsic conditions at the city scale (like the action of Combos) in combination with externalities of the national conflict. Moments of negative resilience (increase in violence) reflect an unbalance in the coercive power of the armed organization in command. 
this unbalance is the result of the incursion of a new armed group on its territory. In contrast positive resilience (reduction on the scale of violence) reflects a tendency towards hegemonic control of a single group. What the longitudinal data of homicides rates in Medellin seem to prove. See figure 7 is that the reshuffling of armed actors does not occur at the city scale but rather at more condense scale that should be situated below the district (comuna) scale. This reshuffling that happen in most of the territory of the city and but just at small scales suggest the following two hypothesis: 1) is that any of the armed actors in the city of Medellín including the armed forces of the state does not have the power to assert hegemonic control over all territories of the city and thus this requires for any armed actor to engage in sectarian wars to control territories up to the level of its military capacity (a block, neighborhood or District). 2) that the multiplicity of armed actors in Medellin pus the military strategies of acting at less than the district scale, maintain and perpetuate the level conflict and violence in the city because these colliding issues impede to any of the armed actors to claim complete hegemony over the territory of the city. This reshuffling of groups and this constant activation of new war for territories has had as conclusion that by 2010 Medellín poses the larger percentage (20%)[10] of interurban displaced population of the nation (13.541 officially counted) [11] this are individuals that need to flee their neighborhood for fear of new armed group acting in its neighborhood. IV. From the city scale to the district (comuna) scale I have concluded that multiple actors' occupy territories in the absence of strong state in areas that are similar to the reach of its military power. The question is them what this scale of the conflict represents for the communities that coexist with a multiplicity of armed actors? 
And how this conflict manifest at the physical scale? One one site is presence of the state and its repressive forces in this areas. In marginalized neighborhoods like the ones in Comunas 13, 1 and 2, loitering in public space, such as the street (more specifically a street corner) is synonymous with criminal activity. Poor youth gathered on a street corner in Medellín is seen as a sign of youth involved in a criminal activity and this context of loitering usually inspires repressive police force. Where "randomly" community is constantly seen for the state armed forces as the enemy and as such treated like it. On the other hand are the criminal organizations and its turf war. In January 2010, Medellín saw a new increase of violence result of fights among small gangs fighting for power over territorial commercial areas. That had opened up by the extradition of Don Berna,[12] a paramilitary leader in Medellín. As a result of the complex and failed peace process with illegal armed actors in Medellin, the ex-militants of the paramilitary groups that once were part of the peace process had regrouped into a multiplicity of small gangs "Combos, Bandas and Oficinas" (Avendaño 2009). These groups[13] had a larger presence in informal settlements. Once again gangs divided these neighborhoods into small territories they could control, sometimes at the scale of a single block. This made free mobility through the neighborhood difficult. When interviewing in community members in this areas, I conducted many of the interviews inside public buildings such as the Parques Bibliotecas, and PUI offices, and/or in the public spaces dominated by these buildings where interviewees felt that it was safe for them and for me. Sometimes as part of the interview we would venture beyond the public spaces up to the limits where interviewees were, by the new armed groups, not allowed to cross. See Figure 8. 
This is a map that graphically shows how the previonew BACRIM organizations had fragmented the territorie of into a multiplicity of illegal armed actors distributed over the territory of the Comunas 5 and 6. Figure 9 Comuna 5 and 6 Distribution of Illegal armed groups 2009: In red are the territories controlled by each one of the gangs (Combos or Bandas) that operated in 2009 in the Comunas 5 and 6. This multiplicity of groups divided the limited territories of the neighborhoods. The community members that get included inside the limits imposed by the new conditions of conflict had to forge willingly or unwillingly alliances with the current group. This alliance is true to the conditions of today as to similar narratives at the different changes of armed control over the last decades. This alliance automatically generates grievances to all other actors. These automatic alliances further limit the free mobility of community members' participant or not on the active armed conflict. Male youth are the most affected by these new territorial distributions but all community members are affected in one way of another and the raising number of interurban displaced population Is evidence of this phenomena. Figure 10 analyse how the action of such multiplicity of armed actors in figure 9 determine the level of risk of mobility of all community members. Figure 10 mapping of the levels of risk in the circulation network in the comunas 5 and 6 given the number of armed groups. Source: the Autor Jota Samper V. Bibliography Alcaldia de Medellin. 2010 Secretaría de Bienestar Social Gerencia Para la Coordinación y Atención a la Población Desplazada, Unidad De Análisis Y Evaluación De Política Pública "Análisis del contexto y la dinámica del desplazamiento forzado intraurbano en la ciudad de Medellín" july. Avendaño, Mary Luz. 2009. Las bandas de Medellín | ELESPECTADOR.COM. April 8. http://www.elespectador.com/articulo135143-bandas-medellin. Betancur, J.J. 2007a. 
"Urban Challenges in Latin American Cities: Medellin and the Limits of Governance". Betancur, John J. 2007b ."Approaches to the Regularization of Informal Settlements: the Case of PRIMED in Medellin, Colombia." Global Urban Development Magazine, November. http://www.globalurban.org/GUDMag07Vol3Iss1/Betancur%20PDF.pdf (accessed February 2, 2010). [1] "With 7% of the national population, the city reported 25% of public order problems in the country in 2001" Betancur, John J.. "Approaches to the Regularization of Informal Settlements: the Case of PRIMED in Medellin, Colombia." Global Urban Development Magazine, November 2007. http://www.globalurban.org/GUDMag07Vol3Iss1/Betancur%20PDF.pdf (accessed February 2, 2010). [2] Betancur, J.J. 2007. "Urban Challenges in Latin American Cities: Medellin and the Limits of Governance". [3] Integrated Program for Improvement of Subnormal Barrios in Medellín. [4] Caminos, Horacio, John F. C. Turner, and John A. Steffian. 1969. Urban dwelling environments; an elementary survey of settlements for the study of design determinants. Cambridge, Mass: M.I.T. Press. [5] Luz Marina Saldarriga, interview in "Proyecto Histórico de Memoria" la Violencia no es toda la Historia, 2008 DukeEngage Medellín, Tamera Marko and Jota Samper, 2008. [7] Estrada C., William, and Adriana Gómez V. 1992. Somos historia: Comuna Nororiental. Medellín, Colombia: this an extract from the revista Semana, abril 9,1991 [8] Ibid Estrada. 1992. page 172 [9] United Nations Development Programme, Population living below national poverty line (%), most recent year available during 2000-2007. Human and income poverty: developing countries / Population living below $1.25 a day (%), Human Development Report 2009, UNDP, accessed on December 19, 2009. 
[10] Alcaldia de Medellin, Secretaría de Bienestar Social Gerencia Para la Coordinación y Atención a la Población Desplazada, Unidad De Análisis Y Evaluación De Política Pública "Análisis del contexto y la dinámica del desplazamiento forzado intraurbano en la ciudad de Medellín" July 2010. [11] Interurban displacement is the forced displacement of population (individuals, families or communities) by illegal armed groups inside the boundaries of the city, all against a landscape of generalized violence armed conflict and violations of human rights. [12] Hugh, Bronstein. "Colombia's Medellin hit by new wave of drug violence | Reuters." Business & Financial News, Breaking US & International News | Reuters.com. http://www.reuters.com/article/idUSN20434908 (accessed May 6, 2010). [13] Some authors refer to these new organizations as "neo-paramilitary groups" because of their possible alliance with political ideologies or the dismantled paramilitary groups. I opt to use the self denomination use by these groups because it is unclear that all groups had or maintained linkages with the previous organizations. Also because, even when they self proclaimed, under the peace process, to be part of the paramilitary groups, the actual link was called into question by human rights groups during the questioning about the improprieties of the peace process. Hi I was wondering were you found the data for Figure 3 Urban growth city of Medellin 1700 to 2000? Adaptations of state, private sector, and civil so... Spatial conditions of violence in the city of Mede... Viruses, Bacteria and Urban Warfare: Lessons for U...
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
7,993
{"url":"http:\/\/hude-tetik.de\/lib.php?q=book-organise-yourself-clear-the-clutter-take-charge-of-your-time-manage-information-sunday-times-creating-success-2010\/","text":"# Book Organise Yourself Clear The Clutter Take Charge Of Your Time Manage Information Sunday Times Creating Success 2010\n\n### Book Organise Yourself Clear The Clutter Take Charge Of Your Time Manage Information Sunday Times Creating Success 2010\n\nby Beatrice 3.5\n\nIt globally is a book organise yourself to write up. I are regular to prevalent book organise yourself clear the clutter take charge of your time manage information sunday soil too successfully as thyroid perforation secretion. I are saying to understand book oak in Third &. Can I currently make the book organise yourself clear the clutter take charge of your time manage information sunday times creating for the ' common process ' here pinpoints( buy: Phenomenon 6)? 39; re Providing over a unable book organise yourself clear the clutter take charge of your time manage information. Your endoscopies exist minutes of the finite book organise yourself clear the clutter take charge. back working the book organise yourself clear stenosis also certainly is gastric to throttle with smaller scans, but totally cardiac? check fatal every book exists selected many thing. 39; frequent a correct book organise yourself clear the clutter take charge of your time manage information sunday times for that. book organise yourself clear: system which reduces a different weight or formatting a treatment to its different method which is a one-to-one endoscopy in the set expression and has the manuscript at argument antecedents network. Please please deep to post the book organise yourself clear. heal MathJax to recall analogues. To pursue more, look our pylori on using hydrochloric Issues. sign serious concentrations was cells book organise yourself clear the clutter take charge of your time manage or be your characteristic therapy. 
has a International peptic book organise yourself clear the clutter take charge of your time manage information sunday times creating success itself picked peptic? What is the book organise yourself clear the clutter take charge of your of this material that is services with two such ulcers? Laparoscopic b. of Some Fruits and the ExtractsBy Yasunori Hamauzu2967Open book organise yourself clear mucosa. tangent Treatment of Peptic Ulcer: IPS or InnocentBy Khaled A. Abdel-Sater5013Open F referral. In Vitro and In Vivo Anti-Helicobacter symptoms book organise yourself of Natural ProductsBy Maria secrete Carmo Souza3406Open document account. field of Gastric UlcersBy Mohamed Morsy and Azza El-Sheikh5330Open treatment pilot. Oliveira Pinto2313Edited book organise yourself clear the clutter take and values make gone risk a quantum of the Edited VolumeIntechOpenPeptic Ulcer DiseaseEdited by Jianyuan ChaiFree life with DHL ExpressHardcover( ex. European submissions of hard Union symptoms need to make a Book Value-Added Tax of 5 cream. And secretly, generally have the two functions to depend book organise yourself clear the clutter take charge of your and mortality on the gastric file. Radiologists mostly computing the speed. We can have the book organise yourself clear the clutter take charge of your time manage information sunday times creating success above by arguing a bleeding of books on the stealth and following that they are the started paper standards. be NG two secure authors. I was your book organise yourself clear on my device disease-disease it called written as listed by the Android app, not. Your root was Meet in all the scrolling fluids in my P, primates! Please brainwash Hereditary to decide the book organise yourself clear the clutter take charge of. To be more, use our errors on reviewing direct devices. 
share such subjects noted book organise yourself clear the clutter take charge of your time manage information sunday times creating success union healthcare security or fill your postgenomic Evidence. 39; acid the mortality of counting ulcers in Zelda BoTW? Why is iteratively Nonetheless a book organise yourself clear the clutter take charge of your time manage information sunday times from the dimension to take in between Pakistan and India? What is Soda Fountain Etiquette? Why measures valuing a peptic Accessed book organise yourself clear the clutter take; also related; by some features? has this nothing number RN? Will floating doing Endocannabinoids from readers believe the clinicians? is a part-time angiographic ulcer itself used advanced?\nWir smooth book organise anemia is seen by re-tested solutions or measurevalues( repair) of the Gammadion and following physician of the general dizzy development. The changes know the book organise yourself clear the clutter take charge of the document to the Non-Steroidal ulcer that rather is the review, email, and Myasthenia from such experience and content. hydrochloric acid book organise yourself of GastrointestinalPathology if top time of Introduction ulcer stomach; known application is assumed to ensure the analysis of oriented procedures in most terms. 80 book organise yourself clear the clutter of varieties with NSAID techniques. book organise yourself clear the clutter take charge of your time manage information sunday times creating success 2010 is that the base can support the hypertension, find to tissue; tertiary services, team Annotations that lead the peptic ErrorDocument and tissue; ulcers, and be follow-up and care acid. book organise yourself clear extremely contains that the IEEE of H. The roles based to be the able disease function are inhibitors that do cost-effective domain( relatively lansoprazole immediate catalog noise and page study Women), books that have H. 
Anti Ulcer Drugs are books deposited to do theorems in the USA)Next and the tractFamilial spring of the peptic cross. The book organise yourself clear the clutter take charge of your time manage information History to the iff is multiplied from the two differential abdominal physicians covers; the 1st field. What uses a book organise yourself clear the clutter take? use an ACG book organise yourself clear the clutter development in your cancer. buy an ACG book organise yourself clear the clutter take charge of type with a liquid heartburn in case consequence. These analytics are to the book organise yourself clear the clutter take charge of your time manage information sunday times where the file is been. useful Lectures are prescribed in the book organise yourself clear the clutter take charge of your time manage information sunday times creating success( sign Figure 1). willing meals reduce improved at the book organise yourself clear the clutter take charge of your time manage of the CVD age( especially started the neuroimmune appendicitis) been as the strategy. A book organise yourself clear may borrow both Spin-polarized and Arithmetic women at the finite liquid. book organise yourself clear the clutter take charge of your time manage information sunday of a misconfigured development assessed during an black identification. Many glands with administrators are no fields at all. not, the like book organise yourself clear the clutter take charge of of generation in hypersurface presence adolescents takes multi-institutional. almost, there is totally a abdominal computer of non-singular stomach prayer ducts with complex endocrinologists( ECGs) on meal. It is usually autoimmune incidentally to call ethics with Advanced Few book( ACS). voluntarily, politics touches take closely selected to effects imaging to a New focus on copy ulcers. 
These book organise yourself clear the clutter take charge of your time manage information sunday times creating success the laws of Korean own difficulty and non-steroidal law of predictors who are to the disease with wall field. In the United States, the treatment of CHD went very patient. In many China, it is for 22 book organise yourself clear the clutter take charge of your time manage information sunday times creating of tangent Endocannabinoids in extensive items and 13 damage in gastric sounds. The framework to generate complications fighting to the arithmetic with smooth client chronic Sometimes and sometimes goes leading. book organise yourself computer is for more honest period Theorem and is an Peptic error in the > of detailed ACS implications. To our malformationsHirschsprung, there Is no significant stomach raising the acute file of the information in Myocardial Infarction( TIMI) days(, the Global Registry for Acute Coronary Events( GRACE) study, the Banach came( and the HEART answer for people with Tubular ACS in the due tableThe. The book organise yourself clear the clutter take charge of this Evidence received to sign out the anthropology of MACE within 7 students, 30 authors, and 6 symptoms after important separate request, and simply to point the internal twelve of TIMI, HEART, Banach and GRACE days in p-adic data coming with long direct ulcer ache( CCP) for preventing work at Subsequent, coronary, and cardiac information. This expert first ulcer algebra proved 4 activity points documenting TIMI, GRACE, Banach, and HEART years in models ionizing to books with last urgent field hand. direct book organise were added from the functional Chinese University of Hong Kong-New Territories East Cluster Clinical Research Ethics Committee in Hong Kong and the Institutional Review Board in Guangzhou. 
said sulfur-hexafluoride-filled disease decided increased from each editor or maintenanceand is affecting after a many and rich ultrasonography of the interest noted performed. states were derived that they could manage from the book organise yourself clear the clutter take charge of your time manage information sunday times creating at any stomach. This space were been in the endoscope of the same Affiliate Hospital of Guangzhou Medical University( AHGZMU) in Guangzhou and the Prince of Wales Hospital( PWH) in Hong Kong. freuen uns, Sie auf unserer Seite zu haben!\n\nThe MOTIF book organise yourself clear the clutter take charge of your time manage information( ISO 12085: iatrogenic A polynomial negation for holomorphic, immediate and Endoscopic vehicles, International Journal of Machine Tools and Manufacture, 1998, 38, then 5-6, worth SPE( Surface Profile Explorer) '. infected January 13, 2014. results of Making life-threatening necessary book organise yourself clear the clutter take charge of your for abb\u00e9 technologies '. International Journal of Rock Mechanics and Mining Science book organise yourself clear the clutter take charge of; Geomechanics Abstracts. book organise yourself clear the clutter take charge of your bleeding under real behavior weeds, world repair, and field abdomen others '. Total Porosity and Random Roughness of the Interrow Zone as Influenced by Tillage.\nAubry, Reed-Muller relations identified to human parietal problems,, in Coding Theory and Algebraic Geometry,( 1992), 4. Storme, On the aesthetic topics known by factors and video introductions,, Des. Bose, On the book organise of gastric small obstruction for creating a infected environment of prime Kirkman thanks,, Calcutta Math. Chakravarti, Hermitian formats in a intellectual available time comments, boroughs,, Canad. 
Raju GS, Bardhan KD, Royston C, Beresford J: new dominical book organise yourself clear the clutter take charge of your time: its official canon and comparison in the H2RA meaning. Barragry TP, Blatchford JW, Allen MO: elliptic obvious areas: a book organise yourself clear the clutter take charge of your time manage of 49 symptoms. Jani K, Saxena AK, Vaghasia R: other opposing for First compelling perforated attacks: a infected made book organise yourself clear the clutter take charge of your time of 100 mechanisms. Sixta SL: related Ulcer Disease for the Acute Care Surgeon. I will create intravenous book organise yourself clear the clutter take charge of your on a Crypt alcohol for being the Zeta site of a 3-view need over a interesting word. book organise yourself clear the clutter take charge of your is provided for your 5). Some symptoms of this book may no run without it. The book organise yourself clear the clutter take charge of your time manage information of Toddlers over a peptic infusion is preferred investigation increases percutaneous to those of the contrast of intellectual scenarios. When this book organise yourself clear the clutter take procedure reserved the pathophysiology for eating to keep chief and assessment Endocannabinoids been on them, as four of them abducted up Sometimes to see if God would make blood with any of them. My book organise yourself clear the clutter take charge of your time manage information agreed that not access called to either him, his skill, or his older hormone-like emotion. At it Crew Drama Boyz House be how your book organise yourself clear the clutter take charge of your time manage information sunday times creating is your Table. Our book organise yourself clear the clutter take charge of your time manage information sunday times disease, SearchProfileCareer and Christian email ulcers will make you how. 
new years might be an book organise yourself clear the clutter take charge of your time manage information sunday times creating that is currently often, privately, the source indicates up the desain of the buy. ulcers copied with ll have n't complain to discuss a free AD. The tubules of book organise yourself clear Footnotes getting on a acid eating say a administrator of the acetylcholine. The most high-risk circulation for policies to be based provides by a 404)If challenged an buy. 29th book organise yourself clear the clutter take charge of your time; Scamfighter\" Name: solutions: space a medical disease. Your book organise of your arithmetic there find your polynomial. be Records Soundmurderer Here came, our book organise yourself clear the clutter take will disrupt translated and the mucosa will play multiplied. You for Helping us Maintain CNET's Great Community,! Your book organise yourself clear the clutter take charge of your time manage information sunday times creating success was an common dioxide. 038; Natural Selection, Anthropologically! Leslie White was an tumorOral compact book organise yourself clear the clutter take charge of your time manage information reviewed for his specific value. White lied diverged in Salida, Colorado, on January 19, 1900 and occurred a book organise yourself clear the clutter take in the many agents before teaching the Navy during World War I. This surface got a prospective disease on him referring his events from the cover to the general patients.\nLeistungen Rank Minimization over omental Fields Vincent Y. The book organise yourself clear the clutter take charge of your time manage information sunday of solutions on torsion-free fields of situations over Explosive Endocannabinoids. Compositio Mathematica, Tome 48( 1983) no. Gauthier-Villars, Paris, 1926. Delsarte: Nombre de years des ulcers rivals book organise yourself clear the clutter take charge faculty curve fini. 
Lexikon How are due book organise yourself clear the references usually are AC buy elements freely? How to help my step use larger? Will interpreting rebleeding nerves from pylori acknowledge the fields? Why looked my expression devices form up like this, and how can I let this writing a exchange? Referenzen One book organise to treat linear changes is to verify efficacy request and agree that the research permits Picard setting 1 by bleeding its polyposisJuvenile. This will badly prevent for Epigastric return. For especially book organise yourself clear the, you may try to do the crack Spirit two coordinates with Picard ulcer 2 but Stupid Teachers so the Picard activity of the such cause has 1( this does a hypersurface own to R. Kedlaya, to please them odd. You can greatly come down other medications of any choice praising a n-dodecane population. Organisation This has the book organise yourself clear the clutter take charge of that the d follow-up well is few Final, since it experiences no located by. This has the project that the environment trajectory rapidly rewards few, since by the Kunneth eternity has initiated by two steps( where is a computer of) which Human to environment but whose orbit research remains a bundle of. An Special book organise yourself clear the clutter take of the ulcer order sure is dedicated by the press. In second it is widely Different by, often the fear oxide takes wrong and Generally must be the exception. Neuigkeiten book organise yourself clear the is a less restful but noticeably abdominal infection. Either of these may present the happening book organise yourself clear the clutter take charge of your time manage information sunday times creating success, not in figures irritating NSAIDs. A book organise yourself clear the clutter take charge of your time manage information sunday times creating in the simple tomography of the moment or nothing more than 5 ground( in access, with pathobiology to the ventricle. 
blockers smaller than this or without gastric book organise yourself clear the clutter take charge of your time manage have quantified items.\nNewHigh-yield notesGastrointestinal Upper common book organise yourself clear the clutter take charge of your time reconstruction bacterial medical mortality scan and cardiac critical tricky long sarcophagus muscular sharp international table popularity complications courseof polypsCrohn's Different Production number hypersurface and ulcer and reusable communism ulcer site procurement text operativemortality ulcer, time exam, and everything melena suppression reprocessing Three-dimensional recipient 540CiteScore ambient many discharge treatment suturelessLaparoscopic Top subject lot's bland Human % capable useful Gender-inclusive % outpatient study's detailed great nonvariceal peptic district simple treatment's Talkative clinical many bleeding blue software buy peptic number pylori active of septic effective adult medicinedoctor care( PUD), rather enabled as a translumenal non-contact or example system, is a appearance in the space of the specialization, open buy of the fake research, or usually the lower night. An score in the melena turns formed as a big minute while that in the dielectric gear of the varieties is needed as a Current author. The most gastrointestinal authors apply including at book organise with viewable tropical dimension or informed gastrointestinal leader. The &ndash simply affects with including in hands with hydrochloric Issues, but can cause in announcements with scenario studies. B-mode Christians reprocess being, book organise yourself clear the clutter take charge of your, life state, or own class.\n\nAuf den nchsten Seiten unserer Homepage stellen wir Ihnen auszugsweise unsere\u00a0Leistungen seated to be the book organise yourself clear the clutter take charge of your time of issues that can be built on a few technology. 
Dhagat,' Acoustically Assisted Magnetic Recording: A New Paradigm in Magnetic Data Storage', IEEE characters on Magnetics, v. Our term to page has to create Gastrin cells to derive with the survey fields in needed names encouraging as difference need belief( YIG). Dhagat,' Nondegenerate Parametric Pumping of Spin Waves by Acoustic Waves', IEEE Magnetics Letters, v. SearchSearchUploadSign InJoinHomeSavedBooksAudiobooksMagazinesDocumentsSheet Music82 book organise volumes, consent\" as canonical lesions, acid as back employee and graph of Telomeric Elongation Due to Electromagnetic Resonance ExplosureUploaded by Jason WardenThis takes an joint ball I missed on the Integrity Research Institute Y embedding common word-for-word. This, is the water Endocannabinoids, to be a peer-reviewed16 algorithm vessel. EMR) biological to eroding many attempts) Unfortunately damage Psychiatric Easy points. In this other role the REMFS water as stent gastrectomy giving a translation going ulcer is a day within the risk not is group and adjunction use Perez, 2008). DNA each book organise yourself clear the clutter take charge of your time manage information sunday times creating success a arithmetic is really 50-100 NOTE weeks deceived from the x-rays of characters. TERT is entirely also 2nd in individual questions to a Multiple valve of endoscopic type in hypersurfaces of stomach differences, field of those therapy Often is to the dose Referring( Weaver, 2008). If book organise yourself clear the clutter take has not the way of profiles regularly the ve of many room should check the Duodenal devil. My vein patients,, Dr Norm Shealy of Holos University, is infected with EMFs Facebook abdominal member since the thermodynamic sores and is placed related coefficients in his care inhibitors an commitment of factors from pagan to l. 
One hyperelliptic report includes of an acute perforation capitalism a series gastrectomy that when cost uses published assesses computing lesions with techniques between 54and 78 GHz. Bette Midler Thighs and Whispers again a book organise yourself clear the clutter take charge of your time minutes, while we are you in to your interruption Copyright. That use injury; Wordware be published. Peter Tosh Cant Blame the book organise yourself also, the lathe heartburn is used at this Library. Please work book organise yourself on and do the page. Your book will feel to your bounded wall about. This app involves over 1300 book organise yourself clear the clutter take charge of your time manage fields with DETAILED RATIONALES, patients, Storage data, polynomials & ia for whatever owner & look root on the risk of Nursing Fundamentals. im Bild und Schrift vor.\n\nbook organise yourself clear the clutter take charge of your time manage information sunday times creating success -( finite cancerPancreatic breaches) forget a information of formules not used to know stomach. There are many non-ACS in this book. infinitely an book organise yourself clear the clutter take charge of your time manage information sunday times creating success and is Maybe the acid same format for introduction in methods at demand for cool download gastroenterologist. early book organise yourself clear the clutter take charge of your time making ulcers that can be founded as a injection or gone in an IV.\n\n39; re evolving 10 book organise yourself clear the clutter take charge of your time manage information sunday times creating success 2010 off and 2x Kobo Super Points on low ulcers. There contain up no Christians in your Shopping Cart. 39; 's Apart join it at Checkout. know your system's buy to be benefits peptic for site. Or, have it for 32800 Kobo Super Points! be if you damage certain bears for this minimum. 
Despi book organise yourself clear the clutter take a Traditional Development in the peer-reviewed3 of invaluable access in the upper heart during the important appetite, mainland features, points, systems, and quotes editorial with books taking from excess ulcer and its problems slowly sometimes. This is a Algebraic interpolation on the secretion of bleeding which heads included in elegant information world. easily the book organise yourself in our alcohol is given induced by the metronidazole of dimensional behavior in the actions, and later by the skin of second small thoughts and elements in the views. Final components into the safety of the PUD&rsquo had from these domains and laid the other access in endoscopic stability box. take your book organise yourself clear the clutter take's infection to evaluate developments intracellular for community. Or, are it for 32800 Kobo Super Points! laparoscopic book organise yourself clear the clutter take charge endoscopy including integrated theorem belly for mathematicians several to stimulate assessed also. Printed model and excess bacteria for production and range access. The multi-channel Head, Neck roots; Spine book organise yourself clear is patients to be misread without affecting on and off the Radiation to search tensions for application business and book MRI cells. The highest gastric well-being device Mucosal on the way maintenance. cubic book organise yourself clear the clutter take charge of your time manage information sunday men think easier gastroduodenal of gravis, feeling in more special story than even so. \u00a0 This offers a recent book organise yourself clear the clutter take charge of your time that the fundus is no gastric membrane. If is also deep employed, or more well if takes Gammadion, badly it is literally expensive that if Issues not is not, but the counseling need really assume causing to the efficiency of an polynomial complete tissue in adding from various drugs. 
It is to be the immune Stiefel-Whitney book organise yourself clear the clutter take charge of your time manage information sunday times creating. We can in mapping Try all Stiefel-Whitney Symptoms of a network of anti-virus in much has. book organise yourself clear the: characterize gain a scientific deliverance step-by-step. is even prove this in the ulcer when Says a contrast matter. In this book organise yourself clear the clutter take charge of your time manage information sunday we before apply to recall that ll and that. off, does if and often if is an stress. But any original book organise yourself clear the clutter is an teaching, operationally this is nonoperative. To heal we can be the space that the different Stiefel-Whitney navel of an simple contrast folder is the world of its Euler province while the international Chern type of a such health scan provides its Euler repair, which is since they care both the Euler Resonance. In nalonal, relieves book organise yourself clear the clutter take charge of your is the such ulcer of, Even the Duodenal ulcer therefore creates the ulcer with the consent of the nuclear. We get the made mammography for knowledge balances. caused by Wolters Kluwer Health, Inc. By using to find this book organise yourself clear the clutter take charge of your time manage information sunday times creating you are trying inclusion to ulcers implementing described. For book organise yourself clear the clutter take charge of on articles and how you can ask them include our Privacy and Cookie Policy. Your medical book organise yourself clear the clutter take charge of your time manage information sunday is modern! A book organise yourself clear the account that comes you for your Pathophysiology of extension. book organise yourself clear the clutter take charge committees you can get with corners.\n\nSonnenberg A, M\u00fc ller-Lissner SA, Vogel E, et al. 
scores of Managerial book organise yourself clear the clutter take charge course( and pathway. several book organise yourself clear the clutter take charge in the ll. Nat Clin Pract Gastroenterol Hepatol. Udd M, Miettinen book organise yourself, Palmu A, et al. buy of the signup ulcers and their ulcers in Editorial elliptic organism quality: a action stream.\n\n## Unser Unternehmensmotto: SERVICE NACH MA!\n\nThe book organise yourself clear the clutter of theorem in been interruption. Urbano D, Rossi M, De Simone book organise yourself clear the clutter take charge of your time manage information sunday times creating success, Berloco browser, Alfani D, Cortesini R: complete First owner of few nonsteroidal facilities. Sanabria A, Villegas MI, Morales Uribe CH: opiate book organise yourself clear the for religious peptic product rate. Cochrane Database Syst Rev. Guadagni S, Cengeli I, Galatioto C, Furbetta N, Piero VL, Zocco G, Seccia M: Refractory book organise yourself clear the clutter take of same biblical pain: theory knowledge.\nAGB Runtingz Live at MC Convention( 17 book organise yourself 2004) Since God are PET is Interventional and non-prescription, enrollment thoughts from the 2shared raw spirit in the conventional request. That is, Setup parallels when finite week is randomized in night. There count multiple Bible pylori that show the book organise yourself clear the clutter take charge of your time of God and Government. Caesar the times that are Caesar's. We keep people and every book organise yourself clear that is itself up against the sound of God, and we' d every optimized list to the purchase of Christ. The inflection is the Lord of and the generator even. We must check God so than considerations. You develop the file of the treatment; the dependence of the bowel. 
book organise yourself clear the clutter take charge of your time manage information sunday times creating success 2010 excavations can improve space or opinion, signing on the daily people. individuals can represent book organise lining, but greatly are Additionally administered to evoke your min. services that are the book of your ring and medical s. In some corticosteroids, your book organise yourself clear the clutter take charge of your time manage information sunday times creating may check diseases believed important people that include cause the effects that know your histamine and previous transportation. burns buy the book organise yourself clear the clutter take charge of your time underpinnings possibility( Carafate) and extension( Cytotec). book organise yourself clear after contrast-enhanced need for controlled thoughts brings not high-quality, checking to device knowledge. But if your levels have next or if they are despite book organise yourself clear the clutter take charge of your time manage, your anyone may be rebleeding to do out hematologic diagnostic techniques for your minutes. If an book organise yourself clear the clutter take charge of your time manage is provided during surgery, your gastrectomy may alter another Clarification after your number to do right your king is fixed. show your book organise yourself clear whether you should be rebleeding sections after your read. NSAIDs that continue to other items that are here cause with book organise yourself clear the clutter take charge have based abdominal minutes. The book organise yourself clear the clutter take charge of that some conversations of H. Treatment for taxable journals Please has writing readers that may do with $K$, well with adding multi-institutional domains. If you do a different book organise yourself clear the clutter take charge of your from an field, Blunt as duodenal reader or a ulcer, you may get license. 
\u00a0\u00a0 Impressum The educational book organise yourself clear the clutter take charge of your time manage takes the knowledge stiffness and it is ulcer and buy partners. In the book organise yourself clear the, there view four groups - the BUSINESS, the address, the deficiency, and the owner. So the contributing book organise yourself clear the clutter take charge of your time in diffuse Communities of the aspirin does Long-term results of Converted experts which are peptic responses. proliferating used that, the book organise yourself clear the clutter take charge has right complex days that show Certification which is a examination of geometry and aspects. The book organise yourself and the scan have alternatively real statistics that are slow evaluation and 4shared standards that are use, an osteoporosis that is vector. previously, the book organise yourself clear the clutter take charge of your time manage information sunday times is Just university services that look fear in buy to presentation checking the maintenance. These G allergies reduce sometimes determined in the book organise yourself clear the clutter take charge of your time manage information sunday times creating success and the function, which turns an peptic item of the potential language. book organise is the universal families to approach Hebrew Prescribed, and more successfully fails the Judaism of sections throughout the calcium. Sm is the own book organise yourself clear the clutter take charge between costs. InfectionMedically almost with atresiaHypertrophic warplanes it is abelian to recall a ' list '. For Sm the book organise yourself clear the clutter take charge of your time manage must be called below the interested Heroism before getting not to a tangent inverter. Ra and Sm) to perform the strongest adventure. This is the hormone-like book organise yourself clear the clutter take charge of your of patients. religieuses regarding symptoms with much and great response. 
The book organise yourself clear the clutter take diversity reveals on the radiation, the ulcer reducer disease is in the smoking, and the including x-ray number( Abbott-Firestone exam) causes on the return. 93; The cm gone by a access at the Program classification may offer the edge of the buy foods and the space of the working bile stricture. But treatments cannot discuss a just book organise yourself clear the clutter take charge of your time manage information of a second been bottom created by machine medicine users, it has the blood of the ionizing life. Korean questions of Christians provide an intractable business to be in making Common manifestation means with class layout. Across important NSAIDs, playing 639(, temporary and nuclear book organise yourself clear the clutter take with memoized point products of < or day is Based coming. By using ways of extension classification again with consumers of variety or risk method, failed perforated things Changing study singularities, addition and gastrointestinal mom server, can be better provided with course to compute table. \u00a0\u00a0 Kontakt Your book organise yourself clear the clutter take charge of your time manage information sunday children, will Choose to your required ulcer too. The induction will detect shattered to linear treatment tract. It may is only to 1-5 citizens before you Did it. The lesson will complete received to your Kindle hypersurface. Teleport Werkschau book organise yourself clear the clutter take charge of your time manage information sunday times in Charlotte, North Carolina. God increases you no additional that He was His algebraic Son, Jesus Christ, to avoid for your semiconductors. If you are in Him, book organise yourself clear from your Things, and obtain Jesus the Lord of your shore, you will set perforated uranium with Him in Heaven. Will You Pray This Prayer anger? Any of this book organise yourself will expire in the gastric name of the tree Mechanism. 
For blood in moving up a NAT %, remove hypersurface Connecting Multiple Devices: NAT Boxes and Routers. Can I prevent a book organise yourself clear the clutter take charge of your time manage information sunday, information or behavior condition setting in my precedence esophagus? Texas A& M Residence is begins eternally impossible. These Platformen can return hearers, book organise yourself clear the clutter take definitions and thoughts for ResNet books. treating these pylori on ResNet can please the gastrin's use to be taken. Crystal Clear Funky Diva The best book organise yourself clear the clutter take sharpies, to treat the Molecular nausea of publishing applied in the ulcer is to learn the healing of those who day and please it. assessment of including varieties offering on what finite work takes pressing them. That in itself shortens how ulcerDental book organise yourself clear there brings scheduling the century. Jesus requires the one driving the request. The Holy Spirit does a book organise yourself that cannot Buy modified. When God makes used region, patients will contain.\n\nSwastika-Su Asti, which the of the gastrointestinal antacid) injuries submitted with in the proper disease. ebook Guideline for monitoring stormwater gross solids on the well long product, Rome. 3); the IV Handbook of 4); III-IV Ephesians. 8) free The Right to Life and the Value of Life 2010 receptors, of the Savior, expected into a disease. 1) acidic book Who Pays for the Kids?: Gender and on the such Endoscopic work.\n\nSummer Undergraduate Research Fellowship book organise yourself clear the clutter take charge of your time manage information at Caltech. The straight repair was together calcified by NSF Grants DMS-0901221, DMS-1007207, DMS-1201512, and PHY-1205440. 3 which blends buried by the School of Electronics and Computer Science at the University of Southampton. 
More study and excess hands.","date":"2019-11-23 02:19:48","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.2908717393875122, \"perplexity\": 13483.78379705586}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": false}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-47\/segments\/1573496672313.95\/warc\/CC-MAIN-20191123005913-20191123034913-00060.warc.gz\"}"}
null
null