text
stringlengths
1
2.15M
meta
dict
// wxGUI.cpp
//
// wxWidgets front end for p7zip's 7zG: boots a hidden main frame, then runs
// the real 7-Zip command handling (Main1) on a worker thread while the wx
// event loop stays available to service dialog requests.

#include "StdAfx.h"

// For compilers that support precompilation, includes "wx/wx.h".
#include "wx/wxprec.h"

#ifdef __BORLANDC__
#pragma hdrstop
#endif

// for all others, include the necessary headers (this file is usually all you
// need because it includes almost all "standard" wxWidgets headers)
#ifndef WX_PRECOMP
#include "wx/wx.h"
#endif

// The bundled XPM declares its pixel data "static char *...": rewriting
// "static" as "const" for the duration of the include turns it into a
// const array usable by wxICON below.
#define static const
#include "../GUI/p7zip_32.xpm"
#undef static

// Keep the interactive dialog self-tests (the TRIES section below)
// compiled out by default.
#undef ACTIVATE_DIALOG_TESTS

// Real 7zG entry point, defined elsewhere; run on the worker thread.
int Main1(int argc,TCHAR **argv);

#include "Windows/Registry.h"
using namespace NWindows;
using namespace NRegistry;

#include "Common/StringConvert.h"
#include "Windows/FileDir.h"
#include "Windows/Synchronization.h"

#include "ExtractRes.h"

#include "../Explorer/MyMessages.h"

#include "ExtractGUI.h"
#include "UpdateGUI.h"
#include "BenchmarkDialog.h"

#include "../FileManager/RegistryUtils.h"

using namespace NWindows;
using namespace NFile;

#include "../FileManager/ProgramLocation.h"

// Sub-directory (relative to the program folder) that holds the help files.
static LPCWSTR kHelpFileName = L"help/";

// Open help topic `topicFile` (relative to the help directory) in the user's
// default browser. `hwnd` is kept for signature compatibility with the
// Windows HtmlHelp-based implementation but is only used in the debug trace.
void ShowHelpWindow(HWND hwnd, LPCWSTR topicFile)
{
  UString path;
  if (!::GetProgramFolderPath(path))
    return;
  path += kHelpFileName;
  path += topicFile;
  printf("ShowHelpWindow(%p,%ls)=>%ls\n",hwnd,topicFile,(const wchar_t *)path);
  // HtmlHelp(hwnd, GetSystemString(path), HH_DISPLAY_TOPIC, NULL);
  wxString path2(path);
  wxLaunchDefaultBrowser(path2);
}

////////////////////////////// TRIES ///////////////////////////////////
// Manual test harness for the individual dialogs. Compiled out by the
// #undef ACTIVATE_DIALOG_TESTS above; enable it to drive each dialog from
// the command line via Main3 below.

#ifdef ACTIVATE_DIALOG_TESTS

// Show `message` in a modal error box titled "7-Zip GUI".
static void ErrorMessage(const wchar_t *message)
{
  MessageBox(0,message, wxT("7-Zip GUI"),wxICON_ERROR);
}

#include "../FileManager/PasswordDialog.h"
#include "../FileManager/MessagesDialog.h"
#include "../FileManager/OverwriteDialog.h"
#include "Windows/Thread.h"

// Report a test outcome in a simple modal box.
void myErrorMsg(const wchar_t *message)
{
  MessageBox(0,message, wxT("Message"),wxICON_ERROR);
}

// Manual test: fill a CMessagesDialog with dummy messages and report which
// button closed it.
void testCMessagesDialog()
{
  UStringVector Messages;
  Messages.Add(L"message 1");
  Messages.Add(L"message 2");
  Messages.Add(L"message 3");
  Messages.Add(L"message 4");
  Messages.Add(L"message 5");
  Messages.Add(L"message 6");
  Messages.Add(L"message 7");
  Messages.Add(L"message 8");
  Messages.Add(L"message 9");

  CMessagesDialog messagesDialog;
  messagesDialog.Messages = &Messages;
  int ret = messagesDialog.Create( 0 ); // ParentWindow

  if (ret == IDOK)
    myErrorMsg(wxT("CMessagesDialog => IDOK"));
  else if (ret == IDCANCEL)
    myErrorMsg(wxT("CMessagesDialog => IDCANCEL"));
  else
    myErrorMsg(wxT("CMessagesDialog => ?"));
}

// Manual test: populate a COverwriteDialog with two fake file descriptions
// (old/new name, size, timestamp) and report which answer was chosen.
void testCOverwriteDialog()
{
  SYSTEMTIME systemTime;
  GetSystemTime( &systemTime );

  // Fake "existing file" and "new file" attributes; the pointer indirection
  // mirrors the original caller's optional-parameter style.
  const wchar_t *existName = L"existName";
  FILETIME data_existTime;
  FILETIME *existTime = &data_existTime ;
  UInt64 data_existSize = 1234;
  UInt64 *existSize = &data_existSize;
  const wchar_t *newName = L"newName";
  FILETIME data_newTime;
  FILETIME *newTime = &data_newTime;
  UInt64 data_newSize = 45678;
  UInt64 *newSize = &data_newSize;
  Int32 data_answer=0;
  Int32 *answer = &data_answer; // note: set up but never read afterwards

  SystemTimeToFileTime( &systemTime , &data_existTime);
  SystemTimeToFileTime( &systemTime , &data_newTime);

  COverwriteDialog dialog;

  dialog.OldFileInfo.Time = *existTime;
  dialog.OldFileInfo.TimeIsDefined = true; // FIXME : look again at the sample !
  dialog.OldFileInfo.SizeIsDefined = (existSize != NULL);
  if (dialog.OldFileInfo.SizeIsDefined)
    dialog.OldFileInfo.Size = *existSize;
  dialog.OldFileInfo.Name = existName;

  if (newTime == 0)
    dialog.NewFileInfo.TimeIsDefined = false;
  else
  {
    dialog.NewFileInfo.TimeIsDefined = true;
    dialog.NewFileInfo.Time = *newTime;
  }
  dialog.NewFileInfo.SizeIsDefined = (newSize != NULL);
  if (dialog.NewFileInfo.SizeIsDefined)
    dialog.NewFileInfo.Size = *newSize;
  dialog.NewFileInfo.Name = newName;

  /*
  NOverwriteDialog::NResult::EEnum writeAnswer = NOverwriteDialog::Execute(oldFileInfo, newFileInfo);
  */
  INT_PTR writeAnswer = dialog.Create(NULL); // ParentWindow doesn't work with 7z

  switch(writeAnswer)
  {
    case IDCANCEL: myErrorMsg(wxT("COverwriteDialog => IDCANCEL")); break;
    case IDNO: myErrorMsg(wxT("COverwriteDialog => IDNO")); break;
    case IDC_BUTTON_OVERWRITE_NO_TO_ALL: myErrorMsg(wxT("COverwriteDialog => IDC_BUTTON_OVERWRITE_NO_TO_ALL")); break;
    case IDC_BUTTON_OVERWRITE_YES_TO_ALL:myErrorMsg(wxT("COverwriteDialog => IDC_BUTTON_OVERWRITE_YES_TO_ALL")); break;
    case IDC_BUTTON_OVERWRITE_AUTO_RENAME:myErrorMsg(wxT("COverwriteDialog => IDC_BUTTON_OVERWRITE_AUTO_RENAME")); break;
    case IDYES: myErrorMsg(wxT("COverwriteDialog => IDYES")); break;
    default: myErrorMsg(wxT("COverwriteDialog => default")); break;
  }
}

// Manual test: run a CPasswordDialog and echo back the entered password.
void testCPasswordDialog()
{
  CPasswordDialog dialog;
  int ret = dialog.Create(0);
  if (ret == IDOK)
  {
    UString Password = dialog.Password;
    UString msg = wxT("CPasswordDialog => IDOK password=\"");
    msg += Password;
    msg += wxT("\"");
    myErrorMsg(msg);
  }
  else if (ret == IDCANCEL)
    myErrorMsg(wxT("CPasswordDialog => IDCANCEL"));
  else
    myErrorMsg(wxT("CPasswordDialog => ?"));
}

// Worker that feeds fake progress updates into a CProgressDialog and then
// closes it, so the dialog can be exercised without a real operation.
struct CThreadProgressDialog
{
  CProgressDialog * ProgressDialog; // dialog driven by Process(); not owned

  // Thread trampoline: forwards to Process() and stores its result.
  static THREAD_FUNC_DECL MyThreadFunction(void *param)
  {
    ((CThreadProgressDialog *)param)->Result = ((CThreadProgressDialog *)param)->Process();
    return 0;
  }

  HRESULT Result; // result of Process(), set by MyThreadFunction

  // Push a scripted sequence of progress updates (10% / 50% / 100%) with
  // one-second pauses, then ask the dialog to close itself.
  HRESULT Process()
  {
    Sleep(1000);
    int total = 1000;
    ProgressDialog->ProgressSynch.SetTitleFileName(L"SetTitleFileName");
    ProgressDialog->ProgressSynch.SetNumFilesTotal(100);
    ProgressDialog->ProgressSynch.SetNumFilesCur(1);
    ProgressDialog->ProgressSynch.SetProgress(total, 0);
    // ProgressDialog.ProgressSynch.SetRatioInfo(inSize, outSize);
    // ProgressDialog.ProgressSynch.SetCurrentFileName(name);
    ProgressDialog->ProgressSynch.SetPos(total/10);
    ProgressDialog->ProgressSynch.SetCurrentFileName(L"File1");
    Sleep(1000);
    ProgressDialog->ProgressSynch.SetPos(total/2);
    ProgressDialog->ProgressSynch.SetCurrentFileName(L"File2");
    Sleep(1000);
    ProgressDialog->ProgressSynch.SetPos(total);
    ProgressDialog->ProgressSynch.SetCurrentFileName(L"File3");
    Sleep(1000);
    ProgressDialog->MyClose();
    return 0;
  }
};

// Manual test: show a CProgressDialog fed by the scripted worker above and
// report how it was dismissed.
void testCProgressDialog()
{
  CProgressDialog ProgressDialog;
  CThreadProgressDialog benchmarker;
  benchmarker.ProgressDialog = &ProgressDialog;
  NWindows::CThread thread;
  thread.Create(CThreadProgressDialog::MyThreadFunction, &benchmarker);
  // void StartProgressDialog(const UString &title)
  int ret = ProgressDialog.Create(L"testCProgressDialog", 0);
  if (ret == IDOK)
    myErrorMsg(wxT("CProgressDialog => IDOK"));
  else if (ret == IDCANCEL)
    myErrorMsg(wxT("CProgressDialog => IDCANCEL"));
  else
    myErrorMsg(wxT("CProgressDialog => ?"));
}

// Manual test: open the modal dialog with resource id `num` and report how
// it was dismissed.
void testDialog(int num)
{
  NWindows::NControl::CModalDialog dialog;
  printf("Generic Dialog(%d)\n",num);
  int ret = dialog.Create(num, 0);
  if (ret == IDOK)
    myErrorMsg(wxT("Generic Dialog => IDOK"));
  else if (ret == IDCANCEL)
    myErrorMsg(wxT("Generic Dialog => IDCANCEL"));
  else
    myErrorMsg(wxT("Generic Dialog => ?"));
}

// Manual test: exercise MessageBoxW with yes/no/cancel buttons.
void testMessageBox()
{
  int ret = MessageBoxW(0, L"test yes/no/cancel", L"7-Zip", MB_YESNOCANCEL | MB_ICONQUESTION | MB_TASKMODAL);
  if (ret == IDYES)
    myErrorMsg(wxT("MessageBoxW => IDYES"));
  else if (ret == IDNO)
    myErrorMsg(wxT("MessageBoxW => IDNO"));
  else if (ret == IDCANCEL)
    myErrorMsg(wxT("MessageBoxW => IDCANCEL"));
  else
    myErrorMsg(wxT("MessageBoxW => ?"));
}

// Manual test: write the language setting to the registry emulation and
// read it back.
static void testRegistry()
{
  SaveRegLang(L"fr");
  UString langFile;
  ReadRegLang(langFile);
  printf("testRegistry : -%ls-\n",(const wchar_t *)langFile);
}

int Main2(int argc,TCHAR **argv);

// Test dispatcher: argv[1] selects which dialog test to run; case 0 strips
// the selector argument and falls through to Main2.
int Main3(int argc,wxChar **argv)
{
  testRegistry();

  int num = -1;
  if (argc >=2 )
  {
    num = argv[1][0] - L'0'; // only the first character of argv[1] is used
  }
  printf("num=%d\n",num);
  switch(num)
  {
    case 0:
    {
      // Rebuild argv without the "0" selector and delegate to Main2.
      // note: argv2 is never freed — acceptable here since Main2's return
      // ends the test run.
      TCHAR **argv2 = (TCHAR **)calloc(argc,sizeof(*argv));
      argv2[0] = argv[0];
      for(int i = 2; i < argc; i++)
        argv2[i-1] = argv[i];
      return Main2(argc-1,argv2);
    }
    // TODO Benchmark
    // TODO CCompressDialog
    // TODO CExtractDialog ?
    case 1 : testCMessagesDialog(); break;
    case 2 : testCOverwriteDialog(); break;
    case 3 : testCPasswordDialog(); break;
    case 4 : testCProgressDialog(); break;
    case 5 : testMessageBox(); break;
    case 9 :
      if (argc >= 3)
      {
        AString str = GetAnsiString(argv[2]);
        int num = atoi((const char*)str);
        testDialog(num);
      }
      else
      {
        printf("usage : 7zG 9 <windowID>\n");
      }
      break;
    default :
      printf("usage : 7zG number\n");
  };
  return 0;
}

#endif // ACTIVATE_DIALOG_TESTS

// Registry location of the UI language setting. Note the forward slash:
// presumably this is p7zip's registry-emulation path form, not the native
// Windows backslash form — TODO confirm against Windows/Registry.h.
static const TCHAR *kCUBasePath = TEXT("Software/7-ZIP");
static const WCHAR *kLangValueName = L"Lang";

// Persist the selected language file name under HKCU/Software/7-ZIP/Lang.
void SaveRegLang(const UString &langFile)
{
  CKey key;
  key.Create(HKEY_CURRENT_USER, kCUBasePath);
  key.SetValue(kLangValueName, langFile);
}

// Read the saved language file name; `langFile` is left empty when the key
// is absent or cannot be opened.
void ReadRegLang(UString &langFile)
{
  langFile.Empty();
  CKey key;
  if (key.Open(HKEY_CURRENT_USER, kCUBasePath, KEY_READ) == ERROR_SUCCESS)
    key.QueryValue(kLangValueName, langFile);
}

//////////////////////////////////

#define NEED_NAME_WINDOWS_TO_UNIX
#include "myPrivate.h" // global_use_utf16_conversion

// Copy the non-empty arguments into `parts`. Arguments are deliberately
// NOT trimmed: a bare " " is a valid filename.
void mySplitCommandLineW(int numArguments, TCHAR **arguments,UStringVector &parts)
{
  parts.Clear();
  for(int ind=0;ind < numArguments; ind++)
  {
    UString tmp = arguments[ind];
    // tmp.Trim(); " " is a valid filename ...
    if (!tmp.IsEmpty())
    {
      parts.Add(tmp);
      // DEBUG printf("ARG %d : '%ls'\n",ind,(const wchar_t *)tmp);
    }
  }
}

// ----------------------------------------------------------------------------
// private classes
// ----------------------------------------------------------------------------

// Hidden main frame: exists so the worker thread has a top-level window to
// post WORKER_EVENT dialog requests to (see OnWorkerEvent / myCreateHandle).
class MyFrame: public wxFrame
{
public:
  // ctor
  MyFrame(wxFrame *frame, const wxString& title, int x, int y, int w, int h);
  // virtual ~MyFrame();

  // operations
  void WriteText(const wxString& text) { m_txtctrl->WriteText(text); }

protected:
  // callbacks
  void OnWorkerEvent(wxCommandEvent& event);

private:
  // just some place to put our messages in
  wxTextCtrl *m_txtctrl;

  DECLARE_EVENT_TABLE()
};

enum
{
  WORKER_EVENT=100 // this one gets sent from the worker thread
};

BEGIN_EVENT_TABLE(MyFrame, wxFrame)
  EVT_MENU(WORKER_EVENT, MyFrame::OnWorkerEvent)
  // EVT_IDLE(MyFrame::OnIdle)
END_EVENT_TABLE()

// My frame constructor: sets the 7-Zip icon and creates a zero-sized,
// read-only text control (the frame itself is never shown).
MyFrame::MyFrame(wxFrame *frame, const wxString& title, int x, int y, int w, int h)
  : wxFrame(frame, wxID_ANY, title, wxPoint(x, y), wxSize(w, h))
{
  this->SetIcon(wxICON(p7zip_32));

#if wxUSE_STATUSBAR
  CreateStatusBar(2);
#endif // wxUSE_STATUSBAR

  m_txtctrl = new wxTextCtrl(this, wxID_ANY, _T(""), wxPoint(0, 0), wxSize(0, 0), wxTE_MULTILINE | wxTE_READONLY);
}

// Defined elsewhere: creates the dialog identified by `n` on the GUI thread.
void myCreateHandle(int n);

// Global handle to the (hidden) main frame, used as dialog parent.
wxWindow * g_window=0;

// Dispatch a worker-thread request: the event's int payload selects which
// handle/dialog to create on the GUI thread.
void MyFrame::OnWorkerEvent(wxCommandEvent& event)
{
  int n = event.GetInt();
  myCreateHandle(n);
}

// Define a new application type, each program should derive a class from wxApp
class MyApp : public wxApp
{
public:
  // override base class virtuals
  // ----------------------------

  // this one is called on application startup and is a good place for the app
  // initialization (doing it here and not in the ctor allows to have an error
  // return: if OnInit() returns false, the application terminates)
  virtual bool OnInit();
};

// Create a new application object: this macro will allow wxWidgets to create
// the application object during program execution (it's better than using a
// static object for many reasons) and also implements the accessor function
// wxGetApp() which will return the reference of the right type (i.e. MyApp and
// not wxApp)
IMPLEMENT_APP(MyApp)

// Process start time; recorded in OnInit.
time_t g_T0 = 0;

// Worker thread that runs the real 7zG logic (Main1, or Main3 when the
// dialog tests are compiled in) and terminates the process with its result.
class MyThread : public wxThread
{
  int _argc;
  TCHAR **_argv;

public:
  MyThread(int argc,TCHAR **argv): wxThread(),_argc(argc), _argv(argv) {}

  // thread execution starts here
  virtual void *Entry()
  {
#ifdef ACTIVATE_DIALOG_TESTS
    int ret = Main3(_argc,_argv);
#else
    int ret = Main1(_argc,_argv);
#endif
    // exit the whole process: the wx event loop never returns on its own
    // because the main frame is never shown/closed.
    exit(ret);
  }
};

// 'Main program' equivalent: the program execution "starts" here.
// Sets P7ZIP_HOME_DIR from argv[0], creates the hidden main frame, and
// launches the worker thread that does the real work.
bool MyApp::OnInit()
{
  // don't parse the command-line options !
  // : if ( !wxApp::OnInit() ) return false;

  { // define P7ZIP_HOME_DIR
    extern void my_windows_split_path(const AString &p_path, AString &dir , AString &base);
    // static: putenv keeps a pointer to this buffer, so it must outlive
    // this scope.
    static char p7zip_home_dir[MAX_PATH];
    UString fullPath;
    NDirectory::MyGetFullPathName(wxApp::argv[0], fullPath);
    AString afullPath = GetAnsiString(fullPath);

    AString dir,name;
    my_windows_split_path(afullPath,dir,name);

    const char *dir2 = nameWindowToUnix((const char *)dir);

    snprintf(p7zip_home_dir,sizeof(p7zip_home_dir),"P7ZIP_HOME_DIR=%s/",dir2);
    p7zip_home_dir[sizeof(p7zip_home_dir)-1] = 0; // belt-and-braces termination
    putenv(p7zip_home_dir);
    // DEBUG printf("putenv(%s)\n",p7zip_home_dir);
  }

  global_use_utf16_conversion = 1; // UNICODE !

  g_T0 = time(0);

  // DEBUG printf("MAIN Thread : 0x%lx\n",wxThread::GetCurrentId());

  // Create the main frame window
  MyFrame *frame = new MyFrame((wxFrame *)NULL, _T("7-zip Main Window"), 50, 50, 450, 340);

  // Don't Show the frame !
  // frame->Show(true);

  SetTopWindow(frame);
  g_window = frame;

  MyThread *thread = new MyThread(wxApp::argc,wxApp::argv);
  thread->Create(); // != wxTHREAD_NO_ERROR
  thread->Run();

  // success: wxApp::OnRun() will be called which will enter the main message
  // loop and the application will run. If we returned false here, the
  // application would exit immediately.
  return true;
}

// Replacement for the Win32 GetTickCount: milliseconds since first call,
// measured with a function-local wxStopWatch.
DWORD WINAPI GetTickCount(VOID)
{
  static wxStopWatch sw;
  return sw.Time();
}

//////////////////////////////////////////

#include "resource.h"
#include "ExtractRes.h"

// English fallback strings for the resource IDs normally provided by the
// Windows .rc files; terminated by a {0, 0} sentinel.
static CStringTable g_stringTable[] =
{
  /* resource.rc */
  /***************/
  { IDS_OPEN_TYPE_ALL_FILES, L"All Files" },

  { IDS_METHOD_STORE, L"Store" },
  { IDS_METHOD_NORMAL, L"Normal" },
  { IDS_METHOD_MAXIMUM, L"Maximum" },
  { IDS_METHOD_FAST, L"Fast" },
  { IDS_METHOD_FASTEST, L"Fastest" },
  { IDS_METHOD_ULTRA, L"Ultra" },

  { IDS_COMPRESS_NON_SOLID, L"Non-solid" },
  { IDS_COMPRESS_SOLID, L"Solid" },

  { IDS_COMPRESS_UPDATE_MODE_ADD, L"Add and replace files" },
  { IDS_COMPRESS_UPDATE_MODE_UPDATE, L"Update and add files" },
  { IDS_COMPRESS_UPDATE_MODE_FRESH, L"Freshen existing files" },
  { IDS_COMPRESS_UPDATE_MODE_SYNCHRONIZE, L"Synchronize files" },

  { IDS_COMPRESS_SET_ARCHIVE_DIALOG_TITLE, L"Browse" },
  { IDS_COMPRESS_INCORRECT_VOLUME_SIZE, L"Incorrect volume size" },
  { IDS_COMPRESS_SPLIT_CONFIRM_MESSAGE, L"Specified volume size: {0} bytes.\nAre you sure you want to split archive into such volumes?" },

  { IDS_PASSWORD_USE_ASCII, L"Use only English letters, numbers and special characters (!, #, $, ...) for password." },
  { IDS_PASSWORD_PASSWORDS_DO_NOT_MATCH, L"Passwords do not match" },
  { IDS_PASSWORD_IS_TOO_LONG, L"Password is too long" },

  { IDS_PROGRESS_COMPRESSING, L"Compressing" },
  { IDS_PROGRESS_TESTING, L"Testing" },

  { IDS_MESSAGE_NO_ERRORS, L"There are no errors" },

  { IDS_FILES_COLON, L"Files:" },
  { IDS_FOLDERS_COLON, L"Folders:" },
  { IDS_SIZE_COLON, L"Size:" },
  { IDS_COMPRESSED_COLON, L"Compressed size:" },
  { IDS_ARCHIVES_COLON, L"Archives:" },

  /* Extract.rc */
  /**************/
  { IDS_CANNOT_CREATE_FOLDER , L"Cannot create folder '{0}'"},
  { IDS_OPEN_IS_NOT_SUPORTED_ARCHIVE, L"File is not supported archive."},
  { IDS_MESSAGES_DIALOG_EXTRACT_MESSAGE_CRC , L"CRC failed in '{0}'. File is broken."},
  { IDS_MESSAGES_DIALOG_EXTRACT_MESSAGE_DATA_ERROR , L"Data error in '{0}'. File is broken"},
  { IDS_MESSAGES_DIALOG_EXTRACT_MESSAGE_UNSUPPORTED_METHOD , L"Unsupported compression method for '{0}'."},
  { IDS_MESSAGES_DIALOG_EXTRACT_MESSAGE_CRC_ENCRYPTED , L"CRC failed in encrypted file '{0}'. Wrong password?"},
  { IDS_MESSAGES_DIALOG_EXTRACT_MESSAGE_DATA_ERROR_ENCRYPTED , L"Data error in encrypted file '{0}'. Wrong password?"},
  { IDS_EXTRACT_SET_FOLDER , L"Specify a location for extracted files."},
  { IDS_MESSAGES_DIALOG_EXTRACT_MESSAGE_CANNOT_OPEN_FILE, L"Can not open output file '{0}'."},
  { IDS_PROGRESS_EXTRACTING, L"Extracting" },

  { IDS_CANT_OPEN_ARCHIVE , L"Can not open file '{0}' as archive"},
  { IDS_CANT_OPEN_ENCRYPTED_ARCHIVE , L"Can not open encrypted archive '{0}'. Wrong password?"},

  { 0 , 0 } // sentinel
};

REGISTER_STRINGTABLE(g_stringTable)
{ "pile_set_name": "Github" }
Cd(ii)-MOF-IM: post-synthesis functionalization of a Cd(ii)-MOF as a triphase transfer catalyst. A robust and porous Cd(ii)-MOF based on a bent imidazole-bridged ligand was synthesized and post-synthetically functionalized with linear alkyl chains to afford imidazolium salt (IM)-type triphase transfer catalysts for organic transformations. The imidazolium salt decorated Cd(ii)-MOF-IM exhibits typical solid phase transfer catalytic behavior for the azidation and thiolation of bromoalkane between aqueous/organic phases. Moreover, they can be easily recovered and reused under the PTC conditions. Cd(ii)-MOF-IM herein created a versatile family of solid phase transfer catalysts for promoting a broad scope of reactions carried out in a biphasic mixture of two immiscible solvents.
{ "pile_set_name": "PubMed Abstracts" }
I got your voice mail message and I understand your concerns around Todd's time. I think we have it backwards for James to dictate how and when Todd's help would be needed. I think that we can review the "to do" list from James and get back with him on how much of that we can help with, who would do it, what the timing would be, how we might suggest organizing and prioritizing the work, etc. As I said originally, I knew that Todd would not be interested (nor available) for a month's worth of work. Todd's tendency (remember this with weather, too) is to think that he has to do all of the work himself. He has had enough experience that his organization, guidance and leadership can be very valuable without his doing all of the work himself. If James' list looks long and involved, how could Todd lead these efforts, what kind of team might he ask James to provide, what additional short-term resources might we be able to tap into in Houston, etc. We have to think a little out of the box to offer a potential solution to James. If you are willing, I would be glad to explore this with Todd and challenge him to think about how he could provide leadership to the effort, but not do all of the work. I want him to think like a consultant on these projects, not as the contracted hired help! Let me know what you think. --Sally Brent A Price 08/17/2000 03:55 AM To: Sally Beck/HOU/ECT@ECT cc: Subject: Re: Todd Hall I don't see any huge problems with Todd helping out Continental Power. He needs to spend some time in Oslo anyways reviewing the weather business there. I agree that one month might be unreasonable and would look to Todd to determine the proper timeframe. I will talk to Todd and Mike Jordan regarding the best way to proceed with this. Enron North America Corp. From: Sally Beck 08/16/2000 05:35 PM To: Brent A Price/HOU/ECT@ECT cc: Subject: Todd Hall As we said before, I would like to be able to utilize Todd Hall for special projects from time to time. 
I had mentioned to Mike Jordan that once Todd completed his CommodityLogic role that I thought that we could take some of his time to follow up on Continental Power issues, which could fit nicely with some work he could do on Weather in Europe. I don't know about the one month timeframe. I would prefer that Todd review the initiatives and determine the best way to proceed. It might mean a couple of weeks over there for starters, and then a follow up, shorter trip a few weeks later. I don't think that Todd is keen on any elongated stays in Europe, particularly without the family. Can we still use Todd ? ---------------------- Forwarded by Sally Beck/HOU/ECT on 08/16/2000 05:29 PM --------------------------- James New 08/16/2000 07:54 AM To: Sally Beck/HOU/ECT@ECT cc: Mike Jordan/LON/ECT@ECT, Brent A Price/HOU/ECT@ECT Subject: Todd Hall Sally, As you are probably aware the Continental Power book split project went well but there were a number of areas which were not completed for one reason or another but essentially IT did not deliver according to the agreed timetable. Todd was instrumental in helping me to drive this forward and was responsible for much of what we achieved. As you may also be aware we had some personnel issues and there have been significant changes in the Continental Risk Management team. We have now recruited a strong manager in Coralie Evans from CSFB and she starts on the 29th of August. We have also added Michelle Waldhauser from the Houston Gas Risk Management team who arrived on the 7th of August. There are a number of issues and developments which would greatly benefit from having Todd here in London for around a month (I say around a month as I am guessing within this period he would want to spend some time in Oslo pushing the weather Risk Management area forward). Attached is a page I put together that cover our immediate needs and why Todd is ideally placed to potentially add an enormous amount of value to the Continental Power business. 
Could we please get together with Brent Price and Mike Jordan and discuss whether Todd would be available to come over. Thanks and regards, James
{ "pile_set_name": "Enron Emails" }
Jyotiraditya Scindia said that in the next review session expected to be held after two weeks (File) AICC General Secretary in charge of western Uttar Pradesh, Jyotiraditya Scindia, said here on Friday that strengthening the Congress organisational structure was the top priority of the party in UP. He briefly spoke to reporters at the Uttar Pradesh Congress Committee (UPCC) headquarters after a six-and-a-half hour long marathon review meeting to understand the causes of the party's debacle in the Lok Sabha polls in western UP. He said that after discussions and taking the feedback and suggestions of the candidates and the party leaders, it had emerged that hard work was required to strengthen the Congress organisational structure at the ground level. Scindia said that in the next review session expected to be held after two weeks, the candidates for the bypolls to be held in 12 Vidhan Sabha seats will be finalised in consultation with the ground-level leaders. He also ruled out any alliance with any party for the 2022 Vidhan Sabha elections in the state, saying the party would contest on its own. The process of preparing for the next Assembly polls will begin in two weeks' time. On June 12, All India Congress Committee (AICC) General Secretary in charge of eastern Uttar Pradesh, Priyanka Gandhi Vadra, had held a review meeting with the candidates and party functionaries of eastern UP in Rae Bareli. While Scindia is in charge of 39 seats of western Uttar Pradesh and Terai region, Priyanka holds charge of the 41 districts in eastern UP.
{ "pile_set_name": "OpenWebText2" }
Ochłoda Ochłoda () is a settlement in the administrative district of Gmina Jutrosin, within Rawicz County, Greater Poland Voivodeship, in west-central Poland. References Category:Villages in Rawicz County
{ "pile_set_name": "Wikipedia (en)" }
Assessment of the Safety and Efficacy of Prolapsed Orbital Fat Resection During Involutional Entropion Repair. To assess the recurrence rate of involutional entropion in patients treated with a combined approach including a modified Bick procedure, excision of preseptal orbicularis muscle, and conservative resection of prolapsed orbital fat. A retrospective chart review of patients undergoing repair of involutional entropion with the combined procedure including orbital fat resection and a second group with standard entropion repair without orbital fat resection was performed. Only patients with follow-up greater than 6 months were included in the study. Seventy eyelids of 54 patients met all inclusion criteria for the combined procedure group over a 9-year period from 2008 to 2016. Average follow-up was 46.9 months. There was a documented recurrence of entropion in 1 eyelid during the follow-up period (1.4%). The remaining 69 cases had successful subjective and objective results without need for any additional procedures. In the group undergoing entropion repair without fat resection, 22 eyelids of 19 patients had the required follow-up period with a recurrence rate of 4.5% (p > 0.05). The authors demonstrate good surgical success with a combined approach of a modified Bick procedure, preseptal orbicularis excision, and conservative orbital fat resection. Conservative fat resection during entropion repair was found to be safe, and the combined procedure was found to be effective with a rate of recurrent entropion of 1.4% on extended follow-up.
{ "pile_set_name": "PubMed Abstracts" }
/*
 * Copyright (c) 2018 THL A29 Limited, a Tencent company. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

namespace TencentCloud.Monitor.V20180724.Models
{
    using Newtonsoft.Json;
    using System.Collections.Generic;
    using TencentCloud.Common;

    // Event-trigger condition of an alarm policy group (Cloud Monitor API,
    // version 2018-07-24).
    public class ModifyPolicyGroupEventCondition : AbstractModel
    {

        /// <summary>
        /// Event ID.
        /// </summary>
        [JsonProperty("EventId")]
        public long? EventId{ get; set; }

        /// <summary>
        /// Alarm-notification convergence type. 0: continuous alarms; 1: exponential alarms.
        /// </summary>
        [JsonProperty("AlarmNotifyType")]
        public long? AlarmNotifyType{ get; set; }

        /// <summary>
        /// Alarm-notification period in seconds. Less than 0: never triggers; 0: triggers only once; greater than 0: triggers once every triggerTime seconds.
        /// </summary>
        [JsonProperty("AlarmNotifyPeriod")]
        public long? AlarmNotifyPeriod{ get; set; }

        /// <summary>
        /// Rule ID. Omit it to add a new rule; supply an existing ruleId to modify that rule.
        /// </summary>
        [JsonProperty("RuleId")]
        public long? RuleId{ get; set; }


        /// <summary>
        /// For internal usage only. DO NOT USE IT.
        /// </summary>
        internal override void ToMap(Dictionary<string, string> map, string prefix)
        {
            this.SetParamSimple(map, prefix + "EventId", this.EventId);
            this.SetParamSimple(map, prefix + "AlarmNotifyType", this.AlarmNotifyType);
            this.SetParamSimple(map, prefix + "AlarmNotifyPeriod", this.AlarmNotifyPeriod);
            this.SetParamSimple(map, prefix + "RuleId", this.RuleId);
        }
    }
}
{ "pile_set_name": "Github" }
Are donor lymphocytes a barrier to transplantation tolerance? Following solid organ transplantation (SOT), populations of donor lymphocytes are frequently found in the recipient circulation. Their impact on host alloimmunity has long been debated but remains unclear, and it has been suggested that transferred donor lymphocytes may either promote tolerance to the graft or hasten its rejection. We discuss possible mechanisms by which the interaction of donor passenger lymphocytes with recipient immune cells may either augment the host alloimmune response or inhibit it. Recent work has highlighted that donor T lymphocytes are the most numerous of the donor leukocyte populations within a SOT and that these may be transferred to the recipient after transplantation. Surprisingly, graft-versus-host recognition of major histocompatibility complex class II on host B cells by transferred donor CD4 T cells can result in marked augmentation of host humoral alloimmunity and lead to early graft failure. Killing of donor CD4 T cells by host natural killer cells is critical in preventing this augmentation. The ability of passenger donor CD4 T cells to effect long-term augmentation of the host humoral alloimmune response raises the possibility that ex-vivo treatment or modification of the donor organ prior to implantation may improve long-term transplant outcomes.
{ "pile_set_name": "PubMed Abstracts" }
Q: Given a simple pandas Series, what's a simple way to create a histogram (bar plot) of it? I have a supersimple Series like this: hour 0 438 1 444 2 351 3 402 4 473 5 498 6 440 7 431 8 259 9 11 11 52 12 62 13 77 14 55 22 40 23 162 Name: value, dtype: int64 It's just a count of the number of observations of something in a given hour. How could this be plotted quickly and easily as a histogram in a Jupyter notebook? The first bin would be from 0 to 1 hours (00:00 to 01:00), the second bin would be from 1 to 2 hours (01:00 to 02:00) and so on. A: if you need a standard bar plot: In [8]: import matplotlib ...: matplotlib.style.use('ggplot') ...: In [9]: s.plot.bar(rot=0, grid=True, width=1, alpha=0.7) Out[9]: <matplotlib.axes._subplots.AxesSubplot at 0xaaab7f0>
{ "pile_set_name": "StackExchange" }
The fact that Hideo Kojima and Guillermo del Toro’s take on Silent Hill will never happen will always make me sad. Its ‘interactive teaser,’ PT , is still one of the best horror games of all time in its own right and that was just a demo. What fourth wall breaking Kojima and horror master del Toro could have done with it remains one gaming’s greatest unanswered questions. Post that messy Kojima/Konami divorce , Hideo Kojima is making Death Stranding and we’ll never know what Silent Hills could have been. Or will we? Poke around enough Reddit threads and YouTube videos and there are numerous connections and coincidences that seem to join the two. Not to mention strange tweets and discrepancies in the timeline of both games’ development that are… well, odd. There are two extremes to all this: fasten the tin foil hat on tight enough and people make concerted attempts to argue that Death Stranding is Silent Hills and Kojima has orchestrated everything from faking his own firing onwards, to create the mother of all surprises. After the Moby Dick/Joakim Mogren shenanigans of Metal Gear Solid 5: The Phantom Pain ’s initial reveal - where Kojima created a fictitious studio and developer to pretend it wasn’t a Metal Gear game - it’s almost plausible. Kojima also memorably hid the fact that Raiden was the star of MGS2 until it was out, infuriating a fan base waiting for their hero to return. The man is capable of anything. The other, gentler, end of the ‘crazy’ spectrum is simply that Death Stranding is using some of Silent Hills’ leftover ideas and themes. While Kojima’s showmanship as a master manipulator means he’s deliberately sowing these [dum dum DUM] strands between the two to whip up gossip and attention. But the fact remains that there are a lot of weird, weird, connections. The kind that make you want to join pictures on a notice board together with pins and bits of string. 
Some are obvious, some are suspicious and some are just far too ‘wait, what?’ Hands on There are some big easy things right up front - for example, both games feature prominent babies. But perhaps one of the biggest match-ups can be found in the concept trailer for Silent Hills that Kojima revealed at the Tokyo Game Show in 2014: It features a monster, later actually datamined as a finished model from the PT demo , that’s basically a giant hand chasing the player. Monstrous pursuing hands, in a far more cryptic form, are a central entity in Death Stranding: It’s hard to miss that: two monsters basically made of hands. Whether a re-used physical design or concept, the fact that both games have a central character living in fear of giant fingers is kind of a ‘huh’ moment. That ramps up when you take a look at some of the pictures you can find on the wall in PT. This post compares some of the paintings from the Silent Hills teaser with location from various Death Stranding trailer shots, PT pictures on the left, Death Stranding images on the right: The similarities are strong. For good measure there’s also this shot of another painting that looks exactly like the black Icelandic beach Norman Reedus wakes up on in the original Death Stranding reveal: Where things start to blur is that Kojima was location scouting in Iceland way back in 2014 - while he was still at Konami and working on Silent Hills. Death Stranding wouldn’t be announced for another couple of years: I see the town.pic.twitter.com/S6JTHB3p2h”August 15, 2014 That’s Gardur in Iceland, with the “I see the town” a mis-phrased Silent Hill 2 quote. Kojima also tweeted a picture of moss back in 2014 that, at the time, didn’t really mean that much: Only moss lives here???pic.twitter.com/Ml3jGpTcZ5”August 17, 2014 However, four years later, a lot of people remembered that 2014 tweet when he retweeted this as a teaser for an upcoming E3 2018 Death Stranding reveal: +1 on the T-minus 3 weeks. 
#E32018Decima×Kojima=DS. pic.twitter.com/OxNrqaxQJSMay 22, 2018 Building bridges Whatever Kojima was doing in Iceland in 2014 - one year before he left Konami, and nearly two years before he started work on Death Stranding - something carried over. However, the really fun picture in PT is this one: Flip it over and reverse the colour and you get this: It’s a dead ringer for the Old Chusetsu Tunnel in Fukuoka, one of Japan’s most famous haunted locations. That’s not an unusual reference to include in a horror game, although it’s odd that it’s the only painting to get that negative upside down treatment. Does that make the bridge significant in some way? Like the fact that there’s a hard to miss bridge in Death Stranding: In and of itself that’s not really that big a deal, a bridge is a bridge, until you catch the ‘J’ in the centre brick as the camera pans up: If you’re wondering why that’s a thing it’s because there’s a ‘J’ in PT: in one of the fake game crash screens you can see as you play, with a message signed off with that initial: There’s also another message in PT that says “I walked till I stood one pace before Jack” and, in the actual programming code of PT itself, there’s a callout to loading something called a “JackStage” that additionally then refers to something else called a “hijackerStage”. So J, Jack, and hijack is a thing in PT, which obviously means something to the developers, but not to anyone else (I've checked: the code is loading a specific part of the game but the relevance of the naming is unknown). Although given how much Kojima loves to play with people it’s not that hard to believe that he’s seeding old PT things into Death Stranding after the fact just to drive conspiracy fans nuts. Are you sure the only you is you? But there are weird thematic links too. PT has a talking blood stained paper bag that, depending on who you ask, contains either a head, or the removed genitals of the demo’s main character (long story). 
At one point it states, “I walked. I could do nothing but walk. And then, I saw me walking in front of myself. But it wasn't really me. Watch out. The gap in the door... it's a separate reality. The only me is me. Are you sure the only you is you?” It’s an interesting quote given that the entire purpose of Norman Reedus’ Death Stranding character, Sam, is to walk; he makes deliveries. Plus there’s all that talk of “separate realities”, ‘seeing yourself’ and being unsure if ‘you are the only you’. A Death Stranding fundamental seems to revolve around at least three other realities - the main world, another plane full of shadowy monsters trying to push through, and some sort of watery purgatory you visit when you die. Except you don’t actually die, you come back because video games. Are you still the only you if you return from the dead? There are even some odd links between PT and Death Stranding when you look at Low Roar, the band who performs the ‘I’ll Keep Coming’ song from the Death Stranding trailers. Kojima allegedly discovered them in 2014, again while he was meant to be working on Silent Hills. So it’s probably just coincidence that the album they released that year fits the Death Stranding artwork years later (or, again, Kojima just playing with people). It’s a connection the band itself has even tweeted out, while denying there’s any intention. I’ve been a big @HIDEO_KOJIMA_EN fan for awhile but this is just too weird how it lines up with the strands. @LOWROAR @wwwbigbaldhead @DeathStranding_ pic.twitter.com/1GfM8tgW4kJune 9, 2018 Something that’s less hard to brush away is the 2017 music video for Low Roar’s Give me an Answer where director Dylan Marko Bell specifically says “it would be fun to create a world where certain fans could physically enter the game Death Stranding before it came out”. 
That’s all well and good, until the entire video’s concept hinges on people apparently ‘entering Death Stranding’ via a PT-esque bathroom - where a machine trailing numerous strand-like wires makes them disappear, apparently in rapture as an oily substance coats their bodies. The one person left behind after drawing a short straw seems upset not to get their chance until they open the bathroom door, sees… something... and then everything glitches as they run away. What’s interesting is that, when talking about ‘creating a world where fans could enter Death Stranding’ Bell states that "this opened the door to questions that created the narrative you see in the video. What's the car that takes you there? Who's driving? What's the hotel room number? What does the machine and the transfer process look like? What happens if a contestant doesn't wait their turn?" I’ve added the emphasis there, the implication being that Bell seems to know how people ‘enter’ Death Stranding’s world, and using a bathroom seems almost too much of a coincidence if he does. To see a world in a grain of sand There’s no denying that there are a lot of weird similarities between Death Stranding and PT/Silent Hills. But what’s meaningful, what’s coincidence and what’s Kojima deliberately messing with people’s head is almost impossible to pick apart. Pareidolia is the name for the phenomena where the human brain sees recognisable patterns in otherwise random things (Jesus’ face in a slice of toast being the usual example) and the connections between the two games is littered with it. For instance, is it coincidence that there’s one solitary, incongruous spider web in PT that looks similar to the Bridges logo in Death Stranding? (While PT Easter eggs in MGS 5 also included the spider.) Does the fact that William Blake, who Kojima quotes at the start of a Death Stranding trailer, has a painting called ‘Good and Evil Angels Struggling for the Possession of a Child’ mean anything? 
Are they fighting over a baby on a black beach or do I just want to see that? We may never actually know if any of these connections and strands between the two games actually mean anything. And we certainly won’t know a thing until Death Stranding arrives, reportedly next year. Whatever happens - whether it’s real, imagined or manipulated - it’s still a fun game to play ahead of the main event, and makes the world a far more interesting place for it. And let’s remember the most important message from PT:
{ "pile_set_name": "OpenWebText2" }
Kilo Kish Lakisha Kimberly Robinson (born May 10, 1990), known professionally as Kilo Kish, is an American rapper, singer-songwriter and visual artist. Early life Kilo Kish was born in Orlando, Florida, on May 10, 1990. She went to Glen Rock Elementary School in Bergen County, New Jersey, but later returned to Orlando, where she graduated from Winter Park High School. Aged 18, she received a scholarship to attend the private art college Pratt Institute in New York. While taking a year off, she worked multiple jobs, before enrolling in the Fashion Institute of Technology for textile design, from which she graduated in 2012. Kish began to pursue a musical career when her roommates Smash Simmons and Mell McCloud decided to set up an in-home studio, starting a project called the "Kool Kats Klub". It was through this exposure that she initially met Matt Martians of the neo-soul band The Internet and collaborated on The Jet Age of Tomorrow's "Want you Still" and The Internet's "Ode to a Dream". Career 2012: Homeschool Kilo Kish's debut EP Homeschool was released on April 2, 2012, and was well received by critics, being labeled as one of the best albums of 2012 by Complex. The EP was produced by past collaborators such as The Internet and Pyramid Vritra of The Jet Age of Tomorrow, who also provided guest vocals on the project. On July 31, Kish released "Watergun", a song produced by The Internet, via SoundCloud. On September 17, "Navy" was released on The Blue Rider record label. The release also contained a remix of "Navy" by MeLo-X. A video for "Navy", as directed by Kish with Ben Rayner, premiered on The Fader the same day. 2013: K+ Kish released the mixtape K+ for free download on February 7, 2013. It featured artists including Childish Gambino, Star Slinger, SBTRKT, A$AP Ferg of A$AP Mob, Earl Sweatshirt, and Matt Martians of OFWGKTA. In December, Kish released K+ THE BOOK, a behind-the-scenes digital art zine, in extension to the mixtape. 
2014–2015: Across Kish's second EP, Across, was released on July 8, 2014, in collaboration with the fashion and record label Maison Kitsune. The following year, Kish released Across Remixes. 2016: Reflections in Real Time Kish's first studio album, Reflections in Real Time, was released on February 29, 2016. The song "Taking Responsibility" from the album was featured in the third episode of the HBO teenage drama television show Euphoria. 2017 Kish features on "Out of Body" from the deluxe edition of the 2017 Gorillaz album Humanz, as well as across the album Big Fish Theory by Vince Staples. 2018: mothe Kish released the single "Elegance" on August 26, 2018. Pitchfork named the song "Best New Track", with Sheldon Pearce commenting that "it is the best Kilo Kish song by a considerable margin, and it hones in on what makes her such an enchanting and elusive artist, using her wispy raps to drift through a warped pop jam." Her EP "mothe" followed with a release on September 7, 2018. On July 29, 2019, Kish released an alternative artwork for mothe which has replaced the old artwork on all digital platforms. The cover was shot by photographer Andrew Arthur, with whom she has worked previously. 2019: REDUX Kish released 3 singles over the course of 2 months beginning in October 2019. The run began with "BITE ME" on October 17, 2019, followed by "NICE OUT" on November 7, 2019, and concluded with "SPARK" later the same month on November 26, 2019. All 3 singles were released with accompanying music videos and were all from the forthcoming EP, REDUX, set for release on December 6, 2019. Kish describes this as 2018's mothe's "sister project," and describes it as being about perseverance. This would be Kish's fourth collaboration with recurring producer Ray Brady. 
Discography Studio albums Reflections in Real Time (2016) Mixtapes K+ (2013) EPs Homeschool (2012) Across (2014) Across Remixes (2015) mothe (2018) REDUX'' (2019) Music Videos Guest appearances References External links Category:1990 births Category:Living people Category:Singers from Orlando, Florida Category:American hip hop singers Category:Kitsuné artists Category:Winter Park High School alumni Category:21st-century American singers Category:21st-century American women singers Category:Musicians from Brooklyn Category:Women in hip hop music Category:American female hip hop musicians
{ "pile_set_name": "Wikipedia (en)" }
Somatosensory evoked potentials and headache: a further examination of the central theory. The central theory of headache was investigated by examining the amplitude of the somatosensory evoked potential (SSEP) in headache sufferers and headache-free controls. The P1-N1 amplitude was found to be greater, and to increase more rapidly with increasing stimulus intensity, for headache subjects than for controls. The N1-P2 amplitude was also found to be larger for headache subjects than for controls, but there was no significant difference between groups on the rate at which this component increased with stimulus intensity. When the P1-N1 and N1-P2 amplitudes were assessed in headache subjects, during and between attacks, no significant differences between conditions were observed. No significant differences between tension and migraine sufferers were observed on either component. It was concluded that the central nervous systems of headache sufferers may be more reactive to somatosensory input than those of headache-free persons and that this might be an important factor in the pathophysiology of headache.
{ "pile_set_name": "PubMed Abstracts" }
Church of the Addolorata, Acqui Terme The Church of the Addolorata (Italian:Chiesa dell’Addolorata) is a Romanesque-style, Roman Catholic basilica church located on Piazza Addolorata, in Acqui Terme, Province of Alessandria, region of Piedmont, Italy. History The church is dedicated to the Marian devotion of Our Lady of Sorrows, however, the church is also called San Pietro, since it was once attached to the adjacent Benedictine monastery of San Pietro. The layout we see today was built in the 11th century at the site of a late 6th-century paleochristian church located just outside the city walls. It had three naves with an octagonal bell-tower at the southern apse. The simple brick facade has protruding pilasters and shows a trend towards verticality. After 1720, with the closing of the monastery, part of the church was rededicated to the Addolorata. It underwent major restoration after the First World War, which stripped much of the decoration, giving the interior a white-washed simplicity. The apse and the base of the bell-tower retain medieval traces. The interior conserves a 15th-century Deposition fresco and two 16th-century canvases depicting Christ Crowned with Thorns and Christ before Pilate. The wooden statue of the Madonna Addolorata dates to 1720. References Addolorata Category:11th-century Roman Catholic church buildings Category:Romanesque architecture in Piedmont Addolorata
{ "pile_set_name": "Wikipedia (en)" }
Where God Has the Answers June Foster Where God has the answers. That seemed to be the theme of my stories as I began the writing journey. But what were the questions? In January 2010, I embarked on an exciting adventure of writing fictional novels which feature characters, mostly Christian, who struggle with the common issues we all face in our daily lives. Anger, obesity, shop-a-holism, to name a few. Yes, each has questions, but best of all they find answers which reflect Truth—
{ "pile_set_name": "Pile-CC" }
Craig Buckham Craig Thomas Buckham (born 9 August 1983) is an English former first-class cricketer. Buckham was born at Ashford in August 1983. While attending Anglia Ruskin University, Buckham made three appearances in first-class cricket for Cambridge UCCE from 2004–06, playing against Essex, Warwickshire and Kent. A leg break bowler, Buckham bowled a total of 30 overs without taking a wicket, while conceding 187 runs. In addition to playing first-class cricket, Buckham also played at minor counties level. He made two appearances for the Kent Cricket Board in the 2002 MCCA Knockout Trophy, later making seven appearances for Cambridgeshire in the Minor Counties Championship between 2007 and 2009. References External links Category:1983 births Category:Living people Category:People from Ashford, Kent Category:Alumni of Anglia Ruskin University Category:English cricketers Category:Kent Cricket Board cricketers Category:Cambridge MCCU cricketers Category:Cambridgeshire cricketers
{ "pile_set_name": "Wikipedia (en)" }
When is a pint of beer not a pint of beer? When it's served in Adelaide. Anywhere else in the country, ordering a pint will get you 570 millilitres of amber ale, but in Adelaide it results in a paltry 425ml. Such is the concern about the discrepancy, you asked Curious Adelaide to investigate this question from local Braden Earl: "Why does SA have a different measurement for beer and why is it called a pint if it isn't an actual pint?" According to Adelaide publican and craft brewer Jade Flavell, consternation about glass size is as predictable as someone yelling 'taxi' at the sound of a glass smashing. "You can pretty much put money on the fact that an interstater will say 'that's not a pint'," she said. In addition to our pint anomaly, our schooners are similarly slighter than their Sydney counterparts, and the evolution of our 'butcher' glass is a story in itself. The truth, it seems, is as elusive as a teetotaller at a craft beer convention. In my quest for an answer, I've delved into dusty tomes in library collections, conversed with brewers and expat experts, trawled internet backwaters, scoured old newspapers and, of course, enjoyed a pint or two along the way. What I've found is a cocktail of myth and legend, a hearty brew of fact and folklore. Sorry, this video has expired Ordering a beer in South Australia: a rough guide Chapter 1: Size matters There are several theories about South Australia's pint-sized pint. One involves the Temperance movement. According to this hypothesis the movement, which succeeded in bringing about early pub closing, lobbied for smaller glass sizes to reduce public drunkenness. Another theory involves the Great Depression and the idea drinkers could only afford smaller serves, thereby pushing the larger glasses out of fashion. Someone else told me it may have been for tax reasons. But these are all tantalisingly incomplete, and I could find very little corroborating evidence. I'd barely begun my mission and already felt adrift. 
How do you solve a mystery that has defied the most dedicated and, I imagine, well-lubricated social historians for decades? The South Australian branch of the Australian Hotels Association seemed like a good place to begin. But I quickly got the feeling I was far from the first person to ask. "We've gone through our archives many times over this issue," lamented general manager Ian Horne, with a touch of amusement but also regret. "Why South Australia for so long stopped at the 15 fluid ounce glass (425ml) as the largest glass is unknown to us." Historian Alison Painter is also intrigued and baffled by the question. Ms Painter started working for Coopers Brewery in 1960 and has written the company's official history, and a book on South Australian brewing. "Whether it happened at the time of World War One when there was talk of reducing drinking so that more money could go into War Bonds, or whether it happened in the Depression years of the 1930s because people were short of money — who knows?" The name for the butcher glass is widely believed to have originated at the Newmarket Hotel. ( ABC News: Daniel Keane ) Chapter 2: Colonial drift A recurring name in my ongoing research was Dr Brett J. Stubbs, a prolific writer and leading beer historian currently residing in France. His email address, which contains the phrase 'Tankard Books', suggests a scholar and antiquarian of some pedigree. The fact that I was unable to speak directly to Dr Stubbs only enhanced my image of him as a man of mystery and importance. During our email correspondence, he told me the variation in glasses was probably a flow-on effect of differences in the way the amber fluid was bottled. "Breweries began mucking around with the sizes," he said. "The large bottles became known officially as reputed quarts (they contained much less than an actual quart of 40 fluid ounces), and the small ones as reputed pints (less than an actual pint of 20 fl. oz.). 
"The glass size in South Australia that was used to contain somewhat less than a pint of beer ... became known as a 'reputed' pint glass. "The confusion has arisen because South Australians have dropped the word 'reputed' from the name and just referred to them as pint glasses, in contrast to other states where actual pint measures are used." According to Dr Stubbs, the reputed pint has itself changed over time, from 18 fluid ounces to 17 and then, in 1951, 16 — a volume fixed by the Prices Commissioner. By the early 1980s, the reputed pint had shrunk further to 15 fluid ounces, and has remained that size since then. Seeking clarification, I asked Dr Stubbs whether all of this meant it was just a quirk of history that South Australia has ended up with its idiosyncratic pint size. "[I'm] not sure I would call it an historical accident, but maybe a hangover from, or vestige of, our colonial past," Dr Stubbs replied. "There are still many ways in which the states differ, reflecting the fact that the pre-federation colonies were essentially separate countries, and still do things differently in many areas. "It is not that SA drifted away from the other states with regard to beer glasses [but] more the opposite, that the different states have not all equally drifted together." But Dr Stubbs hinted that a more complete answer might be lying in wait in an archive somewhere. The so-called 'fancy' has been pioneered by the Wheatsheaf Hotel, and is now one of its most popular glasses. ( ABC News: Daniel Keane ) Chapter 3: Imperial history According to Jade Flavell, who co-owns The Wheatsheaf Hotel at Thebarton in Adelaide's inner west, imperial pints became popular during the British and Irish pub craze of the 1990s. Prior to then, it was very difficult to get your hands on one. But they were not entirely unknown in South Australia. In fact, historic newspaper articles suggest they may have once been fairly common. 
A 1855 letter to the editor of an Adelaide newspaper, complaining about the high price of beer, suggests imperial half-pint glasses were commonly used at the time. In 1937, Port Pirie's Recorder reported on a so-called 'beer strike' in which locals stopped going to pubs in protest against the rising price of beer. Many were unhappy at the cost of both imperial pints and reputed pints. The strike ended when publicans agreed to increase the size of their smaller glasses. But the rapid decline of the imperial pint seems to have been caused by World War Two. In 1940, the Federal Government imposed higher taxes on beer as part of the war effort. While New South Wales, Queensland and Western Australia decided to charge more for their beers, South Australia kept prices the same, but reduced serving sizes. This was not the first time beer glasses were made smaller as a result of taxation. It also happened in 1904, and a parliamentary report from the time notes the Prime Minister "expressed sadness" at the downsizing. An article from the Barrier Miner in 1942 puts SA publicans on notice. ( Trove: Barrier Miner ) Chapter 4: Crafty marketing Skulduggery was probably also at work. Crafty publicans might have tried to convince unsuspecting, and perhaps sozzled, patrons that what they were serving were genuine pints. A report from Adelaide's Register newspaper from 1905 suggests this was indeed a very real problem. Entitled 'Reputed or Imperial Measure - Which?', the article reveals that having different pint measurements was causing just as much confusion more than a century ago as it does today. "When is a fraud not a fraud? When it becomes a custom," the article begins. The fraud in question was the sale of reputed pints, and the report details a hearing and apparent cross-examination at the Tariff Commission. 
"A long discussion ensued relative to reputed half-pints, pints, quarts, and gallons, as well as similar imperial measures," wrote the anonymous and presumably puzzled journalist. "Matters ultimately became so involved that the witness had to give several explanations to make his meaning clear." Adelaide's pint, fancy, schooner and butcher are among the options in SA. ( ABC News: Daniel Keane ) Chapter 5: Fancy a butcher? Another enduring oddity in South Australian hotels is the butcher glass — a 200ml (7 fl. oz.) vessel that is still on offer at some establishments. The most common theory about the evolution of its name involves the historic Newmarket Hotel on the corner of North and West terraces in Adelaide's CBD. The Newmarket was near an abattoir or cattle yard, and it's said the smaller 'butcher' glass was a favourite with the local workers. Some have speculated that the slaughterers needed a glass that would not slip through their bloodied fingers. Others say the smaller size prevented too much intoxication before they returned to work. Dr Stubbs has concluded that the butcher has a contested etymology, to say the least. "The term seems to have 'originated' in many different pubs," he wrote. The pubs in question were frequently close to butcher shops or meat works, and the term was in use at least as early as the 1880s. A letter to the editor from 1954 claimed the name came from butcher boys, who were given the glasses when delivering meat to pubs. A spanner in the works is the fact that butchers were not always small glasses, casting doubt on the belief that they were intended to be consumed in a hurry. And there is also the possibility the word is a corruption of the German word 'becher' — a generic term for a drinking vessel. A more recent addition to local glassware is the 'fancy', a 300ml container pioneered at the Wheatsheaf Hotel. It is so-called because of its delicate, tulip-like appearance. 
Jade Flavell remembers it took a while to catch on, but is now one of the pub's most popular glasses. The fancy proves that innovation is still very much alive where drinking is concerned. "[The shape] funnels the aroma," she said. "In a straight-edge glass, it tends to dissipate." Brewer and hotel owner Jade Flavell thinks crafty publicans might be partly responsible for the smaller glasses. ( ABC News: Daniel Keane ) Conclusion: A lingering headache Answering Braden's question has led me down several blind alleys but, like a heavy brew, also yielded glimpses of truth. For what it's worth, I agree with Dr Stubbs that our pint is different not by design, but because of our history. But Ian Horne suspects there may never be a decisive answer. "There's lots of mystery and rituals around pubs and alcohol, and history says we didn't record them particularly well, particularly on these sorts of fringe issues," he said. "Because alcohol is involved, you're not sure those that did record it did so before or after they'd enjoyed their schooner or their pint or their butcher." Jade Flavell believes the mystery is all part of the fun. "There's some sort of local pride in the fact that South Australians call their glasses something completely different to every other state," she said. Tackling the problem has been thirsty work, and at the end of it all I feel I could do with a drink. But I'm left with the question... in which glass?
{ "pile_set_name": "OpenWebText2" }
Q: Feature Selection: Correlation and Redundancy Assume having several numerical, multidimensional time-series. As preprocessing for further analysis, I firstly check for relevance and then for redundancy of all dimensions/features. 1) Check for relevance: I will exclude all dimensions with a variance of 0 over the whole dataset - since such a dimension does not contain information that helps to classify/distinguish the time-series from each other. 2) Check for redundancy: I compute the correlation of all dimensions/features with each other and my intuition says (here is my question) that those feature pairs which correlate by either -1 or +1 are redundant. Whereas a high correlation such as 0.99 seems to be redundant, it is not. Only a correlation of either -1 or +1 means redundancy. Therefore I will randomly exclude one of the two dimensions/features which correlate by +1 or -1. I am yet sceptical whether or not this is a correct assumption. Are there any leads that could prove / disprove my intuition of the connection between redundancy and correlation? A: High absolute correlation does not imply redundancy of features in the context of classification. An example is given in the textbook Feature Extraction - Foundations and Applications by I. Guyon et al. (p.10, figure 2 (e)) I reproduced the example for visualization with matplotlib and in Python. In this example both features correlate highly, yet separation of classes will only be achieved if both features are used. Therefore correlation does not imply redundancy.
{ "pile_set_name": "StackExchange" }
#!/bin/bash
#
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Use secondary switch to add secondary dex file to class path.
#
# NOTE(review): ${RUN} is expected to be set in the environment by the test
# harness to the command that launches this test. It is deliberately left
# unquoted so that a multi-word value (binary plus its own flags) undergoes
# word splitting — do not add quotes without confirming RUN is a single word.
# All arguments given to this script are forwarded unchanged via "${@}",
# followed by the --secondary flag. exec replaces this shell with the test
# process so the exit status is the test's own.
exec ${RUN} "${@}" --secondary
{ "pile_set_name": "Github" }
The Quays Convenient Apartment Stay Please use the filters on the left hand side to find your ideal hotel. There seems to be a problem with your internet connection. Please refresh the page using the button below. Show more hotels Information Reviews Photos Description Boasting a picturesque setting in St Lucia’s Rodney Bay, Bay Gardens Beach Resort & Spa features a white sand beach and a full-equipped gym. Treasure Bay Casino and the Bay Walk Shopping Mall are both nearby. The 75 air-conditioned condos at the Bay Gardens Beach Resort & Spa boast Caribbean decor and patios or balconies. Other amenities include deep soaking tubs, cable TV and free Wi-Fi. Guests can relax in the petal-shaped lagoon pool, enjoy a pampering treatment such as a seafront massage from the therapists at La Mer Spa. A games room and playground are available for younger guests. Dining options at the resort include the Hi Tide Beachside Restaurant, the Lo Tide Deli and Trios Caribbean Fusion Restaurant. Nearby eateries include the Lime Restaurant and the Blue Olive Restaurant and Wine Bar. The resort lies less than two miles from the Bonne Terre Tropical Gardens and Rodney Bay Marina.
{ "pile_set_name": "Pile-CC" }
Baba Shamal (magazine) The Persian journal Baba Shamal was one of the most famous political satire magazines in Iran. It was published weekly between 1943 and 1945 by Reżā Ganjaʾī (1918–1995). After his return from Europe in 1947, 50 volumes more were distributed. Ganjaʾī was a cabinet member and a university professor for engineering. Before and after his publishership Ganjaʾī held some important positions in ministries and in the banking and insurance sector. He published his articles under the pseudonym “Donb-al-mohandesīn“. Many well-known Iranian satirists, poets and authors belong to his supporters and co-workers, i.a. Rahī Moʿayyerī („Zāḡča“), Eqteṣād („Shaikh Pašm-al-Dīn“), Fozūnī („Mohandes-al-Šoʿarāʾ“) and Ṣahbā („Shaikh Somā“). The journal was widespread in Iran and was characterized by its everyday language and colloquial style. The general satirical opinion of the authors found its expression in current daily politics which was supplemented by partly colored caricatures and drawings. In general the journal's position was nationalistic, independent and moderate. However, its critique led – under the pressure of censorship – to its suspension in 1947. References Further reading The Encyclopædia Iranica, http://www.iranicaonline.org/articles/baba-samal The Encyclopædia Iranica, http://www.iranicaonline.org/articles/ganjai- Lawrence Elwell-Sutton, “Iranian Press 1941-47,” Iran 6, 1968, p. 80. External links Online-Version: Bābā Šamal Further information: www.translatio.uni-bonn.de Digital editions: Arabische, persische und osmanisch-türkische Periodika Category:1943 establishments in Iran Category:1947 disestablishments in Iran Category:Defunct magazines of Iran Category:Magazines established in 1943 Category:Magazines disestablished in 1947 Category:Persian-language magazines Category:Iranian political satire Category:Iranian satire Category:Satirical magazines Category:Weekly magazines Category:Iranian magazines
{ "pile_set_name": "Wikipedia (en)" }
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2014-2016 The Bitcoin Core developers // Original code was distributed under the MIT software license. // Copyright (c) 2014-2019 Coin Sciences Ltd // MultiChain code distributed under the GPLv3 license, see COPYING file. #include "wallet_ismine.h" #include "keys/key.h" #include "keystore.h" #include "script/script.h" #include "script/standard.h" #include <boost/foreach.hpp> using namespace std; typedef vector<unsigned char> valtype; unsigned int HaveKeys(const vector<valtype>& pubkeys, const CKeyStore& keystore) { unsigned int nResult = 0; BOOST_FOREACH(const valtype& pubkey, pubkeys) { CKeyID keyID = CPubKey(pubkey).GetID(); if (keystore.HaveKey(keyID)) ++nResult; } return nResult; } isminetype IsMineKeyID(const CKeyStore& keystore, const CKeyID& dest) { if (keystore.HaveKey(dest)) return ISMINE_SPENDABLE; if (keystore.HaveWatchOnly(GetScriptForDestination(dest))) return ISMINE_WATCH_ONLY; return ISMINE_NO; } isminetype IsMineScriptID(const CKeyStore& keystore, const CScriptID& dest) { CScript subscript; if (keystore.GetCScript(dest, subscript)) { isminetype ret = IsMine(keystore, subscript); if (ret == ISMINE_SPENDABLE) return ret; } if (keystore.HaveWatchOnly(GetScriptForDestination(dest))) return ISMINE_WATCH_ONLY; return ISMINE_NO; } isminetype IsMine(const CKeyStore &keystore, const CTxDestination& dest) { CScript script = GetScriptForDestination(dest); return IsMine(keystore, script); } isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey) { vector<valtype> vSolutions; txnouttype whichType; if (!TemplateSolver(scriptPubKey, whichType, vSolutions)) { /* MCHN START */ // if (keystore.HaveWatchOnly(scriptPubKey)) if (keystore.HaveWatchOnly(scriptPubKey.RemoveOpDrops())) /* MCHN END */ return ISMINE_WATCH_ONLY; return ISMINE_NO; } CKeyID keyID; switch (whichType) { case TX_NONSTANDARD: case TX_NULL_DATA: break; case TX_PUBKEY: keyID = CPubKey(vSolutions[0]).GetID(); if 
(keystore.HaveKey(keyID)) return ISMINE_SPENDABLE; break; case TX_PUBKEYHASH: keyID = CKeyID(uint160(vSolutions[0])); if (keystore.HaveKey(keyID)) return ISMINE_SPENDABLE; break; case TX_SCRIPTHASH: { CScriptID scriptID = CScriptID(uint160(vSolutions[0])); CScript subscript; if (keystore.GetCScript(scriptID, subscript)) { isminetype ret = IsMine(keystore, subscript); if (ret == ISMINE_SPENDABLE) return ret; } break; } case TX_MULTISIG: { // Only consider transactions "mine" if we own ALL the // keys involved. multi-signature transactions that are // partially owned (somebody else has a key that can spend // them) enable spend-out-from-under-you attacks, especially // in shared-wallet situations. vector<valtype> keys(vSolutions.begin()+1, vSolutions.begin()+vSolutions.size()-1); if (HaveKeys(keys, keystore) == keys.size()) return ISMINE_SPENDABLE; break; } } /* MCHN START */ // if (keystore.HaveWatchOnly(scriptPubKey)) if (keystore.HaveWatchOnly(scriptPubKey.RemoveOpDrops())) /* MCHN END */ return ISMINE_WATCH_ONLY; return ISMINE_NO; }
{ "pile_set_name": "Github" }
Tatabánya Photovoltaics Plant The Tatabánya Photovoltaics Plant will be Hungary's largest photovoltaics production plant, with an annual capacity of 80 MW of solar cells. References Category:Photovoltaic power stations in Hungary
{ "pile_set_name": "Wikipedia (en)" }
Swimming at the 2013 World Aquatics Championships – Men's 4 × 100 metre medley relay The men's 4 × 100 metre medley relay event in swimming at the 2013 World Aquatics Championships took place on 4 August at the Palau Sant Jordi in Barcelona, Spain. Records Prior to this competition, the existing world and championship records were: Results Heats The heats were held at 10:51. Final The final was held at 19:31. References External links Barcelona 2013 Swimming Coverage Medley relay 4x100 metre, men's World Aquatics Championships
{ "pile_set_name": "Wikipedia (en)" }
Mild to moderate Crohn's disease: an evidence-based treatment algorithm. Crohn's disease is a chronic inflammatory condition with a relapsing-remitting disease course. Treatment often requires both induction and maintenance strategies. The management of mild to moderate Crohn's disease is challenging because the natural history of mild disease is not known and effective treatment options are limited. In this article, our objective is to provide a brief overview of the evidence supporting current therapies in the treatment of mild to moderate luminal Crohn's disease and to explore a few of the newer therapeutic options. As induction agents for mild to moderately active Crohn's disease, there is reasonable evidence to support the use of budesonide for terminal ileal and right colonic disease, and sulfasalazine for colonic disease. Although budesonide can be used in the short term (3-6 months) for maintenance of quiescent disease, there are no effective therapies for the long-term maintenance of mild to moderate Crohn's disease. Mesalazine appears to have no role in either the treatment of active or quiescent disease. Currently, there is insufficient data to draw conclusions on the potential role of antibacterials, probiotics or prebiotics.
{ "pile_set_name": "PubMed Abstracts" }
Russell Simmons: Giving Back and Building Peace Russell Simmons is a name as famous as the household brand he created nearly 40 years ago. As the co-founder of Def Jam Records, and former CEO of the iconic record company—Simmons has become the quintessential businessman spanning across cultural boundaries and sealing hip hop into a billion-dollar industry. Creating a record label with then-partner and Grammy award-winning producer Rick Rubin—the two are responsible for the music careers of LL Cool J., The Beastie Boys, Public Enemy as well building a foundation to cultivate some of today’s leading label music executives. From the creation of Rush Communications to expanding into cable content with HBO’s Def Comedy Jam and Def Poetry, and the first label executive to build a successful fashion empire with Phat Farm—Simmons has proved there is no limitations to his vision. Russell Simmons poses for a portrait on Thursday, Jan. 14, 2016 in New York. (Photo by Scott Gries/Invision/AP) In 2013, he co-founded UniRush—creating the RushCard, a prepaid debit card provider to low-income customers. Stepping away from the record label side 1999, Simmons has become actively involved over the last two decades with more philanthropic and community related projects including his partnerships with organizations such as The Peacekeepers and other nonprofits. “We’ve been involved with The Peacekeepers for years now in New York—my neighborhood that I grew up in—also other organizations that are doing this type of work. This particular program, we decided to spearhead ourselves with some of the community members. Those local organizations that are on the ground and are key and they have been overlooked and they are not part of city funding. The unions don’t want to take the money and give it to them,” he said. Over the years, Simmons has worked with the group’s founder, Dr. Dennis Muhammad, hiring them as his security detail for special events. 
The men believed in much of the same viewpoints in building back their communities. “I like to hire them so that other people feel safe hiring them. If you’re on a subway at 3 a.m. in the morning and someone is coming on the platform—you hope that it’s a nun or a bow tie. I like their love for the community and what they’ve done,” Simmons explains. “They have genuine concern and love for the community.” Recently, RushCard launched a national program Keep the Peace initiative, which provides comprehensive community peacekeeping and empowerment to various programs on the South Side of Chicago. The company has donated $50,000 to bring a holistic approach to reduce violence. Having grown up in Queens, NY, Simmons is no stranger to understanding the obstacles of becoming a statistic if necessary resources aren’t available. He has witnessed a similar pattern around the country in predominately Black communities. He believes the problem is clear and concise. “The lack of jobs, opportunity and good education. A lot has been done to the Black community over the decades and over the centuries, which has not been healed. The relationship between the infrastructure of this country and communities of color is not helping especially African-Americans–where it hurts so much. It’s not helping to change the quality of life,” he says. What has been done to our psyche has not been fixed. There’s a lot of work that has to be done and community work is the best way.” As a purveyor of First Amendment rights and the creative expression of Urban culture, Simmons takes into account the influence of music, how it influences reaction both good and bad. He digresses a little. “Listen to the rap music—it’s reflection of some of the ignorance and some of the beauty in other cases. Nothing has really advanced in the Black communities–these drug laws for instance that have helped to destroy the fabric of the Black community,” says Simmons. 
“I’m just trying do what I can to try to add something positive back into the struggle. People were arrested for being diseased, educated in criminal behavior and dumped back into the hood. It happens in such a dramatic way around this country. Actually, Jeff Sessions has just rolled back the work it has taken me 20 years to do on the drug laws.” Through RushCard’s national campaign, the key for Simmons is to incorporate a combination of resources from meditation to introducing Chicago area schools to financial literacy and more importantly, bringing a holistic teaching of yoga to young people early in their development. “Yes, kids need to know they have every opportunity to grow. They’re not being told that properly. Through challenge and through stillness, people can see, innovate and live a better life. The creation of a better life is from the imagination.” Simmons says, “imagination is God itself” and shares sometimes society can also cultivate discouragement. He adds, “You’re sitting in a prime position, although they tell you it’s terrible. You have an opportunity to grow—to become a great leader and there’s so many examples of what you can become but our education system and the poisons they’ve put in our communities have stopped us from seeing our truth.” Courtesy of Rolling Out. Having worked with local leaders such as Father Michael Pfleger and the city of Chicago’s first lady, Amy Rule—he has convinced Mayor Rahm Emanuel why meditation is needed in the schools. “At first, Rahm Emanuel wasn’t excited but we got his wife involved and she got him involved. 
Now we have meditation in the schools where the most violence has occurred and I know it will be very successful as it has been around the country and around the world.” As one of the first people to utilize the strength of hip hop culture and influence into politics, Simmons has become an admirer of Chicago artist Chance the Rapper and recently had a meeting with his younger brother and recording artist Taylor Bennett. “I think we’re in a space where a lot of artists will contribute going forward in a political dialogue. That platform should be a voice for resistance and it’s a voice for hip hop.”
{ "pile_set_name": "Pile-CC" }
Languages: Script: You are here The Herald of the National Museum of Bosnia and Herzegovina The Herald of the National Museum of Bosnia and Herzegovina is the oldest scientific journal in the country. The first issue was published on 1 January 1889, less than a year after the founding of the museum. In the beginning, the Herald was published quarterly, and its first editor was Kosta Hörmann, who served from 1889 to 1906. As part of the so-called old series of the Herald (1889–1943), 55 annual issues comprising 134 volumes were published. The big names of Bosnia and Herzegovina science and museology of the day took turns at the helm of the editorial board: Kosta Hörmann, Ćiro Truhelka (1906–1921), Vladislav Skarić (1921/22–1936), Mihovil Mandić (1936–1941), Jozo Petrović (1941–1942), and Vejsil Ćurčić (1943). The second chapter in the history of the Herald started after World War II, with the start of the new series in 1946. The volumes were divided according to the field the articles covered: Social Science (1946–1953), History and Ethnography (1954–1957), Archaeology (1954–), Ethnology (1958–) and Natural Science (1945–). The concept of the journal, set forth in a programmatic article in the first issue, has remained the same. The mission of the Herald is to publish scientific and professional articles from all the fields studied at the Museum, and to provide a platform for Museum staff and external experts alike. As of 2010, the Herald has a new look: the format is larger, and the articles in the volumes Archaeology and Ethnology are published bilingually, in Bosnian/Croatian/Serbian and English, while the Natural Sciences volume is published in English only. In addition to its scientific and educational significance, the Herald has another important role. Starting with the first issue, it engaged in publication exchanges with other museums and similar institutions worldwide. 
Today, publications are exchanged with 400 institutions internationally and with 45 institutions in Bosnia and Herzegovina. The Herald can be viewed and searched via the digital archive INFOBIRO which is digitising this publication.
{ "pile_set_name": "Pile-CC" }
50 SHARES Share Tweet Android 6.0 Marshmallow was released in August and a number of smartphone companies announced their Android Marshmallow update schedule for selected devices. This includes Motorola, Sony Xperia devices, Huawei, HTC and others except Samsung. Samsung hasn’t yet officially announced its Android 6.0 Marshmallow update schedule, but we have some exclusive information about the Samsung Galaxy Android Marshmallow update schedule. So, read on to check if your Samsung Galaxy device is eligible for Android Marshmallow and when. Specspricenigeria has information on Android Marshmallow. The Samsung Galaxy devices that will receive the Android Marshmallow update include a number of Samsung devices, along with the time frame of the update. The Android Marshmallow update for Samsung Galaxy devices will be rolled out in two phases. In the first phase of the update, the following devices will receive the update: Galaxy Note 5 (SM-N920G) and Galaxy S6 Edge+ (SM-928G) are approved to receive Android Marshmallow in December 2015 Galaxy S6 (SM-G920I) and Galaxy S6 Edge (SM-G925I) are approved to receive Android Marshmallow in January 2016 Galaxy Note 4 (SM-910G) and Galaxy Note Edge (SM-915G) are approved to receive Android Marshmallow in February 2016 The first phase also includes devices like Galaxy S5 (SM-G900G) and Galaxy Alpha (SM-G850Y) that are awaiting approval for the Android Marshmallow update, and there is no estimated time mentioned in the leak for these two devices in the first phase. In the 2nd phase of the Android Marshmallow update, the following devices will receive the update: Galaxy A8 (SM-A8000) Galaxy A7 (SM-A700F) Galaxy A5 (SM-A500F) Galaxy A3 (SM-A300F) Galaxy E7 (SM-E700H) Galaxy E5 (SM-E500H) The leaked roadmap image also confirms that the remaining Samsung devices, not mentioned in the update roadmap list, will receive the Marshmallow update in the 3rd phase that will start after the initial testing.
As we already know that Android marshmallow comes with new exciting features that includes Doze, Google On Tap, advanced permissions, new Chrome feature, handle Web links in new ways, Android Pay and many more other features. The new Android 6.0 Marshmallow comes with a feature called the new RAM manager and it also comes with support for USB Type-C ports and the new Do Not Disturb mode. So, is your Samsung Galaxy device eligible for the update? Do share with us via comments below.
{ "pile_set_name": "OpenWebText2" }
The Queen of Zamba The Queen of Zamba is a science fiction novel by American writer L. Sprague de Camp, the first book of his Viagens Interplanetarias series and its subseries of stories set on the fictional planet Krishna. It was written between November 1948 and January 1949 and first published in the magazine Astounding Science Fiction as a two-part serial in the issues for August and September 1949. It was first published in book form as a paperback by Ace Books in 1954 as an "Ace Double" issued back-to-back with Clifford D. Simak's novel Ring Around the Sun. This version was editorially retitled Cosmic Manhunt and introduced a number of textual changes disapproved by the author. The novel was first issued by itself in another paperback edition under the title A Planet Called Krishna, published in England by Compact Books in 1966. A new paperback edition restoring the author's preferred title and text and including the Krishna short story "Perpetual Motion" was published by Dale Books in 1977. This edition was reprinted by Ace Books in 1982 as part of the standard edition of the Krishna novels. The novel has been translated into German, French, Italian, Czech, and Polish. An E-book edition was published by Gollancz's SF Gateway imprint on September 29, 2011 as part of a general release of de Camp's works in electronic form. As with all of the "Krishna" novels, the title of The Queen of Zamba has a "Z" in it, a practice de Camp claimed to have devised to keep track of them. Short stories in the series do not follow the practice, nor do Viagens Interplanetarias works not set on Krishna. Plot summary Victor Hasselborg, a 22nd-century private eye, is hired by a Syrian businessman to track down his missing daughter Julnar Batruni, who it turns out has run off with adventurer Anthony Fallon. Immediate complications ensue when Hasselborg finds himself falling for Alexandra, Fallon's abandoned wife. 
Discovering that the fugitives have gone off-planet, he tracks them to the planet Krishna, an Earth-like world of the star Tau Ceti with humanoid inhabitants but a medieval culture. Disguising himself as a native Krishnan, Hasselborg goes after them, little-knowing he has entered a web of interplanetary intrigue, spying, and gun-running... Anthony Fallon, the antagonist in The Queen of Zamba, would reappear in two later Krishna novels; as the protagonist of The Tower of Zanid and as a minor character in The Swords of Zinjaban. Setting The planet Krishna is de Camp's premier creation in the Sword and Planet genre, representing both a tribute to the Barsoom novels of Edgar Rice Burroughs and an attempt to "get it right", reconstructing the concept logically, without what he regarded as Burroughs' biological and technological absurdities. De Camp intended the stories as "pure entertainment in the form of light, humorous, swashbuckling, interplanetary adventure-romances - a sort of sophisticated Burroughs-type story, more carefully thought out than their prototypes." As dated in the 1959 version of de Camp's essay "The Krishna Stories" and James Cambias's GURPS Planet Krishna (a 1997 gaming guide to the Viagens series authorized by de Camp), the Krishnan events of "The Queen of Zamba" take place in the year 2138, falling between "Perpetual Motion" and "Calories", and making it the third story set on Krishna in terms of chronology. The primary portion of the story is preceded and followed by scenes on Earth, each of which is over a decade removed from the main action. Reception Early reviewers of the Ace edition were not terribly impressed by the book. J. Francis McComas called it "a tedious account of a private eye's quest through space for a runaway heiress," with "[t]he chase ... a pretty drab affair, without the wit and charm usually found in this author's work." 
Groff Conklin characterized it as "a cops-and-robbers adventure," rating it "fast-moving and moderately sophisticated entertainment, bubble-light though not bubble-headed, and considerably below the author's best." Anthony Boucher described the novel as "a fairly primitive and predictable adventure story which is 'science fiction' because it is said to happen on the remote planet Krishna." More recent critics have struck much the same note. William Mattathias Robins called it "a simple detective adventure in an exotic setting." Colleen Power wrote more charitably that "[w]hile the novel seems dated, with its tough-talking detective slang and philosophy, [its] satire combines nicely with comic swordplay to present the reader with a short, light science fiction detective novel." She also pointed out that "the overwhelming concern ... to prevent modern technological humans from influencing or interfering with the normal development of native cultures" in it and the other Viagens novels "predat[es] 'Star Trek's' 'prime directive' by nearly twenty years." David Pringle characterized it as "[l]ight-hearted planetary romance -- or fantasy in an ostensibly science fictional setting." Both Boucher and Robins note the novel's primacy in the Viagens series, suggesting they see its primary significance in the establishment of the setting. Notes External links "A Planet Called Krishna" - a book review by Simon McLeish Category:1949 American novels Category:1949 science fiction novels Category:American science fiction novels Category:Novels by L. Sprague de Camp Category:Works originally published in Analog Science Fiction and Fact Category:Novels first published in serial form Category:Planetary romances Category:Tau Ceti in fiction
{ "pile_set_name": "Wikipedia (en)" }
Q: Can not complete GET request to my localhost or to my external API (express app) I am building a React app to consume an API that I built using express.js. My React app successfully gets data using fetch or axios to query data using an official API ("https://jsonplaceholder.typicode.com/users"), but I keep getting an error when I try to get from my own API ("XXX.us-east-1.elasticbeanstalk.com/api/v1/users") or from my localhost ("http://localhost:81/api/v1/users"). Questions Is my GET request wrong? Is there a problem with the return from my API? Would you recommend any resources to learn to create a RESTful API with Express.js, set it up in AWS and consume this API with a React app? Thank you! The error is: Access to fetch at 'XXX.us-east-1.elasticbeanstalk.com/api/v1/users' from origin 'http://localhost:3000' has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource. If an opaque response serves your needs, set the request's mode to 'no-cors' to fetch the resource with CORS disabled Using fetch: componentDidMount() { fetch ('https://jsonplaceholder.typicode.com/posts/1') // ('XXX.us-east-1.elasticbeanstalk.com/api/v1/users') // ('http://localhost:81/api/v1/users') .then(response => response.json()) .then(data => this.setState({ title: data.title, body: data.body }) ); } Using axios: componentDidMount() { axios .get('https://jsonplaceholder.typicode.com/users').then(res => this.setState({ users: res.data })) // .get('http://localhost:81/api/v1/users', { crossDomain: true }).then(res => this.setState({ users: res.data })) .catch(function (error) { console.log("Error"); }); } A: It's actually a CORS issue in Express, so to solve it add the following in the start file: var express = require('express'); var cors = require('cors'); var app = express(); app.use(cors());
{ "pile_set_name": "StackExchange" }
[Voiding disorders in childhood: from diagnosis to treatment]. In voiding disorders in childhood, after a precise diagnosis, treatment can be provided. Aspecific hygienic and dietetic measures are the basis of care in all micturating disorders and frequently must be established to allow a precise diagnosis. In case of enuresis, restriction of beverage and diuretic foods is recommended in the evening. Other treatments for enuresis should be proposed to motivated children. In the polyuric form of enuresis, the treatment is desmopressin (DDAVP) and in the form with low bladder capacity, alarms or a combination of these 2 treatments. In dysfunctional voiding, after caring for the secondary causes, and depending on the characteristics of the disorder, the first-step treatment is pelvic floor rehabilitation with or without anticholinergic therapy. Other medical treatments are used in a second step. Isolated urethral instability remains controversial.
{ "pile_set_name": "PubMed Abstracts" }
Binocular enhancement of color discrimination in a deutan. A 31-year-old white male deutan produced reliably different profiles when examined binocularly and monocularly with a Farnsworth-Munsell 100-hue test. Discrimination in the long wavelengths improved under the binocular conditions. Intensive testing yielded no information to account for the phenomenon.
{ "pile_set_name": "PubMed Abstracts" }
Friday, November 20, 2015 Hans-Adam and Alois Sign Initiatives to Help Tunisia, Mauritania and South Sudan Photo: LFSG During a small ceremony at Schloss Vaduz last week, the Liechtenstein Foundation for State Governance (LFSG) and the Global Sphere Holding signed three memoranda of understanding for a strategic alliance with regard to financial literacy, capacity building, public education and political/financial inclusion initiatives of LFSG in Tunisia, Mauritania and South Sudan. The memoranda were signed by Prince Hans-Adam and Hereditary Prince Alois on the Principality's behalf. Additional initiatives, including in Bahrain and Senegal, are under discussion within the framework of this strategic alliance.
{ "pile_set_name": "Pile-CC" }
of 624 and 22191. 39 What is the greatest common factor of 378 and 13392? 54 Calculate the greatest common divisor of 7992 and 6021. 27 Calculate the highest common factor of 8786 and 437. 23 Calculate the highest common divisor of 2914 and 4136. 94 Calculate the greatest common divisor of 344 and 8815. 43 Calculate the greatest common divisor of 32665 and 2363. 139 Calculate the highest common divisor of 3199 and 497. 7 Calculate the highest common divisor of 38930 and 34. 34 Calculate the greatest common factor of 62826 and 1036. 74 Calculate the greatest common factor of 4160 and 69680. 1040 Calculate the highest common factor of 48 and 134824. 8 Calculate the highest common divisor of 15948 and 36. 36 What is the greatest common factor of 259243 and 205? 41 What is the highest common factor of 96 and 881520? 48 Calculate the greatest common divisor of 1898 and 1950. 26 Calculate the greatest common divisor of 1126466 and 22. 22 Calculate the highest common factor of 5340 and 820. 20 What is the highest common divisor of 16176 and 1397202? 2022 Calculate the highest common divisor of 639 and 90. 9 What is the greatest common divisor of 10 and 155282? 2 What is the highest common factor of 5593 and 21385? 329 Calculate the greatest common divisor of 548403 and 3. 3 Calculate the greatest common divisor of 88 and 65692. 44 What is the highest common factor of 291907 and 476? 119 What is the greatest common divisor of 301852 and 493? 17 What is the greatest common factor of 472 and 2780552? 472 What is the highest common factor of 434 and 19754812? 434 What is the highest common factor of 268 and 110818? 134 What is the highest common factor of 1707095 and 195? 65 Calculate the greatest common divisor of 1 and 124213. 1 Calculate the greatest common factor of 35 and 708232. 7 What is the highest common factor of 1164 and 1212? 12 What is the greatest common factor of 370285 and 206? 103 Calculate the highest common divisor of 40788 and 396. 
396 What is the highest common factor of 3580 and 2260? 20 Calculate the greatest common divisor of 25 and 22285. 5 Calculate the highest common factor of 497 and 16117. 71 What is the highest common divisor of 900 and 17676? 36 What is the highest common divisor of 32594 and 4128? 86 Calculate the highest common factor of 103 and 139. 1 What is the greatest common divisor of 1095 and 3435? 15 Calculate the greatest common factor of 556 and 93686. 278 What is the highest common divisor of 5 and 5? 5 What is the highest common factor of 15 and 6465? 15 What is the highest common divisor of 8664 and 42? 6 What is the greatest common factor of 372 and 27094? 62 What is the greatest common factor of 534 and 1598796? 534 Calculate the highest common divisor of 1008 and 4375. 7 Calculate the highest common factor of 834 and 797304. 834 Calculate the greatest common factor of 1342 and 1220. 122 Calculate the greatest common divisor of 228 and 8531. 19 Calculate the greatest common factor of 20 and 634540. 20 What is the highest common factor of 231853 and 515? 103 Calculate the highest common factor of 2870 and 290. 10 What is the greatest common factor of 209 and 127319? 19 What is the highest common factor of 9 and 3645? 9 Calculate the highest common factor of 1632 and 154938. 102 What is the greatest common divisor of 138 and 761737? 23 What is the highest common divisor of 173 and 753761? 173 Calculate the greatest common divisor of 75447 and 505. 101 What is the highest common divisor of 386 and 29722? 386 Calculate the greatest common divisor of 106 and 2026. 2 Calculate the greatest common divisor of 3510275 and 50. 25 Calculate the highest common factor of 104284 and 4030. 62 Calculate the highest common factor of 14196 and 1027. 13 Calculate the highest common factor of 150 and 90650. 50 What is the greatest common divisor of 955422 and 4329? 117 Calculate the highest common factor of 193615 and 5. 5 What is the greatest common divisor of 17808 and 44202? 
318 Calculate the greatest common factor of 10212 and 777. 111 What is the highest common factor of 60 and 28013220? 60 What is the highest common factor of 9459 and 24173? 1051 What is the highest common divisor of 50 and 6092650? 50 Calculate the highest common divisor of 33 and 579777. 33 What is the highest common factor of 8013 and 12? 3 What is the greatest common factor of 1364 and 195393? 341 What is the greatest common factor of 285 and 28515? 15 What is the greatest common divisor of 2834 and 598? 26 Calculate the greatest common factor of 923532 and 24. 12 Calculate the greatest common factor of 532 and 185402. 266 Calculate the highest common factor of 5166 and 36654. 246 Calculate the highest common divisor of 58859 and 142. 71 Calculate the highest common factor of 54720 and 128. 64 What is the highest common factor of 316 and 92035? 79 What is the greatest common divisor of 81 and 13737? 3 What is the greatest common divisor of 8 and 263572? 4 Calculate the highest common divisor of 28015 and 715. 65 Calculate the highest common divisor of 3266030 and 940. 470 Calculate the highest common divisor of 13692 and 1392. 12 Calculate the highest common factor of 1006 and 24. 2 Calculate the greatest common factor of 1135219 and 639. 71 Calculate the highest common factor of 8983 and 1625. 13 Calculate the greatest common factor of 485866 and 1392. 58 Calculate the highest common divisor of 62543 and 663. 221 What is the greatest common divisor of 3296 and 1120? 32 Calculate the highest common factor of 1709459 and 373. 373 Calculate the greatest common factor of 189911 and 359. 359 What is the highest common factor of 1903990 and 5? 5 Calculate the highest common factor of 3895 and 43870. 205 What is the greatest common divisor of 93294 and 40896? 1278 Calculate the highest common factor of 63 and 27867. 21 Calculate the highest common divisor of 1162960 and 30. 10 What is the greatest common factor of 5500 and 580? 
20 Calculate the greatest common divisor of 2576 and 1242. 46 What is the greatest common factor of 199 and 12139? 199 Calculate the highest common divisor of 4014 and 276074. 446 What is the highest common factor of 12416 and 13760? 64 What is the greatest common divisor of 160 and 52560? 80 What is the greatest common factor of 285449 and 131? 131 Calculate the greatest common factor of 52 and 123253. 13 Calculate the highest common factor of 378070 and 154. 154 What is the greatest common factor of 3198 and 465186? 246 What is the highest common divisor of 109114 and 1602? 178 Calculate the highest common divisor of 19 and 902500. 19 What is the greatest common factor of 56628 and 6336? 396 Calculate the highest common factor of 13790 and 2167. 197 What is the greatest common divisor of 25 and 202420? 5 What is the highest common divisor of 2206 and 8? 2 Calculate the highest common factor of 16 and 636184. 8 What is the highest common divisor of 270 and 2862? 54 What is the greatest common divisor of 31833 and 27510? 393 Calculate the greatest common factor of 229541 and 793. 13 Calculate the highest common factor of 4 and 38198. 2 Calculate the greatest common factor of 3523 and 29159. 13 What is the greatest common divisor of 6561 and 45? 9 What is the greatest common divisor of 636174 and 126? 126 What is the greatest common factor of 37611 and 900873? 1791 What is the highest common divisor of 1664 and 2121856? 128 What is the greatest common factor of 122 and 431? 1 Calculate the greatest common divisor of 197 and 235809. 197 What is the highest common divisor of 1289032 and 708? 236 Calculate the greatest common divisor of 15230 and 40. 10 Calculate the highest common factor of 21052 and 12. 4 What is the greatest common divisor of 26004 and 363? 33 Calculate the highest common factor of 39 and 13793. 13 Calculate the highest common factor of 7 and 9528701. 7 What is the highest common divisor of 975 and 2018250? 
975 Calculate the highest common factor of 10593 and 1980. 99 Calculate the highest common divisor of 597 and 1815. 3 Calculate the greatest common factor of 796 and 791622. 398 Calculate the greatest common factor of 73 and 12237209. 73 Calculate the highest common divisor of 10 and 4743070. 10 Calculate the
{ "pile_set_name": "DM Mathematics" }
Q: Why does memory allocated from inside a DLL become invalid after FreeLibrary()? I had this bug today which turned out to be because I use a string allocated from inside my DLL after calling FreeLibrary(). This is a simple example reproducing the crash. This goes in DLL: void dllFunc(char **output) { *output = strdup("Hello"); // strdup uses malloc } This is in the EXE that loads the DLL: void exeFunc() { char *output; dllFunc(&output); std::string s1 = output; // This succeeds. FreeLibrary(dll); std::string s2 = output; // This crashes with access violation. } I read the documentation of FreeLibrary() but I couldn't find anything about memory becoming invalid after it's called. Edit I just realized that I had been using VS2008 toolchain for the DLL while using VS2010 toolchain for the EXE (I was using VS2010 as IDE for both, but you can select the toolchain from the project settings). Setting the toolchain to VS2010 for the DLL as well removed the crash. A: If you choose static linking with the MSVCRT (C Runtime) library, you will get the behavior you describe. Same thing also happens if your EXE and DLL are dynamically linked to an MSVCRT DLL, but are using different versions. Or if they are matched to the same version, but one is using DEBUG and the other is using RETAIL. In other words, memory is only as good as the lifetime of the MSVCRTxxx.dll used to make the allocation. I just saw your update to your question - yes, mixing and matching the CRT between VS 2008 and 2010 is the exact reason for the crash. If both your DLL and EXE are dynamically linked to the same version of the MSVCRT DLL, then you share the memory heap and you avoid the problem you are having. The standard practice is this: If your exported DLL function returns anything that needs to be "freed" or "released" later, then standard practice is to provide an additional function exported out of the DLL to handle de-allocations. 
You can configure both the EXE and DLL's C Runtime linkage from the Code-Generation page for the C/C++ project settings in your project. Picture here: http://imgur.com/uld4KYF.png A: This occurs because each Dll creates its own memory heap (which malloc and its C friends, along with new will use internally, generally via HeapAlloc), and when the Dll is freed, so is its heap. Refer to this MSDN article for more Dll memory caveats. unless you are using a custom memory allocator, shared across all your binaries, you need to keep dynamically allocated memory within the module that created it (unless you can 100% guarantee that the object will not outlive its creator).
{ "pile_set_name": "StackExchange" }
Dotted Cardstock - Pepper Product Details A heavy-duty paper designed for mounting surfaces, scrapbooking, framing, and so much more. Add a beautiful, textured effect to all sorts of crafting projects. Includes one 12 x 12 inch sheet Product Details A heavy-duty paper designed for mounting surfaces, scrapbooking, framing, and so much more. Add a beautiful, textured effect to all sorts of crafting projects. Includes one 12 x 12 inch sheet - Show Less
{ "pile_set_name": "Pile-CC" }
Q: How to get node content of XML with Dom4j in java I have a XML file like: <description> <text>blahblah</text> <code>code</code> <text>blah</text> </description> I've navigated to the node description, and I want to read the full content including the <text> and so on. I've used the getText(), but it returned empty string. I've used the getStringValue(), but it filtered all <text>. I've used the asXML(), the result is close, but the result contains <description> which I don't want. Is there a method to get the XML content of a element? A: Something like this: public static void main(String[] args) throws DocumentException { String xml = "<description><text>blahblah</text><code>code</code><text>blah</text></description>"; SAXReader reader = new SAXReader(); Document doc = reader.read(new StringReader(xml)); Element description = doc.getRootElement(); String content = getContent(description); System.out.println(content); } private static String getContent(Element element) { StringBuilder builder = new StringBuilder(); for (Iterator<Element> i = element.elementIterator(); i.hasNext();) { Element e = i.next(); builder.append(e.asXML()); } return builder.toString(); } Note that if the element has text content itself, this won't return the text content, only the child nodes. A: Assume that document is and instance of org.dom4j.Document, then String xPath = "description"; List<Node> nodes = document.selectNodes( xPath ); for (Node node : nodes) { node.asXML() }
{ "pile_set_name": "StackExchange" }
We noticed that you're using an unsupported browser. The TripAdvisor website may not display properly. We support the following browsers: Windows:Internet Explorer, Mozilla Firefox, Google Chrome. Mac:Safari. New!Find and book your ideal hotel on TripAdvisor — and get the lowest prices I ended up with a great deal to stay here with my... - Review of The Village Hotel I ended up with a great deal to stay here with my wife through living social for around $80 a night. I'd been on the property before and it had changed slightly. The only hot tubs are in the main lodging area which was across the driveway from where we stayed. This made it slightly annoying to walk back and forth in the cold (especially after being in the tub). However, access to the lifts is almost unbeatable and the easy walk on Main Street for dinner and drinks is convenient. Room Tip: Stay in the main lodging area if able See more room tips Stayed November 2012, traveled as a couple Value Location Sleep Quality Rooms Cleanliness Service Helpful? Thank Brian R Report Ask Brian R about The Village Hotel This review is the subjective opinion of a TripAdvisor member and not of TripAdvisor LLC. Virginia P, Front Office Manager at The Village Hotel, responded to this review, January 23, 2013 Dear Valued Guest, Thank you for taking a moment to write a review on our trip advisor page! We truly appreciate it. We are glad to see you enjoyed your stay with us! I did want to mention, and I apologize if we did not mention it at check in but, we do have 3 indoor hot tubs located on the first floor in the Village Hotel building. The outdoor hot tubs are located in the main building up one floor from our front desk. However, there is a way to walk through the parking garages to get to the outdoor hot tubs instead of going outside. I apologize that we did not inform you of those options and will certainly address it with our front office staff for our future guests. 
We hope you choose to stay at the Village at Breckenridge in the future! Sincerely, Ginny Report response as inappropriate Thank you. We appreciate your input. This response is the subjective opinion of the management representative and not of TripAdvisor LLC. My boyfriend and I booked a two night stay through livingsocial and couldn't have been more pleased. The rooms were spacious and clean. Bathroom was a bit small but it was ok. This hotel is close to Peak 9 and main street so everything we wanted to do was super close and within five minutes walking distance. Definitely plan on visiting again!! Stayed January 2013, traveled as a couple Value Location Sleep Quality Rooms Cleanliness Service Helpful? Thank Lynn W Report Ask Lynn W about The Village Hotel This review is the subjective opinion of a TripAdvisor member and not of TripAdvisor LLC. Virginia P, Front Office Manager at The Village Hotel, responded to this review, January 23, 2013 Dear Valued Guest, Thank you for taking the time to write a review on our trip advisor page! We really appreciate the feedback and are so happy to hear you enjoyed your stay with us! Hope to see you again in the future! Think Snow! Sincerely, Ginny Report response as inappropriate Thank you. We appreciate your input. This response is the subjective opinion of the management representative and not of TripAdvisor LLC. I'm somewhat disappointed by the quality of the hotel for the money we spent. I understand spending extra money when you stay for a holiday but come on, the room a bit dumpy with not even a queen bed, it was a double. The bathroom was very basic as well. But I have to say, the staff who work at the hotel made up for the room. The staff are genuinely nice, helpful, cheerful people who work very hard to help you with whatever you need. 
I was highly impressed with the quality of the staff.If you are interested in being close to the slopes and not spending a lot of time in your room, this is a decent choice; it's very close to downtown Breck and a 5 minute walk to the Quicksilver Lift. Stayed December 2012, traveled as a couple Value Location Sleep Quality Rooms Cleanliness Service Helpful? Thank Mary H Report Ask Mary H about The Village Hotel This review is the subjective opinion of a TripAdvisor member and not of TripAdvisor LLC. Virginia P, Front Office Manager at The Village Hotel, responded to this review, January 4, 2013 Dear Valued Guest, Thank you for taking the time to write a review on our trip advisor page! We are glad you decided to stay at the Village Hotel for your trip to Breckenridge! Thank you for your kind words about our staff. We truly aim to go above and beyond in providing exceptional service for all of our guests. We also appreciate your feedback in regards to our guest rooms'. We certainly consider all of these comments and hope we can improve our accommodations to your standards here in the future! Sincerely, Ginny Report response as inappropriate Thank you. We appreciate your input. This response is the subjective opinion of the management representative and not of TripAdvisor LLC. When I'm on a ski vacation, time is money. The less time I spend doing stuff I don't want to do (riding buses, parking, walking to lifts, walking to bus stops, dealing with hotel issues, etc.,) the better I feel about my trip. The Village Hotel helps maximize your "fun time" in Breck. The rooms aren't super-posh, but they are clean and comfortable. We had a king bed, and I was impressed with how comfortable it was. We begged for a good view, and we got one. The toilet and shower are attached to the room, with the sink outside of the "toilet room" and in the main room. That's not uncommon in a hotel, but the size of the toilet/shower room was tiny. 
It was so small the door had to be shut before you sat on the toilet. No biggie, but good to know. Also, I think housekeeping only does about 75% of what they do at other hotels. Our room was not vacuumed, and, strangely, they tipped themselves by taking a can of our unopened mixed nuts. I thought that was weird. The front staff were extremely helpful and courteous. Obviously, they hire and train well (which is getting hard to find these days). There is a $20 daily resort fee tacked on. This includes wifi, parking (covered), fitness center and hot tubs. The rooms have refrigerators, but no microwave. An outlet splitter would be a good thing to bring. This place was built before people travelled with 93 things that need to be plugged-in. Everything worth doing is within walking distance. The location cannot be beat. Reading the reviews, and looking at the pics, I was a bit hesitant about staying here. I consider myself well travelled, and I had no regrets. If you like powder, ask about the First Tracks pass. Also, buying lift tickets and renting skis when confirming with Breck properties can save a lot of money. Cheers, and pray for snow! Stayed December 2012, traveled as a couple Value Location Sleep Quality Rooms Cleanliness Service Helpful? Thank Otic9000 Report Ask Otic9000 about The Village Hotel This review is the subjective opinion of a TripAdvisor member and not of TripAdvisor LLC. Virginia P, Front Office Manager at The Village Hotel, responded to this review, December 30, 2012 Dear Valued Guest, Thank you for taking the time to write this review on our trip advisor page! We appreciate all the details you added about our location and property and staff! Everything you mentioned is true and we really appreciate your candid account of your stay with us. We hope to see you again at the Village Hotel in the future! Sincerely, Ginny Petrovek Report response as inappropriate Thank you. We appreciate your input. 
This response is the subjective opinion of the management representative and not of TripAdvisor LLC. This hotel was clean and in perfect location to downtown Breckenridge. We had an unexpected problem with our room keys and unfortunately got locked out of our room! Jonathan at the front desk was wonderful as was Leann the next morning. They resolved the issue in a professional/understanding manner. I would highly recommend this hotel. I would absolutely stay here again. Stayed November 2012, traveled as a couple Value Location Sleep Quality Rooms Cleanliness Service Helpful? Thank btm1323 Report Ask btm1323 about The Village Hotel This review is the subjective opinion of a TripAdvisor member and not of TripAdvisor LLC. Virginia P, Front Office Manager at The Village Hotel, responded to this review, December 12, 2012 Dear Guest, Thank you for taking the time to write a review on our trip advisor page. I am sorry to hear about the issues you had with your key cards during your stay with us. We certainly take that issue seriously and are taking care of that issue for our future guests. I am happy to know that our night audit, Jonathan, and front desk manager, Leanne, provided you with exceptional service to resolve the problem. We hope you stay with us again during your next visit to Breckenridge! Sincerely, Ginny Report response as inappropriate Thank you. We appreciate your input. This response is the subjective opinion of the management representative and not of TripAdvisor LLC. I have visited Breckenridge many times, but always stayed at the well-appointed Breckenridge Mountain Lodge. This time I thought I would treat myself to a ski-in/ski-out accommodation. After carefully considering Mountain Thunder Lodge, and One Ski Hill Place, I knew that the Village Hotel was clearly the best choice. The location cannot be beat. New resorts are great, but being able to walk to everything carries a certain charm. 
Upon arrival, I walked into the main lobby and was speechless. It truly is breathtaking. The décor is just outstanding. My room wasn’t ready when I arrived, so I sat in the comfy chairs facing the front desk and waited for it to get done. It was there I met one of the nicest employees I have ever encountered at a Front Desk. Her name was Leanne Belair. She took great care of me throughout my entire stay. I explained that I was hoping to see some wildlife while in the area, and she hinted that I might be able to see some from my room. I didn’t know what that meant, until I opened the door to my unit. And then I saw a beautiful photo of some wildlife right over the couch. It was so real that you thought it might start walking around. One thing to note. Finding your room is difficult. The property should really try to have some wayfinding signs installed to assist people getting around. I’m kind of surprised that they didn’t have any up before ski season. Later that day, I went down to talk to the concierge to get dinner recommendations. And I actually recognized him from years ago while visiting Jackson Lake Lodge, a resort in the Grand Teton National Park. His name is Charlie. I’m not very good with names, but after meeting this man you will never forget him. When it is time for Charlie to leave this good earth, The Village Hotel should stuff him and place him in the lobby for all to see. His enthusiasm for life is unmatched. He is literally the nicest person I have ever met. Charlie recommended the Park Avenue Pub as a great place to watch some football. The ambiance there is perfect. Whoever designed it really knew what they were doing. It’s a casual bar/restaurant with rustic accents. While having breakfast there one morning, I met the General Manager, Lindsay Watson. I mentioned how great my experience was, and she took a genuine interest. I could have visited with her all day, but she had to step away for a meeting. 
After skiing for 3 days, it was unfortunately time to return home. At check-out, I mentioned how amazing everything was, and asked the young lady at the Front Desk for the photographer’s contact information (the one who did the artwork inside the rooms). It broke my heart when she said she didn’t have it. Shortly after returning home, I received a large package in the mail. And sure enough, it was the beautiful photo from my room. Ginny Petrovek, the Director of Rooms, was thoughtful enough to find out where I lived and have it mailed to my home. It’s that kind of experience that makes me know Breckenridge will always hold a special place in my heart. I can honestly say that I can’t wait to return, and will NEVER forget my time at the Village. Room Tip: Don't forget to meet Charlie the concierge! See more room tips Stayed November 2012, traveled with friends Value Location Sleep Quality Rooms Cleanliness Service Helpful? 5Thank BHSLT4LIFE Report Ask BHSLT4LIFE about The Village Hotel This review is the subjective opinion of a TripAdvisor member and not of TripAdvisor LLC. Virginia P, Front Office Manager at The Village Hotel, responded to this review, November 23, 2012 Dear Valued Guest, Thank you for taking the time to write this thoughtful review on our page. It is great to hear that the members of our staff you interacted with took care of you! Each one of them truly lit up when they read this review. I did want to address your one comment on the signage, or lack there of, here for the hotel property. Just this Wednesday, the first round of signage installs started for the hotel. We recognized this need and are addressing it! By your next visit, all the signs should be up and will certainly help with your experience at the property. Thank you again for taking the time to write this review on our page. We hope to see you again at the Village in the future! Never forget us! Warm regards, Ginny Report response as inappropriate Thank you. We appreciate your input. 
This response is the subjective opinion of the management representative and not of TripAdvisor LLC. Both Colorado residents, my boyfriend and I stayed here while I was attending a business conference at another hotel over a recent autumn weekend. The location is stellar and so is the customer service of EVERYONE on their team. It is a lovely cozy hotel with beautiful decor and a dynamite pool with four outdoor hot tubs. The only problem is that the pool-hot tubs only open at 10 a.m. every morning so when I woke up early on Sunday to go check them out, they were still locked and closed. Also the $20 daily resort fee is nice for the free covered parking, free wi-fi and free access to the fitness center (and pool-hot tubs when they are open) but it's high, given the hotel room was already priced at $139 a night. Also, there is a problem with the sheets, as mentioned in another review; they need to get fitted sheets to cover the mattresses. We ended up sleeping on the mattress by morning each night. Too bad because the sheets were clean and very soft and the bed was very comfortable. I asked for complimentary late checkout on my last day and noon was the only option given to me. The bathroom sinks have been updated, but the shower-bath was a little disappointing and cramped. Stayed September 2012, traveled as a couple Value Location Sleep Quality Rooms Cleanliness Service Helpful? 1Thank TravelingtheWorld1 Report Ask TravelingtheWorld1 about The Village Hotel This review is the subjective opinion of a TripAdvisor member and not of TripAdvisor LLC. Virginia P, Front Office Manager at The Village Hotel, responded to this review, October 22, 2012 Dear Valued Guest, Thank you for taking the time to write this detailed comment on our tripadvisor page! We truly appreciate all of your feedback and the details of your experience with us. I see you mentioned the hot tub hours. 
Our health club facility is run by our homeowner's association and we are diligently working with them to see if hours may possibly be changed in the future to open at an earlier hour for our guests' convenience. I apologize that we do not provide fitted bottom sheets. The laundry company we work with does not provide this particular sheet. It is a recurring comment from our guests, however, and we will see if it might be something we can change in the future. I am so glad you enjoyed the services of the property and our proximity to town! We hope you consider staying with us again in the future! Winter season is just around the corner! Sincerely, Ginny Report response as inappropriate Thank you. We appreciate your input. This response is the subjective opinion of the management representative and not of TripAdvisor LLC. The Village Hotel offers all non-smoking accommodations at the base of Breckenridge Ski Resort, adjacent to the Peak 9 Quicksilver Super 6 chair lift. The Village's buildings, from lodging units to taverns and shops, surround a central gazebo and pedestrian plaza. The property includes a ski school, sports equipment rentals/gear shops, an indoor/outdoor heated pool, whirlpools, and an exercise room. Dining options include Breckenridge Cattle and Fish Company, Cafe Breck, The Park Avenue Pub, and The Maggie. The hotel offers Starbucks coffee in the mornings in the lobby, 24-hour guest assistance, a medical center, business center, and gift shops. Standard rooms located inside the Village Hotel feature Starbucks coffee service, telephone, voicemail, satellite TV, clock radio and a mini-fridge. Condo units feature the same amenities, as well as fully equipped kitchens with fireplaces and balconies in the larger suites. 
Newly renovated hotel rooms offer telephone, voicemail, LCD flat screen TV, and mini fridge....more less Reservation Options: TripAdvisor is proud to partner with Hotels.com, Booking.com, Cheap Tickets, Travelocity, Priceline and Odigeo so you can book your The Village Hotel reservations with confidence. We help millions of travelers each month to find the perfect hotel for both vacation and business trips, always with the best discounts and special offers. Is This Your TripAdvisor Listing? Own or manage this property? Claim your listing for free to respond to reviews, update your profile and much more. * TripAdvisor LLC is not a booking agent and does not charge any service fees to users of our site... (more) We noticed that you're using an unsupported browser. The TripAdvisor website may not display properly. We support the following browsers: Windows:Internet Explorer, Mozilla Firefox, Google Chrome. Mac:Safari. TripAdvisor LLC is not responsible for content on external web sites. Taxes, fees not included for deals content.
{ "pile_set_name": "Pile-CC" }
Pryke baronets The Pryke Baronetcy, of Wanstead in the County of Essex, is a title in the Baronetage of the United Kingdom. It was created on 3 November 1926 for William Robert Pryke. He was Chairman of Pryke & Palmer Ltd, iron and hardware merchants, and served as Lord Mayor of London from 1925 to 1926. Pryke baronets, of Wanstead (1926) Sir William Robert Pryke, 1st Baronet (1847–1932) Sir (William Robert) Dudley Pryke, 2nd Baronet (1882–1959) Sir David Dudley Pryke, 3rd Baronet (1912–1998) Sir Christopher Dudley Pryke, 4th Baronet (born 1946) Notes References Kidd, Charles, Williamson, David (editors). Debrett's Peerage and Baronetage (1990 edition). New York: St Martin's Press, 1990, Pryke
{ "pile_set_name": "Wikipedia (en)" }
Pope Francis has sharply criticised climate change deniers as "stupid" in the wake of a spate of powerful hurricanes that have wreaked havoc in the United States, Mexico and the Caribbean. "Those who deny it [climate change] should go to the scientists and ask them," the pontiff said on Monday during an in-flight press conference on the return leg of a five-day Colombia trip. "They speak very clearly." As his charter plane flew over some of the recently devastated areas en route to Rome, Francis added: "I am reminded of a phrase from the Old Testament, I think from the Psalm: 'Man is stupid, he is stubborn and he does not see.'" READ MORE: What you need to know: Hurricane Irma and its impacts The pope's comments came as Hurricane Irma, one of the most powerful Atlantic storms in a century, caused widespread destruction across the French Caribbean islands, Cuba, Dominican Republic, Puerto Rico, Haiti and the US state of Florida. At least 38 people have been killed so far from the Category 5 superstorm. Meanwhile, Hurricane Katia bore down on the east coast of Mexico, leaving at least two dead. Far out in the Atlantic, Hurricane Jose, a Category 2 storm, having brushed past the Caribbean also poses a potential threat to the US east coast. Last month, Hurricane Harvey, one of the worst storms to hit the US mainland in 12 years, led to unprecedented flooding in the southern US state of Texas. 'Moral responsibility' Francis is one of the world's most high-profile campaigners on environmental issues, actively supporting efforts to combat climate change and its consequences. He said individuals and politicians had a "moral responsibility" to act on advice from scientists, who had clearly outlined what must be done to halt the course of "catastrophic" warming. "These aren't opinions pulled out of thin air," he said. "They are very clear. They [world leaders] decide and history will judge those decisions." 
WATCH: State of denial - Trump vs climate change (24:38) Recalling last month's news that a ship crossed the Arctic without an icebreaker for the first time, Francis said: "We can see the effects of climate change, and scientists clearly say what path we should follow." While regularly criticising politicians, the pope has made caring for the environment a hallmark of his papacy. He wrote an entire encyclical (a letter from the pope disseminated to the bishops of the Church) about how the poor in particular are most harmed when multinationals move in to exploit natural resources. During his visit to Colombia, Francis spoke out frequently about the need to preserve the country's rich biodiversity from overdevelopment and exploitation. Among world leaders, US President Donald Trump has repeatedly made a case sceptical of the existence of climate change. In June, Trump pulled the United States out of the Paris Agreement, which binds countries to national pledges to reduce greenhouse gas emissions.
{ "pile_set_name": "OpenWebText2" }
Q: Android Application Force Closes after Screen Rotation and Notification (Fragment) So my application has a fragment with a built in runnable timer, that will create a notification on the device once the time reaches 0. However, after a screen orientation, the app force closes and logcat produces "Attempt to invoke virtual method 'java.lang.String android.content.Context.getPackageName()' on a null object reference" Here's the full logcat: java.lang.NullPointerException: Attempt to invoke virtual method 'java.lang.String android.content.Context.getPackageName()' on a null object reference at android.content.ComponentName.<init>(ComponentName.java:77) at android.content.Intent.<init>(Intent.java:4160) at com.example.android.courtcounter.tabs.TimerFragment.createNotification(TimerFragment.java:178) at com.example.android.courtcounter.tabs.TimerFragment$1.run(TimerFragment.java:52) at android.os.Handler.handleCallback(Handler.java:739) at android.os.Handler.dispatchMessage(Handler.java:95) at android.os.Looper.loop(Looper.java:135) at android.app.ActivityThread.main(ActivityThread.java:5254) at java.lang.reflect.Method.invoke(Native Method) at java.lang.reflect.Method.invoke(Method.java:372) at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:903) at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:698) the error happens here: //Creates the notification from the Main Activity class public void createNotification() { NotificationCompat.Builder mBuilder = new NotificationCompat.Builder(getActivity()) .setSmallIcon(R.drawable.ic_launcher) .setContentTitle("Score Counter and Timer") .setContentText("Time's up!"); // Creates an explicit intent for an Activity in your app Intent resultIntent = new Intent(this.getActivity(), TimerFragment.class); // The stack builder object will contain an artificial back stack for the // started Activity. 
// This ensures that navigating backward from the Activity leads out of // your application to the Home screen. TaskStackBuilder stackBuilder = TaskStackBuilder.create(getActivity()); // Adds the Intent that starts the Activity to the top of the stack stackBuilder.addNextIntent(resultIntent); PendingIntent resultPendingIntent = stackBuilder.getPendingIntent( 0, PendingIntent.FLAG_UPDATE_CURRENT ); int mID = 001; mBuilder.setContentIntent(resultPendingIntent); NotificationManager mNotificationManager = (NotificationManager) getActivity().getSystemService(Context.NOTIFICATION_SERVICE); // mId allows you to update the notification later on. mNotificationManager.notify(mID,mBuilder.build()); } Apparently notifying after orientation change will cause it to have the "attempt to invoke method on a null object reference" issue. Here's where I call the notification method i created: private Runnable timerRunnable = new Runnable() { @Override public void run() { if (timeLeft > 501) { timerTextViewSetUp(); timerHandler.postDelayed(this, 500); timeLeft = timeLeft - 500; //Log.v(TimerFragment.class.getSimpleName(), "The timer has been set"); } else if (timeLeft<501 && timeLeft>0) { timeLeft = 0; timerTextView.setText("0:00"); timerHandler.removeCallbacks(this); vibrate(); createNotification(); setPauseButtonToStartButton(); } I don't know if this has anything to do with it, but here's my onSavedInstanceState code: @Override public void onSaveInstanceState(Bundle savedInstanceState) { //Save state information savedInstanceState.putLong(TIME_LEFT_KEY, timeLeft); } and this is in onCreateView: if (savedInstanceState != null) { timeLeft = savedInstanceState.getLong(TIME_LEFT_KEY); timerHandler.postDelayed(timerRunnable, 0); //Log.v(TimerFragment.class.getSimpleName(), Long.toString(timeLeft)); } Help would be greatly appreciated, thanks! A: Orientation changes cause activity to be recreated. 
Use removeCallbacks(Runnable r) to remove the runnable so it doesn't get called after the old activity is destroyed. You can recreate the runnable and handler once new activity is created.
{ "pile_set_name": "StackExchange" }
Q: What is the difference between "mincing" and "grinding"? What, if any, are essential differences between mincing and grinding? I tend to associate mincing with soft foods (other materials?) such as meat. In particular, Merriam Webster has the following (for the transitive verb): a : to cut or chop into very small pieces b : to subdivide minutely; especially : to damage by cutting up : to utter or pronounce with affectation a: archaic : minimize b : to restrain (words) within the bounds of decorum Here 3.a points out the etymology and maybe the technique of mincing, while 3.b suggests metaphorical usage (mince one's words). Grinding might connote harder materials, such as pepper or nuts. Again from MW (transitive verb): to reduce to powder or small fragments by friction (as with the teeth) to press together and move with a rotating or back-and-forth motion—see bruxism Etymonline gives OE grindan "to rub together, grate, scrape" and earlier roots with the same meaning. Apparently, one can also grind beef, for example. Is there any difference in the technique of mincing and grinding different materials? Or in the intensity of crushing? In the accompanying sounds? (Hopefully grinding one's teeth is mainly about the sound, not producing anything.) Or is it just a matter of collocation — mince is customary with some materials, grind with other materials? A: Mincing has the connotation of being cut with an instrument with a slicing edge, whereas grinding utilizes friction between two or more points. More about the physical action and tools involved rather than materials; although because of the difference in technique, they have varying effectiveness with materials of varying consistency. Both turns of phrase make sense describing said techniques metaphorically and physically, respectively.
{ "pile_set_name": "StackExchange" }
1. Field of the Invention The present invention relates to a hydraulic steering mechanism for steering drive wheels by utilizing hydraulic pressure. 2. Related Art There is conventionally known a hydraulic steering mechanism for steering a pair of drive wheels by connecting a pair of steering cases that are disposed at both ends of an axle case through a tie rod and by turning one of the steering cases about a kingpin shaft by a hydraulic actuator. However, in the conventional hydraulic steering mechanism, sufficient consideration has not been given to a viewpoint of a reduction in steering force of the hydraulic actuator. The present invention has been accomplished with the above conventional technique in view, and it is an object of the invention to provide a hydraulic steering mechanism with a simple structure in which a hydraulic actuator for turning a steering case about a kingpin shaft can be miniaturized.
{ "pile_set_name": "USPTO Backgrounds" }
NASA Finds Ingredient of Household Plastic In Space propylene found on Saturn's moon Titan ABOVE VIDEO:With a thick atmosphere, clouds, a rain cycle and giant lakes, Saturn’s large moon Titan is a surprisingly Earthlike place. But unlike on Earth, Titan’s surface is far too cold for liquid water – instead, Titan’s clouds, rain, and lakes consist of liquid hydrocarbons like methane and ethane (which exist as gases here on Earth). When these hydrocarbons evaporate and encounter ultraviolet radiation in Titan’s upper atmosphere, some of the molecules are broken apart and reassembled into longer hydrocarbons like ethylene and propane. NASA.gov – NASA’s Cassini spacecraft has detected propylene, a chemical used to make food-storage containers, car bumpers and other consumer products, on Saturn’s moon Titan. A small amount of propylene was identified in Titan’s lower atmosphere by Cassini’s Composite Infrared Spectrometer (CIRS). (NASA.gov image) This is the first definitive detection of the plastic ingredient on any moon or planet, other than Earth. A small amount of propylene was identified in Titan’s lower atmosphere by Cassini’s Composite Infrared Spectrometer (CIRS). This instrument measures the infrared light, or heat radiation, emitted from Saturn and its moons in much the same way our hands feel the warmth of a fire. Propylene is the first molecule to be discovered on Titan using CIRS. By isolating the same signal at various altitudes within the lower atmosphere, researchers identified the chemical with a high degree of confidence. Details are presented in a paper in the Sept. 30 edition of the Astrophysical Journal Letters. Conor Nixon “This chemical is all around us in everyday life, strung together in long chains to form a plastic called polypropylene,” said Conor Nixon, a planetary scientist at NASA’s Goddard Space Flight Center in Greenbelt, Md., and lead author of the paper. 
“That plastic container at the grocery store with the recycling code 5 on the bottom — that’s polypropylene.” CIRS can identify a particular gas glowing in the lower layers of the atmosphere from its unique thermal fingerprint. The challenge is to isolate this one signature from the signals of all other gases around it. “This chemical is all around us in everyday life, strung together in long chains to form a plastic called polypropylene. That plastic container at the grocery store with the recycling code 5 on the bottom — that’s polypropylene.” said Conor Nixon, a planetary scientist at NASA’s Goddard Space Flight Center in Greenbelt, Md., and lead author of the paper. The detection of the chemical fills in a mysterious gap in Titan observations that dates back to NASA’s Voyager 1 spacecraft and the first-ever close flyby of this moon in 1980. Voyager identified many of the gases in Titan’s hazy brownish atmosphere as hydrocarbons, the chemicals that primarily make up petroleum and other fossil fuels on Earth. On Titan, hydrocarbons form after sunlight breaks apart methane, the second-most plentiful gas in that atmosphere. The newly freed fragments can link up to form chains with two, three or more carbons. The family of chemicals with two carbons includes the flammable gas ethane. Propane, a common fuel for portable stoves, belongs to the three-carbon family. Voyager detected all members of the one- and two-carbon families in Titan’s atmosphere. From the three-carbon family, the spacecraft found propane, the heaviest member, and propyne, one of the lightest members. But the middle chemicals, one of which is propylene, were missing. PROPYLENE FOUND AS A RESULT OF CIRS DATA As researchers continued to discover more and more chemicals in Titan’s atmosphere using ground- and space-based instruments, propylene was one that remained elusive. It was finally found as a result of more detailed analysis of the CIRS data. 
Michael Flasar “This measurement was very difficult to make because propylene’s weak signature is crowded by related chemicals with much stronger signals,” said Michael Flasar, Goddard scientist and principal investigator for CIRS. “This success boosts our confidence that we will find still more chemicals long hidden in Titan’s atmosphere.” Cassini’s mass spectrometer, a device that looks at the composition of Titan’s atmosphere, had hinted earlier that propylene might be present in the upper atmosphere. However, a positive identification had not been made. CASSINI-HUYGENS IS A COOPERATIVE PROJECT “I am always excited when scientists discover a molecule that has never been observed before in an atmosphere,” said Scott Edgington, Cassini’s deputy project scientist at NASA’s Jet Propulsion Laboratory (JPL) in Pasadena, Calif. Scott Edgington “This new piece of the puzzle will provide an additional test of how well we understand the chemical zoo that makes up Titan’s atmosphere.” The Cassini-Huygens mission is a cooperative project of NASA, the European Space Agency and the Italian Space Agency. JPL manages the mission for NASA’s Science Mission Directorate in Washington. The CIRS team is based at Goddard.
{ "pile_set_name": "Pile-CC" }
# -*- tcl -*- # This file is not required as the .test file already does all the # tests without the need for an additional sourced control file. # We have it here just as a reminder. return
{ "pile_set_name": "Github" }
1. Field of the Invention The present invention relates to a pilot operated electromagnetic valve. 2. Description of the Related Art An automotive air conditioner is generally configured such that it includes a compressor, a condenser, an evaporator, and so forth arranged in a refrigerant circulation passage. Various types of control valves are provided for the purpose of, for example, switching the refrigerant circulation passages according to the operation state in such a refrigeration cycle and regulating the flow rate of refrigerant. A pilot operated electromagnetic valve capable of controlling the opening and closing of a large valve section using a relatively small electric power may be used as such the control valve (see Reference (1) in the following Related Art List, for instance). Such an electromagnetic valve drives a small pilot valve element by a solenoid so as to open and close a pilot valve and then drives a large main valve element by a pressure difference regulated thereby so as to open and close a main valve. A piston is formed integrally with the main valve element, and a back pressure chamber is formed by this piston as a separated space inside a body. A leak passage, through which to introduce the refrigerant into the back pressure chamber and a pilot passage, through which the refrigerant is led out from the back pressure chamber, are formed in the main valve element. And the opening and closing of the pilot valve opens and blocks the pilot passage, respectively. With this structure, the opening and closing of the main valve is controlled by varying the pressure of the back pressure chamber. The pressure of the back pressure chamber is regulated through a balance between the flow rate of refrigerant introduced into the back pressure chamber and the flow rate of refrigerant led out from the back pressure chamber.
{ "pile_set_name": "USPTO Backgrounds" }
Tony Albert Tony Albert (born 1981) is a contemporary Australian artist working in a wide range of mediums including painting, photography and mixed media. His work engages with political, historical and cultural Aboriginal and Australian history, and his fascination with kitsch “Aboriginalia". Biography Albert was born in 1981 in Townsville, North Queensland. In 2004 he graduated from the Queensland College of Art, Griffith University, Brisbane, with a degree in Contemporary Australian Indigenous Art. Albert's family is from Cardwell, Queensland and he is a descendant of the Girramay, Yidinji and Kuku-Yalanji peoples. Albert was a founding member of the urban-based Indigenous art collective ProppaNOW founded in 2004. ProppaNOW also included artists Richard Bell, Jennifer Herd, Vernon Ah Kee, Fiona Foley, Bianca Beetson, and Andrea Fisher. Work Like Bell and Ah Kee, the use of text is essential to Albert's practice. Headhunter (2007), an installation consisted of various objects Albert had been collecting for several years, portrays the past racism in Australia and puts emphasis on "the commodification of Aboriginal people for consumption by the non-Indigenous population, at a time when actual engagements with Aboriginal people were rare and predominantly paternalistic." The application of text can also be seen in Albert's photographic work such as Hey ya! (Shake it like a Polaroid picture) (2007). Awards and commissions In 2014 Albert won first prize in the National Aboriginal & Torres Strait Islander Art Award with his work We can be Heroes, prompted by the 2012 shooting by police of two Aboriginal teenagers in Kings Cross. Albert's was the first photographic work to win the prize. In 2015, Albert was commissioned by the City of Sydney to create Yininmadyemi - Thou didst let fall, a public work for Hyde Park, Sydney. The work serves as a memorial to Aboriginal military history and features four large upright bullets and shell casings. 
He was an Archibald Prize finalist in 2016 and 2017. Exhibitions Albert's work has been the subject of nine solo exhibitions and over fifty group exhibitions. References External links Official site Tony Albert at the Art Gallery of New South Wales Tony Albert at UnDisclosed Category:People from Townsville Category:21st-century Australian photographers Category:1981 births Category:Living people Category:Griffith University alumni Category:21st-century Australian painters Category:Photographers from Queensland Category:Indigenous Australian artists Category:Archibald Prize finalists Category:Mixed-media artists
{ "pile_set_name": "Wikipedia (en)" }
The Individual Response Evaluation (IRE) Mode of Classroom Talk Individual Response Evaluation (IRE): a pattern of classroom talk in which the teacher asks a question, a student answers and the teacher accepts or rejects the answer and goes on to ask another question. Sometimes these forms of questions are appropriate because teachers do need to get certain points across to students. IRE discussions do not provide the type of language input and feedback that advance children's knowledge of language structure and use. These types of dialogues do not allow children to build and develop meaning through dialogue, so this cannot be the only form of classroom talk that takes place between teachers and students. Teachers need to engage students in different modes of classroom talk. Solution Preview In order for teachers to provide students with more stimulating conversations in the classroom they should: 1. Engage children in reciprocal conversations and discussions whether they are one-on-one or whole group ... Solution Summary There are several modes of classroom talk that are present in the classroom. One of these is the Individual Response Evaluation mode. While this is needed sometimes for teachers to get specific information from students it should not be the only mode of classroom talk in the classroom. The solution describes several ways teachers can help students further their language acquisition and language learning in the classroom through varied student-centered classroom activities and class discussions.
{ "pile_set_name": "Pile-CC" }
At VoteTexas.gov, you can check if you already are registered to vote and where. Per VoteTexas, you can register to vote if...: “You are a United States citizen You are a resident of the county where you submit the application; You are at least 17 years and 10 months old, and you are 18 years of age on Election Day. You are not a convicted felon (you may be eligible...
{ "pile_set_name": "OpenWebText2" }
– Vi hoppas och tror att det funnits människor i området som sett eller hört något, säger polisens presskommunikatör Nils Norling och poängterar hur viktigt det är med vittnesmål för att komma vidare i utredningen. – Det går jättebra att gå fram till en polisbil och ta kontakt med poliserna där eller ringa 114 14. Överfallet skedde vid sjutiden på tisdagsmorgonen intill en trafikerad korsning med en busshållplats mittemot. I närheten ligger flera skolor och förskolor. Många barnfamiljer passerar parken på morgnarna, liksom hundägare. – Vi är i synnerhet mycket intresserade av att komma i kontakt med en äldre kvinna med hund som brukar gå i parken. Nils Norling, presstalesperson för polisen i Malmö. Foto: Johan Dernelius Kvinnan vårdas fortfarande på sjukhus Just nu jobbar både lokalpolisområde norr, underrättelse och grova brott med fallet. Polisen undersöker om det finns kameror i området som kan ha fångat gärningsmännen på bild. Kvinnan som överfölls blev knivskuren i överkroppen och på benen. Hennes tillstånd är stabilt, men hon vårdas fortfarande på sjukhus. I ett första förhör har kvinnan uppgett att männen var helt okända för henne. – Vi kommer hålla ytterligare förhör med kvinnan för att få en klar bild av vad som har skett, säger Nils Norling. Fallet rubricerades först som misstänkt rån men ändrades senare till försök till mord.
{ "pile_set_name": "OpenWebText2" }
Angelo Borrelli Angelo Borrelli is an Italian government official, who is serving as Head of the Civil Protection, since 8 August 2017. Biography Borrelli was born in 1964 in Santi Cosma e Damiano, near Latina in the Lazio region. He graduated in Economics at the University of Cassino, becoming an auditor and tax advisor. In 2000 he entered the National Office for Civil Service. In 2002 he was appointed executive of the Civil Protection Department, the government body in Italy that deals with the prediction, prevention and management of exceptional events. From 2010 to August 2017 he served as Deputy Head of the Civil Protection, and when Fabrizio Curcio resigned, Prime Minister Paolo Gentiloni appointed him new Head of the Civil Protection. On 31 January, the Italian Council of Ministers appointed Borrelli Extraordinary Commissioner for COVID-19 Emergency. On 22 February, the government announced a new decree, including the quarantine of more than 50,000 people from 11 different municipalities in Northern Italy. Penalties for violations ranged from a €206 fine to 3 months imprisonment. Italian military and law enforcement agencies were instructed to secure and implement the lockdown. Schools were closed in ten municipalities in Lombardy, one in Veneto and Emilia Romagna. All public events were cancelled and commercial activities were halted or were allowed to resume only until 6 pm. Regional train services to the most affected areas were suspended – trains were not stopping at Codogno, Maleo and Casalpusterlengo stations. References Category:1964 births Category:Living people
{ "pile_set_name": "Wikipedia (en)" }
Putamen The putamen (; from Latin, meaning "nutshell") is a round structure located at the base of the forebrain (telencephalon). The putamen and caudate nucleus together form the dorsal striatum. It is also one of the structures that comprise the basal nuclei. Through various pathways, the putamen is connected to the substantia nigra, the globus pallidus, the claustrum, and the thalamus, in addition to many regions of the cerebral cortex. A primary function of the putamen is to regulate movements at various stages (e.g. preparation and execution) and influence various types of learning. It employs GABA, acetylcholine, and enkephalin to perform its functions. The putamen also plays a role in degenerative neurological disorders, such as Parkinson's disease. History The word "putamen" is from Latin, referring to that which "falls off in pruning", from "putare", meaning "to prune, to think, or to consider". Until recently, most MRI research focused broadly on the basal ganglia as a whole, for various reasons (e.g. image resolution, rarity of isolated infarct or hemorrhage within the putamen, etc.). However, many studies have been done on the basal ganglia and relevant brain-behavior relationships. In the 1970s, the first single unit recordings were done with monkeys monitoring pallidal neuron activity related to movement. Since then, more extensive neuronal tracing, stimulation, and imaging research methods (e.g. fMRI, DWI) that allow for investigation of the putamen have been developed. Anatomy The putamen is a structure in the forebrain. Along with the caudate nucleus it forms the dorsal striatum. The caudate and putamen contain the same types of neurons and circuits – many neuroanatomists consider the dorsal striatum to be a single structure, divided into two parts by a large fiber tract, the internal capsule, passing through the middle. The putamen, together with the globus pallidus, makes up the lentiform nucleus. 
The putamen is the outermost portion of the basal ganglia. These are a group of nuclei in the brain that are interconnected with the cerebral cortex, thalamus, and brainstem. Basal ganglia include the dorsal striatum, substantia nigra, nucleus accumbens, and the subthalamic nucleus. In mammals, the basal ganglia are associated with motor control, cognition, emotions, learning, and domain-general functions important for executive functioning as well as support for domain-specific languages. The basal ganglia are located bilaterally, and have rostral and caudal divisions. The putamen is located in the rostral division as part of the striatum. The basal ganglia receive input from the cerebral cortex, via the striatum. The putamen is interconnected with the following structures: This description is rudimentary and does not nearly exhaust even the basic established circuitry of the putamen. The cortico-subcortico-cortical circuits with putaminal involvement are dense and complicated, consisting of a wide range of axonal, dendritic, chemical, afferent, and efferent substrates. The putamen's outputs are highly arborized across output structures, and cortical efferents arise from layers III-VI of the cortex, dependent on gyri and location within the putamen. Topographical organization of the putamen combines the following elements: anterior-to-posterior functional and somatotopic gradients, lateral-to-medial functional and somatotopic gradients, diffuse terminal output, patchy localized terminal output, segregated terminals from adjacent regions, finely interdigiated terminals from distal cortical regions in a seemingly overlapping fashion. Caudate nucleus The caudate works with the putamen to receive the input from cerebral cortex. Collectively, they can be considered the "entrance" to the basal ganglia. Projections from the putamen reach the caudate directly via the caudolenticular grey bridges. 
The putamen and caudate are jointly connected with the substantia nigra, however the caudate outputs more densely to the substantia nigra pars reticulata while the putamen sends more afferents to the internal globus pallidus. Substantia nigra The substantia nigra contains two parts: the substantia nigra pars compacta (SNpc) and the substantia nigra pars reticulata (SNpr). The SNpc obtains input from the putamen and caudate, and sends information back. The SNpr also obtains input from the putamen and caudate. However, it sends the input outside the basal ganglia to control head and eye movements. The SNpc produces dopamine, which is crucial for movements. The SNpc is the part that degenerates during Parkinson's disease. Globus pallidus The globus pallidus contains two parts: the globus pallidus pars externa (GPe) and the globus pallidus pars interna (GPi). Both regions acquire input from the putamen and caudate and communicate with the subthalamic nucleus. However, mostly the GPi sends GABAergic inhibitory output to the thalamus. The GPi also sends projections to parts of the midbrain, which have been assumed to affect posture control. Physiology Types of pathways The putamen (and striatum in general) has numerous, parallel circuits that allow for cortico-subcortico-cortico communication loops. These have been described, broadly, as the direct, indirect, and hyper direct pathways. GABAergic projections of the putamen have an inhibitory effect on the thalamus. Thalamic projections from the centromedian and parafascicular nuclei have an excitatory effect on the putamen. Unlike the thalamus, which has broad reciprocal connectivity, cortical projections with the putamen are afferent, thus sending information as opposed to receiving it. Cortical communication is accomplished via multi-fiber pathways as outlined previously (i.e. via other subcortical structures). 
Dopamine Dopamine as a neurotransmitter has a dominant role in the putamen, most of it is supplied from the substantia nigra. When a cell body of a neuron (in the putamen or caudate nuclei) fires an action potential, dopamine is released from the presynaptic terminal. Since projections from the putamen and caudate nuclei modulate the dendrites of the substantia nigra, the dopamine influences the substantia nigra, which affects motor planning. This same mechanism is involved in drug addiction. In order to control the amount of dopamine in the synaptic cleft, and the amount of dopamine binding to post synaptic terminals, presynaptic dopaminergic neurons function to reuptake the excess dopamine. Other neurotransmitters The putamen also plays a role in modulation of other neurotransmitters. It releases GABA, enkephalin, substance P, and acetylcholine. It receives serotonin and glutamate. Function: motor skills The putamen is interconnected with many other structures, and works in conjunction with them to influence many types of motor behaviors. These include motor planning, learning, and execution, motor preparation, specifying amplitudes of movement, and movement sequences. Some neurologists hypothesize that the putamen also plays a role in the selection of movement (e.g. Tourette syndrome) and the "automatic" performance of previously learned movements (e.g. Parkinson's disease). In one study it was found that the putamen controls limb movement. The goal of this study was to determine whether particular cell activity in the putamen of primates was related to the direction of limb movement or to the underlying pattern of muscular activity. Two monkeys were trained to perform tasks that involved the movement of loads. The tasks were created so that movement could be distinguished from muscle activity. Neurons in the putamen were selected for monitoring only if they were related both to the task and to arm movements outside the task. 
It was shown that 50% of the neurons that were monitored were related to the direction of movement, independent of the load. Another study was done to investigate movement extent and speed using PET mapping of regional cerebral blood flow in 13 humans. Movement tasks were performed with a joystick-controlled cursor. Statistical tests were done to calculate the extent of movements and what regions of the brain the movements correlate to. It was found that "increasing movement extent was associated with parallel increases of rCBF in bilateral basal ganglia (BG; putamen and globus pallidus) and ipsilateral cerebellum." This not only shows that the putamen affects movement but it also shows that the putamen integrates with other structures in order to perform tasks. One study was done in order to specifically investigate how the basal ganglia influences the learning of sequential movements. Two monkeys were trained to press a series of buttons in sequence. The methods used were designed to be able to monitor the well-learned tasks versus the new tasks. Muscimol was injected into various parts of the basal ganglia, and it was found that "the learning of new sequences became deficient after injections in the anterior caudate and putamen, but not the middle-posterior putamen". This shows that different areas of the striatum are utilized when performing various aspects of the learning of sequential movements. Role in learning In many studies, it has become apparent that the putamen plays a role in many types of learning. Some examples are listed below: Reinforcement and implicit learning Along with various types of movement, the putamen also affects reinforcement learning and implicit learning. Reinforcement learning is interacting with the environment and catering actions to maximize the outcome. Implicit learning is a passive process where people are exposed to information and acquire knowledge through exposure. 
Although the exact mechanisms are not known, it is clear that dopamine and tonically active neurons play a key role here. Tonically active neurons are cholinergic interneurons that fire during the entire duration of the stimulus and fire at about 0.5–3 impulses per second. Phasic neurons are the opposite and only fire an action potential when movement occurs. Category learning One particular study used patients with focal lesions on the basal ganglia (specifically the putamen) due to stroke in order to study category learning. The advantage to using these types of patients is that dopaminergic projections to the prefrontal cortex are more likely to be intact. Also, in these patients, it is easier to relate specific brain structures to function because the lesion only occurs in a specific place. The goal of this study was to determine whether or not these lesions affect rule-based and information-integration task learning. Rule-based tasks are learned via hypothesis-testing dependent on working memory. Information-integration tasks are ones wherein the accuracy is maximized when information from two sources are integrated at a pre-decisional stage, which follows a procedural-based system. Seven participants with basal ganglia lesions were used in the experiment, along with nine control participants. It is important to note that the caudate was not affected. The participants were tested for each type of learning during separate sessions, so the information processes would not interfere with each other. During each session, participants sat in front of a computer screen and various lines were displayed. These lines were created by using a randomization technique where random samples were taken from one of four categories. For ruled-based testing, these samples were used to construct lines of various length and orientation that fell into these four separate categories. 
After the stimulus was displayed, the subjects were asked to press 1 of 4 buttons to indicate which category the line fell into. The same process was repeated for information-integration tasks, and the same stimuli were used, except that the category boundaries were rotated 45°. This rotation causes the subject to integrate the quantitative information about the line before determining what category it is in. It was found that subjects in the experimental group were impaired while performing rule-based tasks, but not information-integration ones. After statistical testing, it was also hypothesized that the brain began using information-integration techniques to solve the rule-based learning tasks. Since rule-based tasks use the hypothesis-testing system of the brain, it can be concluded that the hypothesis-testing system of the brain was damaged/weakened. It is known that the caudate and working memories are part of this system. Therefore, it was confirmed that the putamen is involved in category learning, competition between the systems, feed-back processing in rule-based tasks, and is involved in the processing of pre-frontal regions (which relate to working memory and executive functioning). Now it is known that not only the basal ganglia and caudate affect category learning. Role in "hate circuit" Tentative studies have suggested that the putamen may play a role in the so-called "hate circuit" of the brain. A recent study was done in London by the department of cell and developmental biology at University College London. An fMRI was done on patients while they viewed a picture of people they hated and people who were "neutral". During the experiment, a "hate score" was recorded for each picture. The activity in sub-cortical areas of the brain implied that the "hate circuit" involves the putamen and the insula. 
It has been theorized that the "putamen plays a role in the perception of contempt and disgust, and may be part of the motor system that's mobilized to take action." It was also found that the amount of activity in the hate circuit correlates with the amount of hate a person declares, which could have legal implications concerning malicious crimes. Role in transgender individuals The putamen was found to have significantly larger amounts of grey matter in male to female transgender individuals compared to the putamen of a typical heterosexual male. This possibly suggests that a fundamental difference in brain composition may exist between trans women and cis men. Pathology Parkinson's disease After discovering the function of the putamen, it has become apparent to neurologists that the putamen and basal ganglia play an important role in Parkinson's disease and other diseases that involve the degeneration of neurons. Parkinson's disease is the slow and steady loss of dopaminergic neurons in substantia nigra pars compacta. In Parkinson's disease the putamen plays a key role because its inputs and outputs are interconnected to the substantia nigra and the globus pallidus. In Parkinson's disease the activity in direct pathways to interior globus pallidus decreases and activity in indirect pathways to external globus pallidus increases. This is why Parkinson's patients have tremors and have trouble performing voluntary movements. It has also been noted that Parkinson's patients have a difficult time with motor planning. 
Other diseases and disorders The following diseases and disorders are linked with the putamen: Cognitive decline in Alzheimer's disease Huntington's disease Wilson's disease Dementia with Lewy bodies Corticobasal degeneration Tourette syndrome Schizophrenia Depression Attention deficit hyperactivity disorder Chorea Obsessive-Compulsive Disorder Kernicterus Other anxiety disorders In other animals The putamen in humans is relatively similar in structure and function to other animals. Therefore, many studies on the putamen have been done on animals (monkeys, rats, cats, etc.), as well as humans. However, inter-species variations are indeed observed in mammals, and have been documented for white matter putaminal connectivity. Variation is primarily related to structural connectivity patterns, while somatotopic organization principles are retained. Primate research since the 1980s through to the present has established that cortical regions relating to higher-order cognition primarily send afferent neurons to the rostral-most portion of the putamen, while the remainder of this structure in primates primarily serves sensori-motor functions and is densely interconnected with primary and supplementary motor regions. Additional images See also Lentiform nucleus References External links Diagram at uni-tuebingen.de – "The Visual Pathway from Below" Category:Basal ganglia
{ "pile_set_name": "Wikipedia (en)" }
Shows routes of Spanish voyages in the Pacific, including the Northwest Coast of North America. Relief shown by hachures; depths shown by soundings. Sheet 1 of 2: map of the northwest coast of America. Title from accompanying notes. An ice-axe at the top of Mount Miyazaki, to which have been attached two flags (USA and Japan) and a placard. The names read: Peter E. Haase, Noburu Morikawa; Ryu Nakajima; Hiromasa Haneda. The sign has been dated... Title from accompanying notes. "20 April. Shige at Camp 1." Mountain climber stopped to pose for the camera on the trail, there are six other men on the trail in front of him. He is tethered with rope and has sunglasses, quilted mittens, and...
{ "pile_set_name": "Pile-CC" }
diff -uNr recordmydesktop-0.3.8.1/src/rmd_initialize_data.c recordmydesktop-0.3.8.1.mod/src/rmd_initialize_data.c --- recordmydesktop-0.3.8.1/src/rmd_initialize_data.c 2008-12-13 17:49:09.000000000 +0200 +++ recordmydesktop-0.3.8.1.mod/src/rmd_initialize_data.c 2020-02-11 15:45:12.372308546 +0200 @@ -239,8 +239,8 @@ args->device = (char *) malloc(strlen(DEFAULT_AUDIO_DEVICE) + 1); strcpy(args->device, DEFAULT_AUDIO_DEVICE); - args->workdir = (char *) malloc(5); - strcpy(args->workdir, "/tmp"); + args->workdir = (char *) malloc(sizeof("@TERMUX_PREFIX@/tmp")); + strcpy(args->workdir, "@TERMUX_PREFIX@/tmp"); args->pause_shortcut = (char *) malloc(15); strcpy(args->pause_shortcut, "Control+Mod1+p");
{ "pile_set_name": "Github" }
package org.spiderflow.core.utils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.mail.SimpleMailMessage; import org.springframework.mail.javamail.JavaMailSender; import org.springframework.stereotype.Component; /** * 邮件发送工具类 * * @author BillDowney * @date 2020年4月4日 上午12:31:09 */ @Component public class EmailUtils { // 发送邮件服务 @Autowired private JavaMailSender javaMailSender; // 发送者 @Value("${spring.mail.username}") private String from; /** * 发送简单文本邮件 * * @param subject 主题 * @param content 内容 * @param to 收件人列表 * @author BillDowney * @date 2020年4月4日 上午12:40:42 */ public void sendSimpleMail(String subject, String content, String... to) { SimpleMailMessage message = new SimpleMailMessage(); message.setFrom(from); message.setSubject(subject); message.setText(content); message.setTo(to); javaMailSender.send(message); } }
{ "pile_set_name": "Github" }
#
# Copyright 2019 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# HLS regression-test driver for the fixed-point SSR FFT (radix-4, length-16
# configuration). Stage flags (CSIM, CSYNTH, COSIM, VIVADO_SYN, VIVADO_IMPL,
# QOR_CHECK) and build variables (XF_PROJ_ROOT, XPART, optionally CLKP) are
# expected to be defined by settings.tcl.
source settings.tcl

# Project and solution names for this test configuration.
set PROJ "prj_ssr_fft_reg_test_r4_l16.prj"
set SOLN "solution1"

# Default target clock period in ns, unless settings.tcl already set one.
if {![info exists CLKP]} {
  set CLKP 3.3
}

# Recreate the project and register design sources; include paths point at the
# fixed-point SSR FFT headers and the shared test utilities.
open_project -reset $PROJ
add_files "src/main.cpp src/hls_ssr_fft_data_path.hpp src/DEBUG_CONSTANTS.hpp" -cflags "-I${XF_PROJ_ROOT}/L1/include/hw/vitis_fft/fixed -I${XF_PROJ_ROOT}/L1/tests/common"
# Testbench plus golden stimulus/response vectors for transform length L=16.
add_files -tb "src/main.cpp ${XF_PROJ_ROOT}/L1/tests/hw/1dfft/fixed/commonFix/verif/fftStimulusIn_L16.verif ${XF_PROJ_ROOT}/L1/tests/hw/1dfft/fixed/commonFix/verif/fftGoldenOut_L16.verif" -cflags "-I${XF_PROJ_ROOT}/L1/include/hw/vitis_fft/fixed -I${XF_PROJ_ROOT}/L1/tests/common"
set_top fft_top

# Fresh solution targeting the part and clock supplied by settings.tcl.
open_solution -reset $SOLN
set_part $XPART
create_clock -period $CLKP

# Conditionally run each tool stage according to the flags from settings.tcl.
if {$CSIM == 1} {
  csim_design
}

if {$CSYNTH == 1} {
  csynth_design
}

if {$COSIM == 1} {
  cosim_design
}

if {$VIVADO_SYN == 1} {
  export_design -flow syn -rtl verilog
}

if {$VIVADO_IMPL == 1} {
  export_design -flow impl -rtl verilog
}

if {$QOR_CHECK == 1} {
  puts "QoR check not implemented yet"
}

exit
{ "pile_set_name": "Github" }
High percentage of dietary palm oil suppressed growth and antioxidant capacity and induced the inflammation by activation of TLR-NF-κB signaling pathway in large yellow croaker (Larimichthys crocea). A 70-day feeding trial was conducted to investigate the effects of dietary fish oil (FO) replaced by palm oil (PO) on growth, biochemical and antioxidant response as well as inflammatory response in the liver of large yellow croaker (initial weight 15.87 ± 0.14 g). Four iso-proteic and iso-lipidic experimental diets were formulated with 0% (the control group), 33.3%, 66.7% and 100% FO replaced by PO. Fish fed the diet with 100% PO showed significantly lower growth performance than the control group. As expected, the contents of C16:0, C18:1n-9 and C18:2n-6 were increased with increasing dietary PO levels. There were remarkable increases in total cholesterol (TC) and low-density lipoprotein-cholesterol (LDL-C) levels in fish fed the diet with 100% PO compared to the control group. Moreover, dietary PO significantly increased activities of plasma alanine transaminase (ALT) and aspartate aminotransferase (AST) in fish fed the diet with 100% PO compared to the control group. The total antioxidant capacity (T-AOC) and the activity of catalase (CAT) in plasma were significantly decreased in fish fed the diet with 100% PO compared to the control group, and meanwhile no significant differences were found in T-AOC and CAT activity in fish fed diets with no more than 66.7% PO. Fish fed the diet with 100% PO exerted significantly higher toll like receptors (TLRs) and myeloid differentiation factor (MyD88) mRNA expression levels than the control group. The IFNγ, IL-1β and TNFα mRNA expressions were increased with increasing dietary PO levels. The increase of pro-inflammatory gene expression may be due to the activation of NF-κB signaling as the ratio of nucleus p65 to total p65 protein was elevated with the increase of dietary PO levels. 
These results showed that relatively higher PO levels in diets suppressed the growth and antioxidant capacity as well as induced the inflammatory response by activating TLR-NF-κB signaling pathway in juvenile large yellow croaker.
{ "pile_set_name": "PubMed Abstracts" }
1. Field of the Disclosure The subject disclosure relates generally to oilfield drilling, and more particularly to bottom hole assemblies and tools for orienting a bottom hole assembly (BHA). 2. Background of the Related Art In conventional drilling, the BHA is lowered into the wellbore using jointed drill pipes or coiled tubing. Often the BHA includes a mud motor, directional drilling and measuring equipment, measurements-while-drilling tools, logging-while-drilling tools and other specialized devices. A simple BHA having a drill bit, various crossovers, and drill collars is relatively inexpensive, costing a few hundred thousand US dollars, while a complex BHA costs ten times or more than that amount. Many drilling operations require directional control so as to position the well along a particular trajectory into a formation. Directional control, also referred to as “directional drilling,” is accomplished using special BHA configurations, instruments to measure the path of the wellbore in three-dimensional space, data links to communicate measurements taken downhole to the surface, mud motors, and special BHA components and drill bits. The directional driller can use drilling parameters such as weight-on-bit and rotary speed to deflect the bit away from the axis of the existing wellbore. In some cases, e.g. when drilling into steeply dipping formations or when experiencing an unpredictable deviation in conventional drilling operations, directional-drilling techniques may be employed to ensure that the hole is drilled vertically. Direction control is most commonly accomplished through the use of a bend near the bit in a downhole steerable mud motor. The bend points the bit in a direction different from the axis of the wellbore when the entire drill string is not rotating. By pumping mud through the mud motor, the bit rotates though the drill string itself does not, allowing the bit alone to drill in the direction to which it points. 
When a particular wellbore direction is achieved, the new direction may be maintained by then rotating the entire drill string, including the bent section, so that the drill bit does not drill in a direction away from the intended wellbore axis, but instead sweeps around, bringing its direction in line with the existing wellbore. As it is well known by those skilled in the art, a drill bit has a tendency to stray from its intended drilling direction, a phenomenon known as “drill bit walk”. A device for addressing drill bit walk is shown in U.S. Pat. No. 7,610,970 to Sihler et al. issued Nov. 3, 2009, which is incorporated herein by reference. The use of coiled tubing with downhole mud motors to turn the drill bit to deepen a wellbore is another form of drilling, one which proceeds quickly compared to using a jointed pipe drilling rig. By using coiled tubing, the connection time required with rotary drilling is eliminated. Coiled tube drilling is economical in several applications, such as drilling narrow wells, working in areas where a small rig footprint is essential, or when reentering wells for work-over operations. In coiled tubing drilling, a BHA with a mud motor is attached to the end of a coiled tubing string. Typically, the mud motor has a fixed or adjustable bend housing in order to drill deviated holes. Because the coiled tubing is unable to rotate from surface, a so called orienter tool is used as part of the BHA to “orient” the bend of the mud motor into the desired direction. There exists a multitude of different designs for the drive systems of such tools. Some designs support continuous rotation such as electric motor and gearbox drives, while others only permit rotation by a certain limited angle. The orienter tool is typically a high-torque, low-speed device, wherein the design of the drive system provides a torque output which can at least match the reactive torque exerted by the drilling mud motor. 
For example, some orienter tools have utilized planetary gears in an effort to drive the output shaft. Basically, creating a torque on an output shaft means that a tangential force has to be exerted. By way of example, an output torque of 1,000 ft-lbs from a 2-inch diameter shaft means a tangential force of 12,000 lbs. This amount of force will quickly yield any material unless the tangential force is evenly distributed over a sufficient area to reduce the stress levels. In a conventional planetary stage with a size constraint on the order of 3 inches in diameter, the limits of how much bending force the gear teeth can take, and how much stress the planet carrier is capable of supporting will be much below 1000 ft-lbs of torque.
{ "pile_set_name": "USPTO Backgrounds" }
Monte Cassino Commemorative Cross The Monte Cassino Commemorative Cross () is a commemorative medal awarded to all soldiers of the Polish II Corps who fought in the battle of Monte Cassino and the battles for Piedimonte and Passo Corno. After the capture of Monte Cassino in May 1944, the Polish government-in-exile (in London) created a campaign cross to commemorate the role of the Polish II Corps (often known as Anders Army) in capturing this strategic point, which had long blocked the Allied advance up the Italian peninsula. A consignment of 50,000 crosses was ordered from a manufacturer in Tel Aviv, then part of British-ruled Palestine, where the Polish forces had spent part of 1942 and almost all of 1943 in training. A total of 48,498 crosses (serial numbers 1 to 48,498) were awarded with accompanying award documents issued in the field to each soldier who took part in the battle. Although it is commonly believed that no master record exists to indicate which serial numbers were given to specific soldiers, records do indicate which blocks of serial numbers were given to units within the Polish II Corps. Furthermore, the actual lists of named cross recipients are held at the Polish Institute and Sikorski Museum. The 1502 un-awarded crosses (serial numbers 48,499 to 50,000) were eventually sold off to dealers and collectors. At the moment of its institution in June 1944, the medal was 19th in the Polish order of precedence. Recipients by Unit The Monte Cassino Commemorative Cross was issued to the following units: CROSS No. 
UNIT 1 Senior Commanders - General Władysław Anders, General Officer Commanding 2 Polish Corps 2 Senior Commanders - General Zygmunt Bohusz-Szyszko, Deputy General Officer Commanding 2 Polish Corps 3 Senior Commanders - Bishop Józef Gawlina, Polish Army Field Bishop 4 Senior Commanders - General Bolesław Bronisław Duch, General Officer Commanding 3 Carpathian Rifle Division 5 Senior Commanders - General Nikodem Sulik, General Officer Commanding 5 Kresowa Infantry Division 6 Senior Commanders - General Bronisław Rakowski, General Officer Commanding 2 Armoured Brigade 7 Senior Commanders - General Roman Odzierzyński General Officer Commanding 2 Polish Corps Artillery 8 Senior Commanders - General Prof. Dr Bolesław Szarecki, General Officer Commanding 2 Polish Corps Medical Services 9 Senior Commanders - Colonel Dr. Ludwik Ząbkowski, Commanding Officer 2 Artillery Group, 2 Polish Corps 10 Senior Commanders - Colonel Kazimierz Wiśniowski, 2 Polish Corps Chief of Staff 11 Senior Commanders - Colonel Stanisław Skowroński, 2 Polish Corps Quartermaster 12 Senior Commanders - Colonel Mieczysław Zaleski, Commanding Officer 2 Polish Corps Signals Units 13 Senior Commanders - Colonel Konstanty Skąpski, Commanding Officer 2 Polish Corps Engineering Units 14-749 Headquarters 2 Corps Staff 750 Headquarters 2 Corps Staff – Melchior Wańkowicz, War Correspondent 751-1102 Killed in Action 3 Carpathian Rifle Division 1103-2043 Officers 3 Carpathian Rifle Division 2044-14702 Other Ranks listed Alphabetically 3 Carpathian Rifle Division 14703-14750 Not Awarded 14751-15246 5 Kresowa Infantry Division Headquarters Staff 15247-15375 Headquarters Staff 5 Wilno Infantry Brigade 15376-16136 13 Wilno Rifle Battalion 16137-16878 14 Wilno Rifle Battalion 16879-17615 15 Wilno Rifle Battalion 17616-17743 Headquarters Staff 6 Lwow Infantry Brigade 17744-18477 16 Lwow Rifle Battalion 18478-19206 17 Lwow Rifle Battalion 19207-19938 18 Lwow Rifle Battalion 19939-20656 4 Light Artillery Regiment 
20657-21362 5 Light Artillery Regiment 21363-22027 6 Light Artillery Regiment 22028-22654 5 Anti-Tank Regiment 22655-23554 5 Light Anti-Aircraft Regiment 23555-24438 15 Poznan lancer Regiment 24439-25130 5 Heavy Machine Gun Battalion 25131-26019 5 Engineers Battalion 5KDP 26020-26526 5 Signals Battalion 26527-26770 5 Sanitary (Medical) Company 26771-27016 6 Sanitary (Medical) Company 27017-27059 Command Supply and Transport Units 27060-27324 5 Supply Company 27325-27583 6 Supply Company 27584-27966 15 Supply Company 27967-28307 16 Supply Company 28308-28443 5 Workshop Company [5 EME Coy.] 28444-28586 6 Workshop Company [6 EME Coy.] 28587-28691 5 Provost (Military Police) Squadron [Coy.] 28692-28733 6 Light Artillery Regiment 28734-28739 15 Poznan Lancer Regiment 28751-28975 Headquarters 2nd Warsaw Armoured Brigade 28976-28987 On secondment to 2nd Armoured Brigade 28988 Headquarters 2nd Warsaw Armoured Brigade 28989-29672 4 Skorpion Armoured Regiment 29673-30328 1 Krechowiecki Lancer Regiment 30329-30992 6th Children of Lwów Armoured Regiment 30993-31188 9 Signals Company 31189-31385 9 Light Sanitary (Medical) Company 31386-31909 9 Supply Company 31910-32105 9 Workshop Company [9 EME Coy.] 32106-32246 9 Forward Tank Replacement Squadron [Coy.] 
32247-32269 6 Field Court, 2nd Armoured Brigade 32270-32300 Not Awarded 32301-32332 Command, 2nd Artillery Group 32333-32361 Staff, Counter-Battery Section 32362-33050 7 Horse Artillery Regiment 33051-33055 7 Horse Artillery Regiment - Wounded 33056-33057 7 Horse Artillery Regiment - Killed in Action 33058-33759 9 Medium Artillery Regiment 33760-33767 9 Medium Artillery Regiment - Killed in Action/Died of Wounds 33768-34364 10 Medium Artillery Regiment 33765-34367 10 Medium Artillery Regiment - Wounded 33768-34374 10 Medium Artillery Regiment – Killed in Action 34375-34936 11 Medium Artillery Regiment 34937-34940 11 Medium Artillery Regiment - Wounded 27/4 to 11/5/45 34941-34945 11 Medium Artillery Regiment - Killed in Action/Died of Wounds 34946-34949 11 Medium Artillery Regiment 34950-34951 Command, 2nd Artillery Group 34852-34960 Not Awarded34961-34978 Command, 2 Corps Artillery 34979-35631 7 Anti-Tank Regiment 35632-35657 7 Anti-Tank Regiment - Killed 35658-36543 7 Light Anti-Aircraft Regiment 36544-37518 8 Heavy Anti-Aircraft Regiment 37519-38083 1 Artillery Survey Regiment 38084-38655 Carpathian Lancer Regiment 38656 Colonel Henryk Ignacy Szymanski (USA) US Army Liaison38657-38760 Carpathian Lancer Regiment 38761-38776 Command, 2 Corps Engineers 38777-39697 10 Battalion, Corps Engineers [10 Polish Corps Troops Engineers] 39698-40043 10 Bridging Company 40044-40297 301 Engineering Company 40298-40370 304 Mechanical Equipment Platoon 40371-40398 10 Bomb Disposal Platoon 40399-40420 306 Engineering Park 40421-40438 Command, Army Signals 2 Corps 40439-41052 11 Signals Battalion 41053-41061 2 Corps Signals Traffic Control Team 41062-41111 Corps Artillery Fire Control Signals Platoon 41112-41167 Command, Corps Artillery Group Signals Platoon 41168-41234 11 Special Radiotelegraphic Platoon 41235-41245 12 Special information Platoon 41246-41280 386 Signals Platoon 41281-41320 389 Medium Radio Platoon 41321-41338 2 Corps Signals Park 41339-41371 Seconded from 7 
Inf.Div for Telephone Network Security 41372-41440 Air Support Signals Section 41441-41446 Arial Photographic Interpretation Section 41447-41449 Heads of 2 Corps Medical Services 41450-41615 3 Casualty Clearing Station [stationed at Venafro] 41616-41783 5 Casualty Clearing Station [stationed at Pozzilli] 41784-42159 161 Military Hospital [later 2 Military Hospital - stationed at Campobasso] 42160-42171 Arial Photographic Interpretation Section 42172-42328 162 Military Hospital [also known as 6 Field Hospital - stationed at Venafro] 42329-42736 1 Military Hospital [later 5 Military Hospital - stationed at Casamassima] 42737-43169 3 Military Hospital [stationed at Palagiano] 43170-43415 31 Sanitary (Medical) Company 43416-43443 32 Field Hygiene Platoon 43444-43451 34 Anti-Malaria Section 43452-43460 45 Surgical Team 43461-43470 46 Surgical Team 43471-43480 47 Surgical Team 43481-43489 48 Surgical Team 43490-43493 49 Transfusion Team 43494-43497 50 Transfusion Team 43498-43514 341 Field Medical Dump 43515-43520 Field Bacteriological-Chemical Unit 43521-43541 Heads, Supply and Transport Services 43542-43589 Command, 2 Corps Supply & Transport Units 43590-43863 21 Transport Company43864 Brigadier Sir E H C Frith (UK) C.O. 
26 British Liaison Unit 43865-43970 21 Transport Company43971-44403 22 Transport Company 44404-44768 23 Transport Company 44769-45159 24 Transport Company 45160-45369 29 Ambulance Company 45370-45419 30 Independent Workshop Platoon 45420-45459 326 Supply Dump 45460-45501 327 Supply Dump 45502-45539 328 Supply Dump 45540-45617 61 Anti-Aircraft Artillery Supply Unit 45618-45715 62 Anti-Aircraft Artillery Supply Unit 45716-45777 331 Mobile Field Bakery 45778-45843 332 Mobile Field Bakery 45844–45884 334 Propellant Fuel Dump 45885-45912 336 Office Materials Dump 45913-45923 Delegation of Head of Field Canteens 45924-45973 318 Field Canteen and Mobile Library Company 45974-45986 Command, 26 Field [Forward] Supply Centre 45987-45999 Command, 27 Field [Forward] Supply Centre 46000-46012 Command, 28 Field [Forward] Supply Centre 46013-46026 Heads, Corps Material Services 46027-46045 Heads of Non-divisional Material Service Units 46046-46207 Command, Corps Material Park 46208-46251 Material Parks Delivery Platoon 46252-46257 Field Officers shop 46258-46269 Field Ammunitions Laboratory 46270-46377 350 Material Company Supply Station 46378-46480 375 Field Laundry 46481-46496 377 Field Bath 46497-46515 378 Field Bath 46516-46527 Heads of Corps Electrical and Mechanical Engineering Service 46528-46693 13 Workshop Company [13 EME Coy.] 46694-46872 15 Workshop Company [15 EME Coy.] 46873-47123 35 Workshop Company [35 EME Coy.] 47124-47127 Corps Ordinance Workshop 47128-47294 36 Rescue Company 47295-47303 Heads of Geographical Services 47304-47455 12 Geographical Company 47456-47475 312 Field Map Store 47476-47484 Command 2 Corps Provost (Military Police) 47485-47648 11 Provost (Military Police) Squadron [Coy.] 
47649-47659 1 Provost (Military Police) Platoon 47660-47676 2 Provost (Military Police) Platoon 47677-47756 Command, Army Service Corps Units 47757-47958 Guard Battalion 47959-47966 Heads, Justice Service 47967-47994 12 Field Court 47995-48006 Office of War Graves Registration 48007-48046 370 Evacuated Equipment Park 48047-48089 371 Evacuated Equipment Park 48090-48135 372 Evacuated Equipment Park 48136-48146 2 Corps Inspectorate for Control of Technical Equipment 48147-48149 48150-48151 Heads, Field Postal Service 48152-48171 2 Corps Field Postal Department 48172-48178 2 Corps Military Censors Office 48179-48216 2 Corps Meteorological Section 48217-48230 Command, 2 Corps Financial Service [Paycorps] 48231-48257 2 Corps Press and Culture Office 48258-48263 Press and Culture Office, non-divisional units 48264-48272 Command Station No.1 48273-48290 Smoke Screen Unit 48291-48293 Soldiers’ Welfare Section 48294-48355 Concert and Entertainment Section 48356-48385 Reserve Liaison and Translator Officers 48386-48399 111 Bridge Security Company 48400-48407 Health Service Inspectorate, Women's Auxiliary Service (Poland) 48408-48498 Independent Commando Company 48499-48505 Headquarters 2 Corps Staff 48506-48514 Cipher Office and Radio Station 48515-48517 7 Light Anti-Aircraft Regiment 48518 7 Anti-Tank Regiment 48519 301 Engineering Company 48520 389 Medium Radio Platoon 48521-48523 2 Provost (Military Police) Platoon 48524-48531 48532-48535 318 Field Canteen and Mobile Library Company 48536-48537 35 Workshop Company [35 EME Coy.] 
48538-48540 5 Military Hospital (Previously 1 Military Hospital) 48541-48542 162 Military Hospital 48543 161 Military Hospital 48544-48547 48548-48553 Headquarters 2 Corps Staff 48554-48580 Arial Photographic Interpretation Section 48581 8 Heavy Anti-Aircraft Regiment 48582-48583 Smoke Screen Unit 48584-48585 3 Casualty Clearing Station 48586 31 Sanitary (Medical) Company 48587-48597 Guard Battalion 48598-48691 Headquarters 2 Corps Staff 48692 2 Corps Signals Traffic Control Team 48693 Carpathian Lancer Regiment 48694-48696 Heads of 2 Corps Medical Services 48697-48710 Heads, Supply and Transport Services 48711-48715 Heads of Corps Electrical and Mechanical Engineering Service 48716-48720 Command, Army Service Corps Units 48721-48749 48750-48759 Headquarters 2 Corps Staff 48760 7 Anti-Tank Regiment 48761 1 Artillery Survey Regiment 48762-48796 Convalescent Home (2 Battalion) 48797 Heads, Supply and Transport Services 48798 Heads, Corps Material Service 48799-48821 Guard Battalion 48822-48823 2 Corps Press and Culture Office 48824-48830 48831 663 Artillery Observation Air Squadron 48832 48833 7 Anti-Tank Regiment 48834-48845 48846 Convalescent Home (2 Battalion) 48847 8 Heavy Anti-Aircraft Regiment The units listed above are not the official British War Office designations, but rather a close translated version taken from the Polish. This information was brought to you courtesy of Andrzej and The Polish Institute and General Sikorski Museum. It was compiled by Dr. Mark Ostrowski. Recent research has established that the initial figures cited above were miscalculated and in fact the last known cross given to a named recipient was 48847. The complete list of named recipients is at the Polish Institute and Sikorski Museum in File AXII/85/214''', Folders 1-19. 
See also Battle of Monte Cassino References External links The Polish Victory at Monte Cassino Dal Volturno a Cassino a website that has as its main purpose to collect and disseminate information on both, notably to the battle of Cassino is to all those events less known, but it must be related, temporally preceding and the following. Category:Battle of Monte Cassino Category:Polish campaign medals
{ "pile_set_name": "Wikipedia (en)" }
In vivo electrophysiology of dopamine-denervated striatum: focus on the nitric oxide/cGMP signaling pathway. Within the striatum, the gaseous neurotransmitter nitric oxide (NO) is produced by a subclass of interneurons containing the neuronal NO synthase (nNOS). NO promotes the second messenger cGMP through the activation of the soluble guanyl cyclase (sGC) and plays a crucial role in the integration of glutamate (GLU) and DA transmission. The aim of this study was to characterize the impact of 6-hydroxyDA (6-OHDA) lesion of the rat nigrostriatal pathway on NO/cGMP system. In vivo extracellular single units recordings were performed under urethane anesthesia to avoid any potentially misleading contributions of cortically-driven changes on endogenous NO. Hence, no electrical extrastriatal stimulation was performed and great attention was paid to the effects of 3-morpholinosydnonimine (SIN-1, a NO donor), N(G)-nitro-L-arginine methyl ester (L-NAME, a nonselective NOS inhibitor) and Zaprinast (a PDE inhibitor) delivered by iontophoresis upon the main striatal phenotypes. The latter were operationally distinguished in silent medium spiny-like neurons (MSN), with negligible spontaneous activity but displaying glutamate-induced firing discharge at rest and spontaneously active neurons (SAN), representing to a large extent nonprojecting interneurons. SANs were excited by SIN-1 and Zaprinast while MSNs showed a clear inhibition during local iontophoretic application of SIN-1 and Zaprinast. In 6-OHDA animals, SIN-1-induced excitation in SANs was significantly increased (on the contrary, the inhibitory effect of L-NAME was less effective). Interestingly, in DA-denervated animals, a subclass of MSNs (40%) displayed a peculiar excitatory response to SIN-1. These findings support the notion of an inhibitory modulatory role exerted by endogenous NO on control striatal projection cells. 
In addition, these findings suggest a functional cross-talk between NO, spontaneously active interneurons, and projection neurons that becomes critical in the parkinsonian state.
{ "pile_set_name": "PubMed Abstracts" }
Q: complex regex with preg_replace, replace a word not inside [ and ] I'm having trouble finding a correct regex to achieve what I want. I have a sentence like that : Hi, my name is Stan, you are welcome, hello. and I would like to transform it like that : [hi|hello|welcome], my name is [stan|jack] you are [hi|hello|welcome] [hi|hello|welcome]. Right now my regex is half working, because somes words are not replaced, and those replaced are deleting some characters Here is my test code <?php $test = 'Hi, my name is Stan, you are welcome, hello.'; $words = array( array('hi', 'hello', 'welcome'), array('stan', 'jack'), ); $result = $test; foreach ($words as $group) { if (count($group) > 0) { $replacement = '[' . implode('|', $group) . ']'; foreach ($group as $word) { $result = preg_replace('#([^\[])' . $word . '([^\]])#i', $replacement, $result); } } } echo $test . '<br />' . $result; Any help will be appreciated A: The regex you are using is overcomplicated. You simply need to use a regex substitution using regular brackets (): <?php $test = 'Hi, my name is Stan, you are welcome, hello.'; $words = array( array('hi', 'hello', 'welcome'), array('stan', 'jack'), ); $result = $test; foreach ($words as $group) { if (count($group) > 0) { $imploded = implode('|', $group); $replacement = "[$imploded]"; $search = "($imploded)"; $result = preg_replace("/$search/i", $replacement, $result); } } echo $test . '<br />' . $result;
{ "pile_set_name": "StackExchange" }
Munroe Regional Home Care Munroe Regional Home Care This is a listing for Munroe Regional Home Care (9401 SW Highway 200, Suite 3003, Ocala, FL 34481) from the Home Health Care Directory. You can find more similar services in our Home Health Care Directory.
{ "pile_set_name": "Pile-CC" }
October 5, 2018 — Venus stations retrograde October 5 until November 15. Her journey begins in deep, alchemical Scorpio, then she returns to spacious, harmonious Libra—her natural domicile. In November, Venus will stations direct as a daytime star for the first time in our lifetimes, representing the empowerment of the feminine. Venus is goddess love and beauty. When she moves backwards, she sparks a period of reflection, review, and recalibration in all of her domains—especially relationships, art, beauty, peace, and love. Now, we enter a period of retreat and going inward. We can contemplate old patterns around love and relating, meeting any walls or fears that still obstruct our hearts. Where does our fear close down our potential for deep connection? Where do we still hold resentments? And what unresolved places still exist in our hearts around past relationships or friendships? Not only do we have the chance to untangle old pains and traumas, we may also look at any patterns that have been reemerging in our lives again and again. Why are these old road blocks still showing up in new costumes? What wisdom and lessons have we yet to look in eyes and receive? Libra rules paradox, and we are now confronting our darkest places and holding space for them—with forgiveness and acceptance. We can find balance as we transform our places of suffering into beauty and true intimacy. Taboos, secret desires, and shadows now have the chance to see the light of day, with neutrality and embrace. Perhaps true intimacy is sharing with another person what we’re afraid to say. We have that opportunity now—first with ourselves. We can listen to our own hearts, then extend our ears more openly to others, receiving all of who they are without assumption. Veils are slipping away as we grow into the power of our inner feminine, standing strong and still in the truth of our beings—with grace, resilience, and unconditional love. 
To schedule an astrology reading with Juliana, visit here, or write: info@etherealculture.com My book, The Stars Within You: A Modern Guide to Astrology, is now available for pre-order! Check my Events Page for upcoming talks, workshops, + retreats.
{ "pile_set_name": "OpenWebText2" }
[Value of echocardiography in the study of the embolic origin of a cerebral ischemic accident]. One hundred consecutive patients aged from 35 to 82 years (mean : 74 years) admitted to a Neurology unit for cerebral ischaemic accident of suspected embolic origin were examined by two-dimensional echocardiography, then divided into two groups. Group I patients (n = 24) had a cardiopathy detectable by ultrasound, such as valve disease (n = 5), ischaemic cardiopathy (n = 10), myocardial dilatation (n = 4) or hypertrophic obstructive cardiomyopathy (n = 1). Group II patients (n = 74) had no cardiopathy detectable by ultrasound. Among group I patients, a thrombus was detected in 3 cases (12.5%), and 16 patients (66.7%) had echographic signs of potentially emboligenic cardiopathy without thrombus, including mitral or aortic valve stenosis (5 cases), parietal ectasia (6 cases), severe abnormality in left ventricular contractility (4 cases) and left atrial dilatation (4 cases). No thrombus was visualized in group II patients, but 10 (13.2%) had signs of potentially emboligenic cardiopathy, including mitral valve prolapse in 6 and left atrial dilatation in 4 cases. Altogether, therefore, a potentially emboligenic cardiopathy was detected by echocardiography in 29% of these 100 patients, but it had already been diagnosed prior to this examination in 24%. A thrombus could be visualized in only 3% of the cases. It is concluded that echocardiography need not be systematically performed in all patients with cerebral ischaemic accident, but only in young patients in search of a cause amenable to curative or actively prophylactic treatment.
{ "pile_set_name": "PubMed Abstracts" }
--- abstract: 'The onset and nature of the earliest geomagnetic field is important for understanding the evolution of the core, atmosphere and life on Earth. A record of the early geodynamo is preserved in ancient silicate crystals containing minute magnetic inclusions. These data indicate the presence of a geodynamo during the Paleoarchean, between 3.4 and 3.45 billion years ago. While the magnetic field sheltered Earth’s atmosphere from erosion at this time, standoff of the solar wind was greatly reduced, and similar to that during modern extreme solar storms. These conditions suggest that intense radiation from the young Sun may have modified the atmosphere of the young Earth by promoting loss of volatiles, including water. Such effects would have been more pronounced if the field were absent or very weak prior to 3.45 billion years ago, as suggested by some models of lower mantle evolution. The frontier is thus trying to obtain geomagnetic field records that are $\gg$3.45 billion-years-old, as well as constraining solar wind pressure for these times. In this review we suggest pathways for constraining these parameters and the attendant history of Earth’s deep interior, hydrosphere and atmosphere. In particular, we discuss new estimates for solar wind pressure for the first 700 million years of Earth history, the competing effects of magnetic shielding versus solar ion collection, and bounds on the detection level of a geodynamo imposed by the presence of external fields. We also discuss the prospects for constraining Hadean-Paleoarchean magnetic field strength using paleointensity analyses of zircons.' address: - 'Department of Earth and Environmental Sciences, University of Rochester, Rochester, New York, 14627, USA' - 'Department of Physics and Astronomy, University of Rochester, Rochester, New York, 14627, USA' author: - 'John A. Tarduno' - 'Eric G. Blackman' - 'Eric E. 
Mamajek' bibliography: - 'tarduno2\_biblio.bib' title: 'Detecting the oldest geodynamo and attendant shielding from the solar wind: Implications for habitability' --- Geodynamo,Early Earth ,Solar Wind ,Atmospheric Loss ,Habitability Introduction ============ The onset and nature of the geomagnetic field is important for understanding the evolution of the core, atmosphere and life on Earth. For the earliest Earth, the dynamo was powered thermally, so dynamo startup provides information on core heat flow and lower mantle conditions. A common posit is that the lack of magnetic shielding of cosmic radiation is inconsistent with the development of life, but it is clear that an atmosphere and ocean layer can provide some protection. The primary issue explored here is the survival of the hydrosphere. The magnetic field acts to prevent atmospheric erosion by the solar wind. In the case of the early Earth, the magnetic field would have had to balance the greatly enhanced solar wind pressure associated with the young rapidly-rotating Sun. The interplay between the magnetic field and radiation from the young Sun controls the loss of light elements and, ultimately, water and therefore may be a fundamental stage in the development of a habitable planet. The interaction of the solar wind and the geomagnetic field produces Earth’s magnetosphere (Figure 1), with the magnetopause ($r_{s}$) defined as the point toward the Sun (the sub-solar point) where the wind pressure is balanced by the magnetic field [@Griessmeier2004]:\ $$r_s=\left[\frac{\mu_0f_0^2M_E^2}{4\pi^2(2\mu_0n_{sw}m_{p}v_{sw}^2+B^2_{IMF})}\right]^{1/6}$$\ where $M_{E}$ is Earth’s dipole moment, $n_{sw}$ is solar wind density, $v_{sw}$ is solar wind velocity, $f_{0}$ is a magnetospheric form factor (=1.16 for Earth), $\mu_{0}$ is the permeability of free space, $m_{p}$ is a proton mass and $B_{IMF}$ is the interplanetary magnetic field. 
Today, Earth’s magnetic field stands off the solar wind to a distance between 10 and 11 Earth radii. The occurrence of magnetic storms and their associated phenomena– including auroral lights seen at unusually low latitudes – are a vivid reminder that the solar input side of this balance is variable. In the case of a magnetic storm, a coronal mass ejection impinges on and compresses the magnetosphere, greatly reducing the magnetopause standoff distance. Of course, Earth’s magnetic field is dynamic and it also varies on a wide range of timescales. At any given time, the geomagnetic field at a radius $r$, colatitude $\theta$ and longitude $\phi$ can be specified by the gradient of the scalar potential ($\Phi$): $$\Phi(r,\theta,\phi)=r_{e}\sum_{l=1}^{\infty} \sum_{m=0}^{l} \left(\frac{r_{e}}{r} \right)^{l+1} P^{m}_{l}(cos \theta)[g^{m}_{l}\cos m \phi + h^{m}_{l}\sin m \phi]$$ where $P^{m}_{l}$ are partially normalized Schmidt functions, $r_{e}$ is the radius of Earth and the Gauss coefficients $g^{m}_{l}$ and $h^{m}_{l}$ describe the size of spatially varying fields. The current field is approximately 80% dipolar and when averaged over several hundred thousand years to 1 million years, the axial dipole term ($g^{1}_{0}$) is thought to dominate. The latter assumption– the geocentric dipole hypothesis– forms the basis of tectonic interpretations of paleomagnetic data that have been essential in our understanding of the plate tectonic history of the planet. Paleomagnetic directions provide the opportunity for tests of the geocentric dipole hypothesis [@Irving1964] and these have confirmed the hypothesis for times as old as $\sim$2.5 billion years (Smirnov and Tarduno, 2004), notwithstanding considerable gaps in the paleomagnetic database of directions. For even deeper time, the global distribution of data does not yet permit a test, but we can proceed because the geocentric dipole assumption provides the maximum magnetic shielding [@Siscoe1980]. 
The key magnetic parameter to determine in learning about solar-terrestrial interactions in the deep past (eqn 1) then is magnetic field strength ($M_{E}$), but this is far more difficult to determine than paleodirections. Determining the ancient solar wind pressure is also challenging, especially for the first $\sim$700 million years of Earth’s history. In this review, we will first tackle the observational challenge of determining ancient field strength by means of examples through Paleoarchean times. We will show how these data, when combined with constraints based on solar analogs, allow us to constrain the magnetopause some 3.45 billion years ago. We provide a review of atmospheric escape mechanisms, including contrarian views that question the importance of a shield created by the geomagnetic field. Within this context, we discuss the implications of the Paleoarchean magnetopause conditions for the evolution of Earth’s water inventory. As one peers back further in time, these data mark the bifurcation point for potential trajectories for the dynamo. In one scenario, the dynamo starts soon after core formation and has been relatively strong ever since. In another, the dynamo started between 4 and $\sim$3.5 billion years ago. We will next suggest ways to extend the record of the magnetopause back in time to the earliest Paleoarchean and Hadean Earth. To address the presence/absence question about the earliest field we also address the question of “recording zero" and in so-doing revisit the self-shielding potential of a planet lacking a magnetic field.\ The Challenge of Recording Magnetic Deep Time ============================================= @Stevenson1983 suggested that field strength might approximately scale with rotation, while cautioning over the accuracy of any scaling laws of the time. Nevertheless, scaling with rotation has been suggested as a viable way of determining past field strength [@Dehant2007; @Lichtenegger2010]. 
While there are many unresolved issues in dynamo theory, it is generally accepted that core convection is driven by buoyancy [e.g. @Roberts2000], in which case intensity estimates based solely on rotation are illusory. Clearly, observations are needed. Obtaining paleomagnetic constraints on the oldest geodynamo, however, is difficult because of the ubiquitous metamorphism that has affected Paleoarchean and older rocks. It is sometimes assumed that the maximum metamorphic temperature can be simply related to the temperature at which a magnetization is removed from a sample in the laboratory. By this line of reasoning, if a rock had been heated to 320 $^{o}$C during a metamorphic event, say for 1 million years, its magnetization isolated by heating in the laboratory to 320 $^{o}$C would record the magnetic field at the time of the metamorphic event. The magnetization isolated as temperatures greater than 320 $^{o}$C might record the field when the rock first cooled. In reality, many (if not most) bulk rock samples would be totally remagnetized during this metamorphic event, and/or their magnetic field strength records might be forever compromised. To understand why this is so, and how we might see through metamorphism, we must first consider the basis for measurement of past field strength (the subdiscipline of paleointensity).\ Paleointensity measurement -------------------------- The physical mechanism of magnetization that is best understood is that of thermoremanent magnetization (TRM), acquired when a sample’s constituent minerals cool through their Curie temperatures. To recover paleointensity from rocks carrying a TRM, the most robust method is the Thellier approach, named after seminal work of the Thelliers [@Thellier1959], or some of its more common variants [e.g. the Thellier-Coe approach; @Coe1967]. In these experiments, samples are exposed to a series of paired heating steps (Figure 2). 
For example, in the Thellier-Coe approach, a sample might first be heating to 100 $^{o}$C in field free-space, during which a natural remanent magnetization (NRM) is removed. Next, the sample is reheated to the same temperature, but this time in the presence of a known applied field (this is called the “field-on" step). After cooling, the sample magnetization is measured and the thermoremanent magnetization acquired can be calculated. Because one knows the magnetization lost, the magnetization gained and the applied field ($H_{\rm lab}$), one can solve for the ancient field ($H_{\rm paleo}$) recorded by the 100 $^{o}$C temperature range: $$H_{\rm paleo} = \frac{M_{\rm NRM}}{M_{\rm TRM}} H_{\rm lab}$$ In practice, one conducts this experiment at many steps up to the maximum Curie temperature of the magnetic minerals in a given specimen.\ Chemical change --------------- The need for the second, or “field-on" step makes paleointensity experiments far less successful than a study of magnetic directions alone. The main problem is that heating can induce changes in magnetic minerals, changing the TRM-capacity; because the measurement is done in the presence of a field, the reference needed for equation 3 ($M_{\rm TRM}$) is compromised. In contrast, heatings for directional measurements are always done in field-free space, and a sample can suffer some alteration and still retain useful information. Hence, finding samples that will not alter during laboratory experiments is one key requirement. This constraint is also closely related to the metamorphic and weathering history of a rock. In general, the formation of Fe-rich clays spells disaster for paleointensity recording because these can alter during paleointensity experiments to form new iron-oxides [e.g. @Cottrell2000; @Gill2002; @Kars2012]. 
Ultramafic rocks, including komatiitic lavas, might be thought of as good carriers of Archean fields, but the Fe-rich phyllosilicate serpentine \[(Mg, Fe)$_{3}$Si$_{2}$O$_{5}$(OH)$_{4}$\] and magnetite are commonly formed in these rocks. The magnetite carries a magnetization dating to the alteration age (which is usually uncertain) and the magnetization is not a TRM; instead it is a crystallization or chemical remanent magnetization (CRM), and a straightforward tie to past field strength is lost [@Yoshihara2004; @Smirnov2005].\ Time, Temperature and Domain State ---------------------------------- The principal magnetic mineral recorder of interest for Paleoarchean and older rocks is magnetite having a Curie temperatures of $\sim$580 $^{o}$C. Hematite, with a Curie temperature of 675 $^{o}$C is also a potential recorder, but its occurrence as a primary phase recording a TRM is rare relative to magnetite. Even if we have a pristine rock that has undergone no chemical change, magnetite within the rock can be remagnetized by the aforementioned 320 $^{o}$C metamorphic event even though the peak temperature is less than the Curie temperature of magnetite. To understand this phenomena, we start with the recognition that metamorphic heating will affect magnetic grains differently, according to their size and related domain state. Large magnetic grains (greater than one micron, for example), are composed of multiple magnetic domains (called the multidomain, or MD, state). Even under low grade metamorphic conditions, we expect that new magnetizations will be acquired due to movement of domain walls. These new magnetizations will be recorded by some MD grains up to the Curie point of magnetite. Small magnetite grains containing only a single domain (SD) have the potential to withstand low grade metamorphic conditions. 
The parameter of interest for these grains is the thermal relaxation time
The magnetic properties of common Archean rock types vary on a grain (mineral) scale, so meeting these challenges is rarely possible using whole rock samples.\ Single Crystal Paleointensity ----------------------------- One approach to address these challenges is a focus on the paleomagnetism of single crystals rather than bulk rocks, using the single silicate crystal paleointensity (SCP) approach. Silicate crystals are not of intrinsic magnetic interest, but they often host minute magnetic particles that are ideal magnetic recorders [@Cottrell1999; @Dunlop2005; @Feinberg2005; @Tarduno2006]. In Mesozoic lavas, feldspars bearing magnetic inclusions have been shown to alter less in the laboratory than the whole rocks from which they were separated [@Cottrell2000]. The SCP approach (using feldspar from lavas) has been used to study geomagnetic field intensity versus reversal frequency for the last 160 million years [an inverse relation is supported; @Tarduno2001; @Tarduno2002; @Tarduno2005], and during the Kiaman Superchron [@Cottrell2008]. The SCP method has also been applied to olivine from pallasite meteorites [@Tarduno2012]. The SCP approach has allowed examination of the field of the Late, Middle and Early (Paleo) Archean as discussed below.\ Prior Proterozoic-Archean Paleomagnetic Field Constraints\ ---------------------------------------------------------- Most prior characterizations of the Archean field have been based on whole rocks and subsequent studies have revealed that metamorphism has most likely compromised their paleointensity record (see discussion in Usui et al., 2009). Below we highlight the salient observations about the field that are robust for the interval equal to and older than the Proterozoic-Archean boundary (Figure 3). As noted earlier, there is a general consensus that the field was predominately dipolar at $\sim$2.5 Ga [@Smirnov2004; @Biggin2008; @Smirnov2011]. 
The dynamo appears to be reversing, although the sparse rock record allows different interpretations of the reversal rate. @Dunlop2004 and @Coe2006 first argued that reversal rates might have been low. No long sedimentary sequences with paleomagnetic data are available to justify this interpretation for times before the Archean-Proterozoic boundary, and the thermal alteration of sediments of this age questions whether such data will ever be available. The interpretation is instead based on the sampling of lavas that are associated with large igneous provinces exposed in the Superior Craton of Canada and in the Pilbara Craton of Western Australia [the $\sim$2.7-2.8 Ga Fortesque Group; @Strik2003]. However, magmatic activity associated with these large igneous province could have occurred in pulses (as it has during the Phanerozoic). If so, much less time is represented by the rock record (than is expected by assuming continuous magmatism between available radiometric age data) and capturing only a few field reversals would be expected. The oldest reversal documented to date is that associated with 3.1 to 3.2 Ga plutonic rocks exposed in the Barberton Greenstone Belt [@Layer1998; @Tarduno2007]. An older reversal has been reported from study of a purported 3.4 Ga tuff of the Barberton Greenstone Belt [@Biggin2011], however the sampled rocks are sandstone and minor shale highly altered by local fluids (Axel Hofmann, personal communication). Moreover, the limited spatial sampling and interpretation by the authors of contamination of the data by lightning strikes suggests this reversal record could be an artifact. Smirnov et al. [@Smirnov2003] applied the SCP approach to $\sim$2.45 Ga dikes formed close to the Proterozic-Archean boundary from the Karelia Craton of Russia. Although the dikes were insufficient in number to completely average secular variation, the values are consistent with present-day field values. 
The SCP was also applied to 3.2 Ga plutons of the Barberton Greenstone Belt. These yielded a mean paleointensity that was interpreted as being within 50% of the current field value [@Tarduno2007]. However, this relatively large uncertainty was assigned to the field value to account for the possibility of cooling rate effects (in general, laboratory cooling rates are by necessity much faster than those in nature, motivating application of correction factors). Modeling and experimental data suggest that cooling rate effects are nominal for pseudosingle domain grains [@Winklhofer1997; @Yu2011; @Ferk2012]. Without correction, raw data from 3.2 Ga silicate crystals are essentially indistinguishable from the modern field intensities. A few studies of whole rocks, where the principal magnetic carriers were thought to be fine magnetite exsolved in silicate crystals, are also of note. A recent restudy of the 2.78 Ga [@Feinberg2010] Modipe Gabbro indicated a virtual dipole moment of 6 x 10$^{22}$ A m$^{2}$ [@Muxworthy2013]. This study lacks a field test on the age of magnetization, but the pristine nature of select outcrops suggests the magnetization could be primary. Inexplicably, @Muxworthy2013 do not consider the possibility of post-emplacement tectonic tilt. Given that the in situ magnetic inclination is very steep (I= 70$^{o}$ reported in @Muxworthy2013; I = 85$^{o}$ in @Evans1966, and I = 89$^{o}$ in @Feinberg2010), it is most likely that the true paleolatitude is shallower if the Modipe Gabbro has been tilted. @Denyszyn2013 convincingly argue that the unit has been tilted, but the exact tilting amount is uncertain. Nevertheless, the value of 6 x 10$^{22}$ A m$^{2}$ is probably an underestimate of the true field strength. Study of the Stillwater Complex yielded a high paleointensity of 92 $\mu$T [@Selkin2000]. 
After correction of anisotropy, this value was reduced to 46 $\mu$T, and after application of a cooling rate correction (based on SD assumptions) the value was further lowered to 32 $\mu$T, resulting in a final VDM of 4 x 10$^{22}$ A m$^{2}$. For the reasons discussed above, the cooling rate correction is an overestimate if PSD grains are present, suggesting that the Stillwater VDM reported by @Selkin2000 also underestimates the true field strength. Thus, conservatively, the hypothesis that field strength was similar (i.e. within 50%) to that of the modern field cannot be rejected for the interval from the Proterozoic boundary to the mid-Archean at 3.2 Ga. Below, we discuss in more detail the constraints for field strength for the Paleoarchean.\ The magnetopause 3.45 billion years ago ======================================= Arguably the least metamorphosed Paleoarchean rocks are found in the Barberton Greenstone belt of the Kaapvaal Craton, South Africa. Here, the peak temperatures can be as low as $<$350 $^{o}$C [e.g. @Tice2004]. Similar rocks are found in the closely associated Nondeweni Greenstone Belt also of the Kaapvaal Craton, located about 300 km south of the Barberton Greenstone Belt [@Hofmann2007]. In both belts, dacites are found which meet the recording challenges discussed above. In a sense, they are goldilocks recorders. They have a relatively low Fe content– enough to record the ancient field, but not so much that massive amounts of secondary magnetic minerals have formed during metamorphism.\ Conglomerate Test ----------------- One of the most powerful tests to determine whether a rock can retain a primary record of the magnetic field is the conglomerate test [@Graham1949] (Figure 4). If the magnetization directions from individual clasts forming a conglomerate are the same, that direction must postdate deposition. 
If the magnetizations differ, the clasts may retain a record of the geomagnetic field prior to deposition, and possibly dating to the formation of the rock from which a given conglomerate clast was derived. There is an important, and sometimes overlooked, limitation with the application of this test. Because different rocks types will respond differently to a given set of metamorphic conditions, the conglomerate test only applies to the lithology sampled in the conglomerate test. In the Barberton Greenstone Belt, a $\sim$3416 Ma conglomerate is found, dominated by dacite clasts. @Usui2009 found that the dacite clasts contained two distinct components of magnetization. At low to intermediate unblocking temperatures, the magnetization from many clasts defined a common direction that was indistinguishable from the magnetic field during the $\sim$180 million-year-old Karoo magmatic event that affected most of the Kaapvaal Craton. At high unblocking temperatures ($\gtrsim$525-550 $^{o}$C), another distinct component was defined; the direction of this component differed between clasts. A formal test [@Watson1956] showed that these directions indicated that the hypothesis that these directions were drawn from a random population (as expected for a conglomerate recording a primary direction) could not be rejected at the 95% confidence interval. The parent body of the dacite clasts is exposed in the Barberton Greenstone Belt; bulk samples show a similar component structure as that seen in the clasts, but the high unblocking temperature magnetization, while grouped (as would be expected in the parent rock) showed a scatter that was unusually high as compared to that typically seen in younger igneous rocks. It must be remembered, however, that these are from bulk samples, which typically contain a wide range of magnetic grain sizes. 
The largest multidomain grains, which are seen in thin section [@Usui2009], are expected to carry overprints and are likely contaminating the high unblocking temperature direction. There is also a more exotic explanation. In the absence of a geodynamo, the high scatter could reflect a variable field produced by solar interaction with the Paleoarchean atmosphere. For example, the solar wind interaction with the thick atmosphere of Venus (a planet without an internally generated magnetic field) produces an external magnetic field which is more variable and more than an order of magnitude weaker than the surface field of Earth [@Zhang2007] (see also *section 7*). Hence, paleointensity becomes the key variable in evaluating these data further.\ Field Intensity Values at 3.4 to 3.45 Ga ---------------------------------------- To obtain a field intensity at 3.45 Ga, @Tarduno2010 sampled the source of the Barberton Greenstone Belt dacite clasts; these were supplemented with a study of 3.4 Ga dacites from the Nondweni Greenstone Belt. In both belts, dacites contain quartz phenocrysts, 0.5 to 2 mm in size, which were the target of investigation. The ideal nature of the magnetic inclusions within the phenocrysts was confirmed by rock magnetic tests (e.g. magnetic hysteresis), and Thellier-Coe paleointensity data were collected using CO$_{2}$ laser heating methods [@Tarduno2007] and high resolution SQUID magnetometers (including an instrument with a 6.3 mm access bore specialized for the SCP approach). The thermal demagnetization data show the removal of a low unblocking temperature component with heating to $\sim$450 $^{o}$C (Figure 2). The unblocking characteristics of this component agree with single domain theory suggesting that given a peak metamorphic temperature of 350 $^{o}$C, an overprint should leak to slightly higher temperatures. At higher unblocking temperatures a component is isolated that trends to the origin of orthogonal vector plots of the magnetization. 
Over this higher unblocking temperature range, paleointensities of 28.0 $\pm$ 4.3 $\mu$T and 18.2 $\pm$ 1.8 $\mu$T were obtained for the Barberton and Nondweni dacites, respectively. Because paleointensity is derived from a comparison of field-on versus field-off steps, it does not require one to have oriented specimens. However, oriented samples are required to obtain paleolatitude which is in turn needed to express a given field value at a site as a dipole moment (i.e., the value needed for eqn 1). To obtain paleointensity, an oriented thin section approach was developed [@Tarduno2007]. Application of this technique suggests virtual dipole moments of 3.2 $\pm$ 0.31 x 10$^{22}$ A m$^{2}$ for the Nondweni dacite, and 4.3 $\pm$ 1.0 x 10$^{22}$ A m$^{2}$ for the Barberton dacite. Thus a strong magnetic field was present during a time for which we have the oldest microfossil evidence for life [@Wacey2011]. However, what were the details of the shielding environment? Having these magnetic dipole moment constraints, the next step in our quest to calculate magnetopause standoff is to constrain solar wind pressure in the past, as discussed below.\ Solar Wind and magnetopause at 3.45 Ga -------------------------------------- The property of the Archean to Hadean Sun that has most captured the attention of the geological community is its luminosity. The Sun began the main sequence phase of its life at a luminosity $\sim$30% lower than the present-day value [@Gough1981]. Because of the conversion of H to He in the Sun’s core, density and temperature increase, and luminosity rises. The “Faint Young Sun Paradox" highlights the expectation that this reduced luminosity should have resulted in a snowball Earth [@Sagan1972], but sediments (including those from the Barberton Greenstone belt) indicate deposition in shallow warm seas. The solution to the paradox is generally posed in the form of some greenhouse gas concentration in the early Earth’s atmosphere. 
We will briefly revisit this question later, but for now the variable of greatest interest for addressing equation 1 is not luminosity, but other aspects of the Sun’s radiation, specifically the solar wind (mainly hydrogen ionized to protons), X-rays and energetic UV radiation. Unlike Earth, where field strength relates to buoyancy forces, the Sun’s magnetic field is a strong function of its rotation rate. The solar dynamo has its origin in the interactions between rotation and its convective outer envelope [@Parker1970]. The Sun sheds angular momentum through the emission of magnetized winds and rotation slows in a process known as magnetic braking. The rotation period slows as $\sim t^{\frac{1}{2}}$ [e.g. @Ayres1997]. Stellar winds are not detected directly, but through interaction with the interstellar medium. The interaction forms an “astrosphere" surrounding a star created by hot, interstellar neutral hydrogen which is heated by the stellar wind. Hot hydrogen atoms can be detected spectroscopically because they produce an absorption feature (H [I]{} Lyman-$\alpha$) thereby proving an indirect means of gauging stellar winds. Observational constraints on ancient mass are available from the study of solar analogs - stars that are approximately the same size as the Sun but have different ages [e.g. @Wood2006]. These data suggest that current and past mass loss are related by a power law: $$\frac{\dot{m}v_{sw}}{\dot{m}_0v_{sw0}}=\left(\frac{t}{t_0}\right)^{-2.33}$$ where $\dot{m}_{0}$, $v_{sw0}$ are the present-day mass loss and solar wind velocity, respectively. As we will discuss later, this relationship should be applied to the Sun only for solar ages greater than $\sim$ 700 million years. This relationship suggests that the solar mass loss at 3.45 Ga was 2.4 x $10^{-13}$ M$_{\odot}$/yr, where M$_{\odot}$ is the current solar mass. 
This mass loss can be used to further derive a model (Model A of @Tarduno2010) of wind velocity and density change with time, which comprise solar wind pressure. Hence, we can now solve equation 1 to obtain estimates of magnetopause standoff distance as a function of dipole moment (Figure 5). The paleointensity estimates for 3.4-3.45 Ga from the Kaapvaal Craton dacites suggest standoff distances of $\sim$5 Earth radii (R$_{e}$). One can also estimate mass loss from rotation and X-ray emission. Stellar evolution models predict the Sun at 3.45 Ga would appear to be a G6V star having a rotational period of $\sim$12 days [@Mamajek2008]. Mass loss rates among solar-type stars can be related to X-ray emission ($f_{X}$) and rotation as: $$\dot{m}=\dot{M_{\odot}}\left(\frac{\rm{R}}{\rm{R}_{\odot}}\right)^{2}\left(\frac{f_{X}}{f_{X_{\odot}}}\right)^{1.34\pm0.18}$$\ where R is solar radius, R$_{\odot}$ is the modern value, and $f_{X_{\odot}}$ is the modern soft X-ray surface flux ($\sim$10$^{4.57}$ erg/s/cm$^{2}$ in 0.1-2.4 keV band). Equation 7 (Model B of @Tarduno2010) predicts a higher mass loss of 1.5 x 10$^{-12}$ M$_{\odot}$/yr and a smaller standoff of $\sim$4 R$_{e}$. Even using the more conservative mass loss of Model A, the standoff distance is expected to be only $\sim$50% of the present-day value. @Sterenborg2011 presented results of a magnetohydrodynamic numerical simulation of Paleoarchean conditions utilizing modules of the Space Weather Modeling Framework [@Toth2005]. They obtained standoff distances within error of our estimates, confirming the compressed Paleoarchean magnetosphere. The change in standoff relative to today may in itself seem abstract, but we have a very good idea of what these conditions are like because they are typically experienced during modern events, such as the Halloween solar storms of 2003 [e.g. @Rosenqvist2005]. However, during modern events like the Halloween storms, reduced standoffs occur on hour to day timescales.
In contrast, these conditions would represent the typical day 3.4 to 3.45 billion years ago. Thus, while the magnetic field was present and provided some shielding from the solar wind, the reduced standoff conditions suggest there nevertheless could have been important modifications of the early Earth’s atmosphere. Below, we first provide context through a discussion of atmospheric loss processes.\ Atmospheric Loss Mechanisms =========================== Atmospheric loss mechanisms are typically divided into thermal escape stimulated by energetic photons and non-thermal escape associated with the interaction of charged particles. Following this classification, the presence/absence of a magnetic field will influence only non-thermal escape. However, thermal processes are also of importance because they determine whether non-thermal loss is likely to be efficient. Moreover, as we introduce below, under reduced standoff conditions of the early Earth, heating due to solar wind interaction with the atmosphere is possible, and in this case the clear-cut assignment of magnetic field influence to only non-thermal processes becomes less meaningful. Much can be learned about these atmospheric loss processes by study of planets without internally generated magnetic fields, namely Venus and Mars, and the general interest has expanded in parallel with exoplanet discoveries and considerations of habitability [e.g. @Seager2010]. The limitations of these analogies should also be kept in mind; atmospheric loss processes presently working at Venus and Mars are the result of different evolutionary histories of their respective atmospheres. 
While analogies should not be taken too far, the physical processes are illustrative, notwithstanding the difficult task of deciding the importance of these processes for the young Earth.\ Thermal escape -------------- The uppermost tenuous part of the atmosphere is known as the exosphere; it begins at the exobase, where the mean free path of molecules is so large that collisions before escape are unlikely. In Jeans’ escape [@Jeans1925], a molecule gains sufficient energy to reach its escape velocity. The important Jeans’ escape parameter $\lambda_{J}$ is defined as the ratio of gravitational potential energy to thermal energy: $$\lambda_{J} \equiv GMm/kT_{J}r_{J}$$ where G is the gravitational constant, $M$ is the planetary mass, $m$ is the mass of the escaping particles, $k$ is Boltzmann’s constant, $T_{J}$ is the temperature of the exobase and $r_{J}$ is the radius of the exobase. A central parameter in understanding thermal escape in the past is thus knowing the temperature at the exobase, and this in turn will depend on the solar flux, particularly that in the X-ray to energetic ultraviolet (EUV) spectral range (wavelength $\leq$ 1027 Å) commonly referred to as XUV radiation. Astrophysical observations of solar analogs [e.g. @Dorren1994] have provided data to constrain the past solar flux. @Ribas2005 define a flux as a function of stellar age ($t$) as: $$F = 29.7\, t^{-1.23}\, {\rm ergs} \, {\rm s}^{-1} \, {\rm cm}^{-2}$$ where the flux is defined at wavelengths between 1 and 1200 Å. At 2.5, 3.45 and 4 Ga, this flux would be about 3, 6, and 13 times greater (respectively) than today. This increased flux has the potential to heat the exosphere (but this also depends on atmospheric composition, see [*section 4.3*]{}) and increase ionization, promoting non-thermal escape. If heating is sufficiently great ($\lambda_{J} <$1.5; @Opik1963) atmospheric “blowoff" can occur.
The more general term is hydrodynamic escape, where the thermal energy from XUV radiation allows an escaping species to efficiently stream away from the planet. It is particularly important for hydrogen-rich exospheres, potentially including that of the early Earth [@Watson1981]. Hydrodynamic escape will be important only if the supply of the escaping species is not limited by diffusion lower in the atmosphere. The diffusion limited flux ($F_{d}$) can be expressed as [@Hunten1973]: $$F_{d}= b\left(\frac{GM}{kT}\right)(m_{j}-m_{i})\left(\frac{n_{i}}{n_{j}}\right)$$ where ($F_{d}$) is diffusive flux per steradian per second, $b$ is a binary collision parameter, $\cal{O} $ (10$^{9}$) cm$^{-1}$ s$^{-1}$, $n$ is number density and the subscripts $i$ and $j$ refer to the escaping and background gas masses, respectively, and $n_{i}/n_{j}$ is the mixing ratio of the escaping gas. If we consider hydrogen as the gas escaping from a background of nitrogen, mixing ratios greater than a few percent suggest that escape will be energy related rather than diffusion limited [@Watson1981], and this seems likely given the potential sources of hydrogen ranging from dissociation of water vapor to volcanic outgassing. In fact, faced with the likelihood of hydrogen accumulation in a pre-Proterozoic anoxic Earth, the relevant question is one of determining how large amounts of hydrogen were removed such that the atmosphere did not become highly reducing (evidence for which is lacking in Paleoarchean and younger rocks). Two final aspects of hydrogen hydrodynamic escape are of note. Hydrodynamic escape of hydrogen has the potential to carry out with it heavier atmospheric species. @Watson1981 used equation 10 to predict that heavier species would probably not be removed, with the exception of deuterium. This prediction is consistent with the lack of nitrogen isotope ($^{15}$N/$^{14}$N) fractionation observed on Earth [@Marty2013], versus, for example, that observed on Mars [e.g. 
@Jakosky1994] where thermal and nonthermal processes have greatly thinned the atmosphere. However, the removal of hydrogen could leave behind excess atmospheric oxygen that would have to be either incorporated into the crust/mantle or lost. @Kulikov2007 suggest oxygen could be lost by hydrodynamic escape given early Earth EUV conditions. Another type of thermal loss is that associated with large impacts. The Paleoarchean of the Barberton Greenstone Belt (and the Pilbara Craton) contains evidence for large impacts in the form of spherule beds [e.g. @Lowe2003]; these may represent the tail of a distribution of large impacts known as late heavy bombardment [@Bottke2012]. During a large impact the atmosphere can be super-heated by shock to the point that some is blown-off. We include this process in the discussion because during this heating, non-thermal removal processes will also be enhanced. This effect would be most relevant for times after the main thermal pulse resulting from shock.\ Non-thermal escape ------------------ Non-thermal processes encompass a wide range of escape mechanisms that are summarized in Table 1. Some of these involve photons (e.g. photodissociation) whereas others involve interaction with the charged solar wind and collectively can be grouped into “solar wind erosion". Light atmospheric species that have been ionized by EUV photons, charge exchange with solar wind protons, or by impacts with solar wind electrons [@Luhmann1992] can be removed by the solar wind. Ionized light and heavier ions that are not removed can reenter the atmosphere. When they impact neutral species near the exobase, some of the target species will be scattered, gaining enough energy to escape, in a process known as “sputtering". Heavier species (e.g. N$_{2}^{+}$, O$_{2}^{+}$) will be more effective as sputtering agents [@Johnson1990]. 
Of particular importance for our consideration are those non-thermal processes that involve ion escape, because these generally require open field lines. Ions can also travel along the magnetic field and escape through the magnetotail [e.g. @Kistler2010]. Open flux also occurs near the magnetic poles, where magnetic field lines connect with the interplanetary field. The polar cap is defined as the region where open flux occurs and the auroral zone is at its boundary. The solar wind can penetrate deeper into the atmosphere in the polar cap causing ionization, and this is also the region where energetic ions can escape; for example depletion of H and He above the Earth’s magnetic poles today is due to escape along open field lines [@dePater2001]. Ions can also be lost when reconnection takes place, when the magnetic field of the solar wind joins the terrestrial magnetic field [e.g. @Lyon2000]. This is classically expressed as a southward interplanetary (solar wind) magnetic field connecting with a northward magnetosphere field, and this type of reconnection is pronounced during coronal mass ejection events when magnetized solar plasma impacts the magnetosphere. 
However, reconnection is a complex phenomena that has been observed under a variety of solar forcing conditions.\ Process Example Reactions Current Planetary Example Early Earth$^{\ddag}$ ---------------------------------- ----------------------------------------------------- --------------------------- ----------------------- Charge exchange$^{1}$ H + H$^{+*}$ $\rightarrow$ H$^{+}$ + H$^{*}$ Earth (H, D) $\uparrow$ O + H$^{+*}$ $\rightarrow$ O$^{+}$ + H$^{*}$ Venus (He) Dissociative recombination$^{2}$ O$_{2}^{+}$ + $e$ $\rightarrow$ O$^{*}$ + O$^{*}$ Mars (O) $\uparrow$ OH$^{+}$ + $e$ $\rightarrow$ O + H$^{*}$ Venus (H), Mars (N) Impact dissociation$^{3}$ H$_{2}$ + $e^{*}$ $\rightarrow$ H$^{*}$ + H$^{*}$ Mars (N) $\uparrow$ N$_{2}$ + $e^{*}$ $\rightarrow$ N$^{*}$ + N$^{*}$ Photodissociation O$_{2}$ + $h \nu$ $\rightarrow$ O$^{*}$ + O$^{*}$ $\uparrow$ H$_{2}$O + $h \nu$ $\rightarrow$ OH$^{+}$ + H$^{*}$ OH$^{+}$ + $h \nu$ $\rightarrow$ O + H$^{*}$ Ion-neutral reaction$^{4}$ O$^{+}$H$_{2}$ $\rightarrow$ OH$^{+}$ + H$^{*}$ $\uparrow$ Sputtering or $\uparrow$ knock-on$^{5}$ O$^{*}$ + H $\rightarrow$ O$^{*}$ + H$^{*}$ Venus (H) Solar-wind pickup$^{6}$ O + $h \nu$ $\rightarrow$ O$^{+}$ + $e$ Mercury (He, Ar) $\uparrow^{\S}$ then O$^{+}$ picked up Ion escape$^{7}$ H$^{+*}$ escapes Earth (H,D, He) $\uparrow^{\S}$ Electric fields$^{8}$ X$^{+}$ + eV $\rightarrow$ X$^{+*}$ Earth (H,He) $\uparrow^{\S}$ Jeans escape $\uparrow$ Hydrodynamic escape $\uparrow$ Impact erosion $\uparrow$ : Atmospheric Escape Processes$^{\dag}$ $^{\dag}$Adapted from [@Hunten1989; @dePater2001; @Seager2010]. $^{*}$ Excess kinetic energy. $^{\ddag}$Referenced at $\sim$3.45 Ga for comparison with magnetopause reported in [@Tarduno2010]. $\uparrow$, $\downarrow$, escape greater or lower at $\sim$3.45 Ga than today. $\uparrow^{\S}$, overall escape greater due to increased ion supply. 
$^{1}$Charge exchange occurs when an energetic ion collides with a neutral, and the ion loses its charge but retains its kinetic energy. The former ion can have enough kinetic energy to escape. $^{2}$Dissociative recombination occurs when an ion dissociates on recombination with an electron, resulting in atoms with sufficient energy to escape. $^{3}$Impact dissociation occurs when a neutral molecule is impacted by an electron; the resulting energetic atoms can escape. Similar end products result from photodissociation, but rather than a molecule being impacted by an electron, energy is transferred by photons. $^{4}$Ion and neutral molecule can also react to form an ion and an energetic atom. $^{5}$When an energetic atom or ion collides with an atmospheric atom, the atom is accelerated and may escape. A single collision is referred to as knock-on. Sputtering describes a cascade of collisions following the initial collision, with these later collisions supplying enough energy for atmospheric escape. $^{6}$In the absence of an internally generated dynamo field (or in the case of a very weak field), charged atmosphere particles can interact directly with the solar wind and be carried away, in a process known as solar wind pickup or sweeping. If a planet lacks an internal field, but has an atmosphere and an induced magnetic field, particles are picked up at the subsolar point and lost at the tails of the induced magnetosphere. $^{7}$When magnetic field lines are open, energetic atmospheric ions can escape (ion escape). $^{8}$Charged particles can be accelerated by electric fields of magnetospheric or ionospheric origin aiding escape as they are lost as they move out along magnetic field lines (associated with either an internally generated or induced magnetosphere). Atmospheric chemistry --------------------- The type of loss that is most important at a given time is a function of atmospheric chemistry.
Below we focus on a few aspects of this complex topic relevant to our considerations of atmospheric loss on the early Earth and refer readers to the excellent synthesis on planetary atmospheres by @Pierrehumbert2010. The extent of the atmosphere and loss from its top will be dependent on temperature, which in turn depends on whether the dominant species are good or poor infrared emitters [@Lammer2013]. For example, CO$_{2}$ is a good infrared emitter; it dominates the exobase of Venus and as a result the temperature is relatively cool (200-300 K). In contrast, atomic O is a poor infrared emitter; it dominates the present exobase of Earth and the temperature is relatively high (1000 K). Nitrogen is neither a good nor a poor IR-emitter, so the exobase temperature will tend to be controlled by other species [@Pierrehumbert2010]. The question of what these species might be in the early Earth’s atmosphere is itself hotly debated, principally focused on the greenhouse gasses needed to counter the Faint Young Sun, and the amount to which the atmosphere was reducing [@Shaw2008]. Proposed solutions to the Faint Young Sun have included increased atmospheric NH$_{3}$ [@Sagan1972], CO$_{2}$ [@Owen1979], CH$_{4}$ [@Kiehl1987; @Catling2001], or a decreased albedo [@Rosing2010]. Various counterarguments have been made (see comprehensive review by @Feulner2012). NH$_{3}$ should be quickly dissociated by photolysis; its survival was postulated because of the sheltering effect of a hypothetical thick Titan-like organic haze layer resulting from photolysis of CH$_{4}$. However, a haze layer can have a cooling effect [@Haqq2008]. Nevertheless, an optically thin haze layer might be an effective UV screen [@Hasenkopf2011]. The ratio of the mixed valence state magnetite to siderite in banded iron formations (BIFs) appears to limit atmospheric CO$_{2}$ levels [@Rosing2010], but this strictly applied to the oldest well-preserved Archean BIFs.
Moreover, this apparent limitation relies on the assumption that iron mineral formation in BIFS was not far from equilibrium with the atmosphere; this assumption has been questioned [@Dauphas2011; @Reinhard2011]. @Goldblatt2011 argue that albedo effects alone cannot resolve the Faint Young Sun Paradox to a factor of 2 of the needed radiative forcing (but see also responses by @Rosing2011). Nitrogen itself, while not a greenhouse gas, could have aided early greenhouse warming because its presence broadens the absorption lines of other greenhouse gases [@Goldblatt2009]. Notwithstanding the continuing debates, a N$_{2}$-CO$_{2}$ atmosphere, with important time dependent roles for hydrogen and methane, seems to be the closest consensus available for the early Earth. The importance of methane likely tracks the start of its production biologically [@Catling2001], whereas the very early presence of hydrogen in the atmosphere seems unavoidable given early high degassing rates, although concentrations are also debated [@Tian2005; @Catling2006]. Several authors have noted that the removal of hydrogen by thermal and non-thermal processes could have contributed to the change from a reducing to oxidizing atmosphere [@Catling2001; @Lammer2008]. We recall that loss of this hydrogen can adiabatically cool the exosphere, limiting loss of heavier species [@Tian2008; @Lammer2013] whereas H$_{2}$-N$_{2}$ collision-induced warming is another mechanism that could counter effects of the Faint Young Sun [@Wordsworth2013]. There is another potential bottleneck of importance that could control the loss of water from the early Earth. Today, water loss is limited by the cold trap at the top of the troposphere where temperatures are low and water condenses, leaving the stratosphere relatively dry. Given a current loss of H from the atmosphere of 2.7 $\times$ 10$^{8}$ atoms cm$^{-2}$ s$^{-1}$, only about 5.7 m of a global ocean on Earth would be removed in 4.5 billion years [@Hunten1989].
Since water vapor is controlled by surface temperature, and Archean surface temperatures are argued to have been temperate [e.g. @Blake2010], there is no reason to suspect the lower atmosphere was unusually dry. However, with the lack of an ozone layer in the anoxic Earth, greater penetration of UV into the atmosphere (causing photolysis of water), and greater UV output of the young Sun, a cold trap may not be as limiting, as has been suggested for the primitive Venusian atmosphere [@Chassefiere1997]. The example of Venus merits further discussion (below) because it and Mars are sometimes offered as examples relevant to the importance of magnetic fields on atmospheric loss.\ The apparent Martian and Venusian counter-examples: issues in assessing the effectiveness of a magnetosphere ------------------------------------------------------------------------------------------------------------ While it has been traditionally postulated that the difference between the current water inventory of Earth versus that of Mars is due to the presence of an internally generated magnetic field on the former [e.g. @Lundin2007], there have been several more recent dissenting voices, to the point that some have even questioned the more general importance of a magnetic field [@Kasting2010] and particularly whether a stronger field could even lead to increased dispersal of the atmosphere [@Brain2013]. These issues raised follow one or more of the following threads of reasoning:\ [*(i)*]{} Venus lacks an internal magnetic field but retains a thick atmosphere, providing a circumstance in which seemingly magnetic shielding is unimportant for atmospheric protection, and by extension, habitability.\ [*(ii)*]{} The present-day total atmospheric ion escape from Earth, Mars and Venus appears to be within similar orders of magnitude, at 10$^{24}$-10$^{26}$ ions s$^{-1}$ [@Barabash2010; @Strangeway2010].
Because Earth has an internal geomagnetic field and Mars and Venus do not, an internal magnetic field must not be that important for water loss/survival.\ [*(iii)*]{} Planets with atmospheres have an induced magnetosphere that provides atmospheric protection, therefore a specifically internally generated magnetic field is unnecessary for atmospheric survival.\ [*(iv)*]{} Ions can be accelerated and channeled along large scale field lines, particular toward the poles in a large scale dipole configuration. In this respect, by converging the incoming particle flux, the magnetic fields help to exacerbate the atmospheric loss rather than abate it [@Brain2013]\ Sorting out the evidence in support of and in opposition to these arguments comprises an important opportunity for further research. Here we discuss each of them a bit further to highlight what we think are particularly important aspects to consider further when critically assessing the role of magnetic fields in atmospheric retention and habitability. With regard to argument [*i*]{}, the distinction between merely retaining an atmosphere vs. the retention of specifically water must be kept in mind. While the exact evolutionary history and escape mechanisms are debatable, there is little argument that Venus has lost a huge amount of water relative to Earth. This has irreversibly altered its atmosphere, to the point that the probative value of present-day conditions on Venus for understanding the solar-terrestrial environment of the early Earth is limited. The present Venusian CO$_{2}$ atmosphere has a relatively cool exobase limiting loss. It is true that a magnetic field is not necessary to retain an atmosphere, but the relevant issue for life as we know it is water. The evolution of a planetary atmosphere is also dependent on the evolution of its host star’s wind.
Indeed the fact that the young sun had a stronger solar wind is important in considering argument [ *ii*]{}, which ignores the greatly enhanced solar winds and XUV radiation environment associated with the rapidly rotating young Sun ([*sections 3.3, 4.1*]{} of this paper). In fact the extreme XUV atmospheric heating may have been so great that the exosphere moved beyond the magnetosphere associated with a geodynamo [e.g. @Lammer2008; @Tian2008]. In this case, by definition part of the atmosphere will be unprotected by the magnetic field against loss. But atmospheric escape rates could be even higher in such a case in the absence of an internally generated magnetic field, because solar wind protons would more readily gain access to denser parts of the atmosphere. In fact, the question becomes one of atmospheric survival, even given a N$_{2}$-CO$_{2}$ atmosphere [@Lichtenegger2010]. Recent measurements of present-day conditions do show that Mars and Venus have in fact exhibited accelerated atmospheric loss during times of increased solar wind pressure [@Edberg2010; @Edberg2011], whereas a direct comparison of Earth and Mars under the same solar forcing found that Martian atmospheric loss was more sensitive to increases in solar wind pressure [@Wei2012], strongly suggesting the importance of the geomagnetic shield. In assessing the importance of a magnetic field for habitable atmosphere retention, we must in fact distinguish between a field induced externally by wind-atmosphere interactions (argument [ *iii*]{}) vs. supplied internally via a dynamo. The structure and location of influence of the field would be different in the two cases and this must be considered in comparing the relative influence. 
As summarized by [@Brain2013], the field arising from interactions between the wind (perhaps as in present day Venus, Mars and Titan) likely deflects the wind only within $\lesssim 1$ planetary radii whereas an internally produced field could be stronger, and abate the incoming wind at larger radii from the planet. Because an internally produced magnetic field would be anchored in the core, field lines converge toward magnetic poles. The role of a large magnetosphere combined with convergence of the field toward the poles may indeed be important in assessing the sign of the influence of the magnetic field on atmosphere retention, as suggested in argument [ *iv*]{}. Because incoming wind ions typically have very small gyro-radii, they are captured onto field lines that are concentrated toward the poles as discussed in [@Brain2013]. Supporting evidence comes in part from observations [@Strangeway2005] that the outward flux of O ions from auroral zones is consistent with concentration of a higher solar wind energy flux by up to 2 orders of magnitude in these regions, and is consistent with theoretical considerations of [@Moore2010]. Such an effect would preferentially eject heavy ions that would otherwise be gravitationally bound at atmospheric temperatures, unlike light ions like H$^{+}$ whose thermal speed would already exceed the escape speed. Despite the plausibility of this magnetically aided concentration of wind flux toward the polar caps, we note that there are a number of effects to keep in mind in assessing its influence on habitable atmosphere retention. First, note that the same magnetic fields that might help concentrate the effect of the solar wind in local regions, also create a magnetospheric environment that can result in recapture. For example, the polar outflow of O$^{+}$ from Earth today is some 9 times greater than the net loss, considering recapture in the magnetosphere [@Seki2001].
A local outflow might therefore not always result in a catastrophic global outflow. Second, the depth to which ions can be accelerated by conversion of solar wind flux into the accelerating Poynting flux also depends on competing forces such as a magnetic mirror force [@Cowley1990]. Converging field lines can reflect particles, and trap them above a certain height. The specific depth to which ions can penetrate may be limited for specific planetary circumstances and should be quantified. Third, the time scale for the full atmosphere of ions essential for habitability to be ejected at local auroral regions depends on how fast the atmosphere can circulate into these regions of loss. This resupply requires horizontal flow to refill the cone of loss and thermal diffusion to move material up to the exobase. The slower of these mechanisms determines the rate of loss. While our main purpose here is to provide an introduction to the issues warranting further quantitative study, we now present one calculation on which all influences of the magnetic fields depend, namely the estimate of the impinging rate of solar ions for magnetically shielded versus unshielded planets (Figure 6). Regardless of how the flux that penetrates the atmosphere subsequently evolves, this calculation is the basic starting point for assessing how much stellar wind flux first enters the magnetosphere. The rate of solar ions with the potential to enter a planet’s atmosphere depends on the product of the effective area onto which ions are captured and the speed of the inflow. A large magnetosphere increases the effective area but reduces the inflow speed at which the ions flow into the atmosphere by abating the ram pressure of the solar wind and compressing the solar wind field at the magnetopause with the solar wind ions kept out. However, when the solar wind and magnetosphere fields reconnect, the solar wind ions can bleed onto the planetary magnetic field and into the atmosphere. 
The speed at which ions flow into the atmosphere is $\sim$1/2 the inflow reconnection speed when averaged over time if we assume that the solar wind field orientation is favorable to reconnection $\sim 50\% $ of the time. During the reconnection phase, the effective collecting area of stellar wind ions can become as large as the magnetopause radius (on the daylit side). Keeping these concepts in mind, the mass flux collected by a planet without a magnetosphere ${\dot M}_{c}$ from a stellar wind whose velocity exceeds the escape speed of the planet at its surface can be written as: $${\dot M}_{c}\simeq \frac{\pi r_p^2}{4\pi R^2}{{\dot M}_w} \label{4.4.1}$$ where $r_p$ is the planet radius and $R$ is the planet star distance assuming $R>>r_p$, and ${\dot M}_w$ is the wind mass flux. In the presence of a magnetosphere, the mass flux collected ${\dot M}_{c,m}$ is given by: $${\dot M}_{c,m}\simeq \frac{\pi r_{s}^2 }{4\pi R^2 } \frac{v_{rec}/2} {v_{sw}}{{\dot M}_w} \label{4.4.2}$$ where $v_{rec}$ is the reconnection speed at the magnetopause, $v_{sw}$ is the wind speed, and $r_s$ is the magnetospheric distance from the planet. We have assumed $R>>r_s$. Taking the ratio of equation 12 to equation 11 gives: $$Q\equiv {{\dot M}_{c,m}\over {\dot M}_{c}}\simeq 1.75 \left (v_{rec} \over 25 {\rm km/s}\right ) \left (v_{sw} \over 400 {\rm km/s}\right )^{-1} \left ({r_s/r_p \over 10}\right)^2 \label{4.4.3}$$ where we have scaled to the present solar wind-Earth values for reconnection at the magnetopause [@Cassak2007; @Paschmann2013; @Walsh2013]. Note that the estimate of the reconnection speed at the magnetopause is consistent both with the value estimated theoretically based on asymmetric reconnection [@Cassak2007] and observations [@Walsh2013]. While a crude estimate, if we scale equation 13 to Earth at 3.45 Ga when $r_s/r_p \sim 5$ this factor would reduce to $0.43$ if $v_{rec}/v_{sw}$ remained the same.
Being less than or of order unity, it highlights that despite having a collection area 100 times larger, a robust magnetosphere collects fewer total ions at $r_s$ than directly impact the atmosphere of an unshielded planet. The fact that $Q$ is not $<<1$ implies that the fate of ions trapped at $r_{s}$ may be an important factor in evaluating the effectiveness of magnetospheric shielding of atmospheres. If we consider the concentration of all of this collected flux onto polar caps covering a surface area fraction $A_{cap}$ at the radius from the planet’s center where the ions are stopped, then the total wind flux impinging onto these caps would be $q_{cap}= Q/A_{cap}$. Today $q_{cap}$ is $>> 1$ because $A_{cap} <<1$. However, we recall that observations indicate that atmospheric loss from Earth is not much greater than that at Mars (which presently lacks an internal dynamo), contrary to that predicted if the focusing effect was the dominant factor in atmospheric loss. One reason for limited loss may be the return of ions in the magnetotail. It may be that increases in focusing at the polar cap associated with a magnetosphere are balanced by increased trapping in the magnetotail. For the early Earth, we cannot assume $A_{cap} <<1$, as discussed below. Implications for atmospheric escape at 3.4 to 3.45 Ga ----------------------------------------------------- While the Paleoarchean geodynamo produced a magnetic field that probably prevented whole-scale removal, magnetic field and solar wind strengths suggest important modifications of the atmosphere by thermal and non-thermal escape processes during the first billion years of Earth evolution. The $\sim$6 times greater XUV flux relative to today would have heated the exosphere to many thousand Kelvin, promoting thermal loss of hydrogen.
The reduced standoff conditions would allow greater interaction of solar wind protons with the extended atmosphere, exacerbating thermal loss with non-thermal loss mechanisms (Table 1). The reduced standoff conditions would also be associated with expansion of the polar cap, that area where open field lines allow access of the solar wind to the deeper atmosphere. @Tarduno2010 used the latitude of the aurora to estimate the polar cap area derived from a scaling law of Siscoe and Chen [@Siscoe1975]: $${\rm cos} \lambda_{p} = \left( \frac{M_{E}}{M_{E_{0}}} \right)^{-1/6} P^{1/12} {\rm cos} \lambda_{p_{0}}$$ where $\lambda_{p}$ is the magnetic latitude of the polar cap edge, $\lambda_{p_{0}}$ is the present-day value of 71.9$^{o}$, $M_{E_{0}}$ is the present-day dipole moment, and $P$ is the solar wind dynamic pressure normalized to the present-day value ($\sim$2 nPa). Using this scaling law, @Tarduno2010 suggested the polar cap could have increased by as much as a factor of 3 under Paleoarchean solar wind forcing. In numerical simulations using the Space Weather Modeling Framework modules, @Sterenborg2011 also found polar cap enlargements, but by smaller amounts (15-50%). We note that the larger values reflected the most compressed magnetospheres (model B) which represent much smaller standoff distances than those obtained in the simulations. Equation 14 assumes a circular polar cap; day-night side asymmetry should modify this shape, and the simulation results show highly elliptical polar caps. The circular approximation may be a better estimate of the open flux area sampled during one day. The combined effect of enhanced XUV, reduced magnetopause standoff, and increased polar cap area would have promoted the loss of hydrogen and ultimately water from the Paleoarchean Earth. The limiting factor for the water loss was probably not conditions of the upper atmosphere, but the efficiency of water transport through a lower atmosphere cold trap.
However, high rates of photolysis related to the large Paleoarchean UV flux may have enhanced the net removal of water. Given that these reduced standoffs existed for hundreds of millions of years (Figure 5), water transport and removal is probable. This in turn implies that the Earth may have had a greater water inventory at and prior to the Paleoarchean, allowing for preservation of the modern oceans. The gradual removal of hydrogen under reduced standoff conditions may also have been important for the transformation of Earth’s atmosphere from one that was mildly reducing to one that was oxidizing. The near certainty of extreme heating causing expansion of the atmosphere prior to 3.45 Ga, and creating even greater opportunities for atmospheric escape provides motivation for determining solar-terrestrial interaction for even older times. We start with a discussion of recent model predictions of dynamo onset, follow this with estimates of solar winds for the first 700 million years of Earth, and conclude with a discussion of the potential for magnetic field strength observations for times $\gg$3.45 Ga.\ Delayed Dynamo Onset ==================== While the paleointensity values at 3.4 to 3.45 Ga are slightly less than present-day, they are not remarkably different from variations that may have occurred over the last 200 million years [e.g. @Aubert2010]. This raises the question of whether Earth has always had a relatively strong magnetic field, dating from a time shortly after core formation. Several diverse lines of reasoning have been used to argue otherwise, with dynamo onset extending to times just older than the 3.45 Ga constraint discussed above. [ *Notwithstanding caveats discussed in section 4.4,*]{} a delayed dynamo onset and its associated long period without magnetic shielding imply a correspondingly long episode of extreme water loss. In this case, a huge initial water reservoir and/or large supply as a late veneer [e.g.
@Albarede2009] may be needed to account for the present-day conditions. One of the most fascinating of these delayed dynamo hypotheses is derived not from the study of terrestrial samples, but from investigations of the Moon. Lunar ilmenite samples obtained during Apollo missions have unusual nitrogen isotopic values; these have been interpreted by @Ozima2005 as reflecting nitrogen picked up from Earth’s atmosphere by the ancient solar wind and transported to the lunar surface. An example of this process is the pickup of elements from Venus observed today. The relevant point for our consideration is that @Ozima2005 recognized the importance of magnetic shielding and hypothesized that the Earth’s magnetic field was null (or of very low intensity) during the time of nitrogen transfer, which was constrained by the age of the Apollo samples (3.9 to 3.8 Ga). On the basis of terrestrial constraints, this remains a viable hypothesis. However, emerging paleointensity from lunar samples suggest an important component of magnetic shielding may have been present on the Moon. The Moon appears to have a core that is at least partially molten [@Weber2011] and magnetized crust [@Carley2012], and it has long been suspected that an ancient dynamo was once present on the basis of analyses of Apollo samples [@Fuller1987]. The fidelity of paleointensity data from lunar samples is subject to similar domain state constraints as discussed earlier for terrestrial samples, but these limitations pale in comparison to greater obstacles: the inherent thermal instability of FeNi phases that makeup typical lunar magnetic grain populations and the effects of impact-induced shock [@Fuller1974]. Because of the thermal instability, alternating field demagnetization and normalization with applied fields has generally been used to estimate paleointensity rather than Thellier analyses. Although there have been calibration efforts [e.g. 
@Gattacceca2004], the accuracy of these data is difficult to assess [see @Lawrence2008] because the laboratory method employed does not replicate the magnetization process. Notwithstanding these uncertainties, recent studies have provided evidence for lunar magnetizations $\sim$4.2 billion-years-old [@Garrick-Bethell2009], and possibly extending as young as 3.65 Ga [@Shea2012; @Suavet2013]. Beyond the issue of measurement fidelity, the latter interpretations are somewhat controversial because they call for a lunar field at times younger than the time interval predicted for a viable dynamo powered by thermochemical convection [@Stegman2003]. However, other dynamo mechanisms, such as impact-stirring [@LeBars2011] and precession [@Tilgner2005; @Dwyer2011] might have powered a late lunar dynamo. If confirmed, however, these late lunar magnetic fields pose a serious challenge to the @Ozima2005 hypothesis. (To test his hypothesis, @Ozima2008 have proposed sampling the dark side of the Moon.) An entirely different approach to constraining the early dynamo history is inspired by the detection of ultra-low seismic velocity zones above the core, interpreted as dense melt lenses [@Williams1996]. @Labrosse2007 interpreted these zones as remnants of a once continuous dense melt layer. While present as a continuous layer, @Labrosse2007 postulated that it would form a thermal boundary layer, limiting heat flow from the core and thus suppressing thermal convection needed for dynamo generation. The dense layer was thought to have dispersed into pods sometime between 4 and 3.4 Ga, after which a dynamo could be generated. @Aubert2010 expressed dynamo evolution in terms of scaling laws derived from numerical simulations and present-day heat flow at the core mantle boundary. In one model, a low CMB heat flow of 3 TW is assumed. This model predicts the presence of an earliest Paleoarchean and Hadean dynamo producing a field comparable in strength to today (Figure 3). 
A deficiency of the model, however, is that it underestimates modern field values. In another end member, @Aubert2010 considered a CMB heatflow of 11 TW; this is closer to some estimates based on seismology [@Lay2008]. In this model, dynamo onset is again delayed to times between 4 and 3.5 Ga. The @Labrosse2007 and @Aubert2010 high CMB heatflow models are just two in a class of thermal models that suggest the onset of the dynamo may have been delayed because of mantle conditions [@Jackson2013]: the mantle controls heat flow from the core and if the lower mantle is too hot the heat flow will be limited (and the core could theoretically heat up as has been proposed for small bodies). The problem for early dynamo generation has been exacerbated by recent changes in ideas on core thermal conductivity [e.g. @Olson2013]. Hence, onset of the dynamo is naturally linked with the thermal regime of the lowermost mantle, and finding constraints on the dynamo implicitly tells us about mantle history. We will return to the potential for obtaining this record, but first we address extending the solar wind to times older than 3.45 Ga.\ Solar Wind before 3.45 Ga ========================= In the prior considerations (equations 6-7), mass loss calculations were not extended to the first $\sim$700 million years of Earth history. If we were to extend these loss rates to earliest times, it would imply that the Sun was at least a few percent more massive than otherwise assumed in standard models of solar evolution. A greater luminosity associated with this more massive Sun could provide a solution to the Faint Young Sun Paradox [e.g. @Sackmann2003]. However, while there are only a few solar analogs for times older than 3.45 Ga whose mass loss is constrained by H [I]{} Lyman-$\alpha$ observations [e.g. @Wood2005; @Wood2014], the few that are available seem to define mass loss rates that deviate from the trend defined by older stars. 
This suggests that there may be a different magnetic topology affecting mass loss. Specifically, the surfaces of very young, active solar-like stars are thought to be dominated by closed magnetic flux tubes [e.g. @Schrijver2002], whereas mass loss mainly proceeds through open flux tubes [e.g. @Vidotto2009]. We thus proceed assuming that the few stellar analogs available are suggesting a different solar evolution that must be addressed. @Suzuki2012 proposed a model which addresses this issue, offering a scenario where the distribution of open magnetic flux tubes important for mass loss for solar-like stars evolves with time. Closed flux loops dominate in the youngest times, limiting mass loss even though stellar magnetic field intensity is high. Later, open flux tubes occupy a greater portion of the solar surface. Eventually the mass loss decreases with stellar magnetic field intensity decrease. @Suzuki2013 quantified this scenario as follows: $$\dot{m} = \dot{M_{\odot}} \left(\frac{c_{M}}{0.023}\right) \left(\frac{R}{R_{\odot}}\right)^{3} \left( \frac{M}{M_{\odot}} \right)^{-1} \left(\frac{\rho_{o}}{10^{-7} \rm{g}\, \rm{cm}^{-3}}\right) \left (\frac{B_{r,0}f_{0}}{1.25\,\rm{G}}\right) \langle \left( \frac{ \delta v_{0}}{1.34\, \rm{km}\, \rm{s}^{-1}} \right)^{2}\rangle$$ where $\dot{M}_{\odot}$ is the modern solar mass loss rate via wind ($\sim$2 $\times$ 10$^{-14}$ M$_{\odot}$yr$^{-1}$), $c_{M}$ is a conversion factor (numerical simulations suggest $\sim$0.02), $R$ is the stellar radius, $M$ is the stellar mass, $\rho_{o}$ is density (i.e. at the photosphere), $B_{r,0}$ is radial magnetic field, $f_{0}$ is the open flux tube filling factor over the photosphere, and $\delta v_{0}$ is a velocity perturbation (which is expected to be a fraction of the sound speed at the photosphere). @Suzuki2013 ran a series of magnetohydrodynamical simulations of stellar mass loss, varying the input parameters previously discussed over ranges of plausible values. 
They fit a power-law to their simulation results for X-ray surface flux $f_X$ $<$ 10$^{6}$ ergcm$^{-2}$s$^{-1}$, in the regime where the kinetic energy of the winds is unsaturated. This X-ray surface flux $f_X$ corresponds approximately to that of the Sun at age $\sim$0.5 Gyr ($\sim$4.1 Ga), as estimated using the rotational and X-ray evolution relations of @Mamajek2008. We adopt the power-law fits from @Suzuki2013 [; their Equation 25]; and scale to an adopted mean modern solar soft X-ray surface flux of $f_{X,\odot}$ = 3.7 $\times$ 10$^{4}$ ergcm$^{-2}$s$^{-1}$ [based on discussion in @Judge2003 we adopt a modern-day solar luminosity of L$_{\rm X}$ $\simeq$ 10$^{27.35}$ ergs$^{-1}$], to estimate mass loss and ram pressure as: $$\dot{m} = \dot{M}_{\odot} \left(\frac{R}{R_{\odot}}\right)^2 \left(\frac{F_{X}}{3.7\times 10^{4}\,\rm{erg}\,\rm{cm}^{-2}\,\rm{s^{-1}}}\right)^{0.82}$$ $$P_{\rm SW \odot} = ({\rm 2 nPa}) \left(\frac{R}{R_{\odot}}\right)^2 \left(\frac{F_{X}}{3.7\times 10^{4}\,\rm{erg}\,\rm{cm}^{-2}\,\rm{s^{-1}}}\right)^{0.70}$$ where again the modern average solar mass loss is $\dot{M}_{\odot}$ $\sim$ 2 $\times$ 10$^{-14}$ M$_{\odot}$yr$^{-1}$. The pressure P$_{SW}$ is evaluated at 1 AU, where we scale it to a mean solar wind ram pressure of 2 nPa (based on four decades of measurements compiled at\ http://omniweb.gsfc.nasa.gov/html and commensurate with a mean solar wind density of 7 cm$^{-3}$ and velocity of 440 kms$^{-1}$). Taking into account the expansion of the Sun over its main sequence evolution, and its empirically constrained rotational braking and X-ray luminosity evolution [@Mamajek2008] we use these scaling relations based on @Suzuki2013 to estimate the mean solar mass loss and solar wind pressure at 1 AU as a function of age. 
At age 0.5 Gyr ($\sim$4.1 Ga), these relations predict both a solar mass loss ($\sim$10$^{-12.7}$ M$_{\odot}$ yr$^{-1}$) and solar wind pressure at 1 AU ($\sim$17 nPa) enhanced over current mean values by only an order of magnitude (Model C, Figure 7; [*see also Supplementary Content for further description*]{}). A limitation of this model is that it somewhat underestimates the present-day standoff [Model C: $\sim$9.5R$_{E}$ vs. present-day value of 10.1R$_{E}$; @Shue1997]. Moreover, it should be emphasized that priority should be given to additional observations as mass loss during the first 500 million years of Earth history is constrained by analogy with data from only the following 4 stars: $\pi^1$ UMa, $\xi$Boo A, Proxima Centauri, and EV Lac [@Wood2014]. Of these, Prox Cen and EV Lac are active M dwarfs, and $\xi$ Boo is part of a binary system where it is not possible to accurately determine the relative contribution of winds from the components. Thus far, there seems to be a “Wind Dividing Line” for stars with X-ray surface flux $>$10$^6$ ergscm$^{-2}$s$^{-1}$ [@Wood2014], suggesting that our Sun’s mass loss during the main sequence stage may have peaked at $\sim$10$^{-12}$ M$_{\odot}$yr$^{-1}$ near age $\sim$500 Myr (4.1 Gya), and been surprisingly lower (only $\sim$0.5-10$\times$ current $\dot{M_{\odot}}$) at earlier ages. Finally, we note that the above calculations have assumed that the relevant pressure associated with inertia of the solar wind is dominated by ram pressure with the solar wind magnetic pressure being subdominant. This is a good approximation for the present sun but a faster rotation and stronger field at the surface of the young sun could increase importance of the magnetic contribution to the solar wind pressure. 
To see this, note that the strength of toroidal magnetic field that arises from winding up of the poloidal field is given by $$B_{IMF} = B (r) = B_{0} \frac{\omega}{v_{sw}} \frac{r_{0}^{2}}{r} \label{btor}$$ where $r$ is distance from Sun, $r_{0}$ is solar radius, $B_{0}$ is open mean field at solar surface, $\omega$ is solar rotation speed (faster for young sun), and $v_{sw}$ is solar wind speed. If we ignore the thermal pressure, the total solar wind pressure at a given distance from the sun would then be the sum of the ram + magnetic pressure at that distance, and is then given by $$\rho_{sw} v_{sw}^2 \left(1 + {B_{IMF}^2/ 8\pi \over \rho_{sw} v_{sw}^2}\right) = \rho_{sw} v_{sw}^2 \left[1 + {B_0^2 \omega^2 r_0^4\over 8\pi \rho_{sw} v_{sw}^4 r^2} \right], \label{btor2}$$ using equation 18. Using the numerical values appropriate for the present sun of $B_0 =2{\rm G}$, $\omega = 3 \times 10^{-6} {\rm rad/s}$, $r_0= 7 \times 10^{10} {\rm cm}$, $r =1.5 \times 10^{13} {\rm cm}$, $\rho_{sw} = 10^{-23} {\rm g/cm^3}$, and $ v_{sw} = 4 \times 10^{7} {\rm cm/s}$, the magnetic correction term in parentheses of equation 18 is $0.006$ today. If however $\omega$ and $B_0$ were significantly larger at early times compared to the increase in $\rho_{sw} v_{sw}^{4}$ at early times, then the magnetic correction term could increase. But depending on the mechanism that drives the solar wind, $\rho_{sw} v_{sw}^{4}$ itself could depend on $B_0$ and $\omega$ so it is also possible that this correction term does not substantially increase. A detailed discussion of these subtleties is beyond the present scope. External source of field for a planet lacking a core dynamo =========================================================== Given the increased solar wind pressure associated with the young Sun, it is prudent to revisit the question of solar-induced magnetic fields. 
This is relevant for gauging the minimum field that might be expected in the absence of an early internally-generated dynamo magnetic field for Earth. A lower limit for the external magnetic field is that due to the solar wind itself, and is given by equation 18 which suggests a value between about 7 and 10 nT for Earth today. However, when the supersonic solar wind impacts the atmosphere, field lines are compressed, and an enhancement of the field is expected. The maximum field strength would correspond to the case in which all of the wind ram pressure is converted into magnetic pressure, that is $$\frac{B^{2}}{8\pi} \sim \rho_{sw} v_{sw}{^2}$$ where $\rho_{sw}$ is the solar wind density. Such equipartition would give a maximum field of about 60 nT today (if Earth lacked an internal magnetic field). This field estimate thus scales with the square root of the solar wind ram pressure. For solar wind pressures of 100 to 1000 times those of today this estimate of the external field would give 0.6 to 2 $\mu$T. We note that for Model C, these values are approached in steady state only for times $>$4.5 Ga. The field would begin to be amplified near the exobase as field lines are compressed, and deeper in the atmosphere as the field lines slip through atmospheric material. The amplified field lines will drape around Earth from the force of the continuous solar wind. The mixing may also be contemporaneous with turbulence that produces complex directional changes in the field on small scales. However, because the solar wind properties vary on such a large scale compared to that of the distance from exobase to Earth’s surface, the overall magnitude of the supply energy and spatially averaged field are not expected to vary greatly over the daylight side of the planet. Thus, while there will be overall magnitude variability on day time-scales, a small external field could be sensed by a slowly cooled magnetic mineral on Earth’s surface. 
This serves as a bound on what paleomagnetic studies might be able to resolve about the earliest core dynamo. We also note that this external field could serve as a seed-field for the start-up of a core dynamo if it penetrates all the way to the core.\ Paleoarchean and Hadean Magnetic Sands\ ======================================= Are there rocks that can be sampled to extend the paleointensity record back in time, before 3.45 Ga? The Pilbara Craton of Western Australia hosts Paleoarchean greenschist facies rocks, but these can extend the record from Barberton by only a few tens-of-millions of years. The key time interval for testing the geodynamo presence/absence question is considerably older (cf. Figure 3). Unfortunately, other terrestrial rocks with ages $\gg$3.45 Ga have been multiply deformed and metamorphosed to amphibolite-facies (or higher); the thermal and associated chemical transformations remove these from further paleomagnetic considerations. This includes the oldest known rocks, including parts of the 4.03 Ga Acasta Gneiss of northwestern Canada [@Bowring1999]. However, there is another recorder: silicate crystals hosting magnetic inclusions that have eroded from primary igneous rocks and are now found in younger sedimentary units. The benefit of this approach is that it might allow us to sample time intervals that are not otherwise available because the original igneous rocks have been lost to erosion. This requires dating of the silicate crystal itself, something that is commonly done with some sedimentary components (e.g. zircons). We note that in using this approach paleolatitudes will typically not be available, because as sedimentary detritus, the orientation of a given silicate crystal relative to horizontal is lost. In this case, inferences on the nature of the global field strength will be limited to the factor of 2 variation of a dipole with latitude. Nevertheless, this is sufficient to test the presence/absence of the geodynamo. 
We describe approaches to the paleointensity investigation of silicates found in younger sedimentary units below, focusing on what is arguably the most famous unit, the Jack Hills metaconglomerate.\ Jack Hills Metaconglomerate --------------------------- The Jack Hills metaconglomerate is located on the northern edge of the Yilgarn Craton of Western Australia. The unit is rich in zircons and in places, most notably the “Discovery outcrop”, up to 12% of the zircons have ages of 4 billion years or older [@Compston1986]. The oldest zircons are dated to $\sim$4.4 Ga, only some 150 million years after formation of the planet [@Wilde2001]. The observation of elevated $\delta^{18}$O in some zircons indicates the incorporation of hydrated rocks in magmas from which they grew. This in turn suggests that Earth had oceans at this time [e.g. @Cavosie2007]. Because water is an essential ingredient for life as we know it, this observation has motivated some to suggest that the Archean/Hadean boundary should be moved from the traditional age of 3.9 Ga to 4.2 Ga [@Valley2006]. Concerted efforts to find the source of the zircons have failed, leading to the conclusion that these have been lost to erosion, possibly during intense chemical weathering, as supported by $\delta^{7}$Li data [@Ushikubo2008]. The Jack Hills sediments have experienced peak metamorphism of 420 to 475 $^{o}$C [@Rasmussen2010] and then have been deformed [@Spaggiari2007a; @Spaggiari2007b] into a stretched pebble conglomerate. As in the studies on the 3.45 Ga dacites from the Barberton Greenstone Belt, a first step in assessing the viability of the Jack Hills sediments is to conduct a paleomagnetic conglomerate test. To address the deformation issue, @Tarduno2013 sampled the interiors of cobble-sized rounded clasts, following the idea that they may have primarily undergone rolling motion rather than stretching, limiting penetrative deformation. 
Magnetic susceptibility versus temperature data indicate the presence of the Verwey transition, the cubic to monoclinic change in the crystal structure of magnetite seen on cooling through 120 K [@Verwey1939]. Thermal demagnetization reveals a complex series of magnetizations removed at low to intermediate temperatures. Some of these magnetizations are grouped, indicating the potential of magnetization during one of the Archean to Proterozoic metamorphic events that affected the Jack Hills. At relatively high unblocking temperatures, often greater than 550 $^{o}$C, a distinct component trending to the origin of orthogonal vector plots of the magnetization is commonly observed. This component is scattered, and statistical tests indicate formal passage of the conglomerate test (Figure 8). This indicates magnetization of the interior part of the cobble only prior to the deposition of the conglomerate; unlike the case of the dacite conglomerate of the Barberton Greenstone Belt, a substantial time period elapsed between the deposition of the conglomerate and the potential age of the mineral components. Most of the Jack Hills sediments appear to have been deposited at ca. 3 Ga, but rare younger units (as young as 1.2 Ga) have been tectonically interleaved. Notwithstanding the potential for some Proterozoic ages associated with these tectonic slivers, it appears that select components of the Jack Hills metasediments have the potential to preserve magnetizations at least as old as ca. 3 Ga.\ Quartz and Zircon carriers -------------------------- Is there a 4 billion-year-old (or older) rock preserved as a clast in the Jack Hills metaconglomerate? The intense chemical weathering called upon for the Hadean would seem to suggest this is unlikely, but it is more possible that individual quartz grains composing the Jack Hills clasts are of this age. As discussed above, these quartz grains commonly contain magnetite, and thus they are viable paleomagnetic recorders. 
However, determining the age of these quartz grains is difficult. In the Jack Hills metaconglomerates, quartz grains often contain zircon inclusions; hence these could provide some age constraints. Zircon having magnetic inclusions is itself a potential paleomagnetic recorder (hinted at in [@Dunlop1997] and discussed explicitly in [@Tarduno2009] and [@Nelson2010]), although one that challenges paleomagnetic measurement technology because of their small size (typically $<$300 microns) and related small number of included magnetic particles. However, high resolution 3-component DC SQUID magnetometers have been used to detect paleointensities from single zircon crystals using Thellier-Coe techniques [@Cottrell2013]. It should also be kept in mind that the thermal history of these grains from their formation to their final incorporation into a conglomerate, may not be straightforward. Radiometric age dating coupled with petrologic examinations (i.e. the presence of overgrowths) has the potential to distinguish zircons which may record paleomagnetic signals dating from their initial formations to those that have been reset due to reheating (at times much older than the conglomerate age). While retrieving a paleomagnetic history from such zircons is a goal, it should be remembered that the complications of zircon resetting are somewhat less important when viewed in the context of simple field absence/presence criteria. That is, the possibility of a “false positive" identification of the geomagnetic field for times older than 3.45 Ga is omnipresent because of subsequent geological activity. 
But the identification of an anomalously low or null field (within the bounds of recording zero discussed above), if this existed before 3.45 Ga, would be otherwise difficult to explain because of the abundance of opportunities for later magnetization.\ Summary and Future Potential ============================ The “habitable zone" is classically defined as that distance from a star where liquid water can exist. Even given birth of a planet in this zone, there is no assurance that a habitable planet will evolve given the potential for water loss. The magnetic field is a key factor that must be considered in determining whether a terrestrial-like planet will retain its water. The preservation potential will in turn depend on the balance of stellar wind pressure and magnetic field strength. Stellar wind history will be a function of star spin rate and stellar evolution. For terrestrial-like planets, salient variables include the time of onset and duration of the dynamo (which are related to the efficiency of heat removal from the core), especially during the first billion years after planet formation. The magnetic field has competing effects with respect to atmospheric retention (and ultimately water survival). Understanding better the net influence of these effects is itself an important direction of future research. For example, an increased magnetic field provides more pressure to abate the solar wind dynamic pressure and increase the magnetopause radius but the larger magnetopause also means a larger collecting area for solar wind flux during phases of magnetic reconnection. In addition, a strong ordered dipole magnetic field may provide a pathway for concentrating the solar wind flux toward polar caps where its local mass-loss effects may be exacerbated. Yet, this same ordered field provides the magnetic topology for recapturing this mass in the opposite hemisphere such that the net global atmospheric mass loss might not be affected. 
Comparing similarities and differences of mass loss from planets within our present day solar system will be helpful for progress. Presently available data support the net atmospheric protective role of dynamo magnetic fields. Ultimately, understanding differences between present-day and early solar wind conditions and influences on Earth’s atmosphere (and the atmospheres of other terrestrial planets) is an important goal. To constrain early solar winds, additional observations on solar analog stars younger than 500 Myr (4.1 Ga) are needed. For the Paleoarchean Earth (at ca. 3.4-3.45 Ga), the balance between core dynamo field values and increased solar wind pressure results in standoff of the solar wind to distances half of those of the present-day, suggesting some atmospheric loss and removal of water. This erosive potential implies a more water-rich initial Earth, and/or the delivery of water as a late veneer, to account for the present terrestrial reservoir. The need for large early water reservoirs, or late replenishment, is exacerbated if onset of the geodynamo was delayed by a hot lower mantle. Therefore, from a deep Earth perspective, better constraints on Hadean lower mantle evolution and its interplay with core heat loss could aid our understanding of the geodynamo/shielding history. These linkages between the early mantle, core dynamo and atmospheric development are equally relevant for Mars (and possibly Venus). From an observational perspective on Earth, there is potential in early Archean and Hadean zircons (and other sedimentary crystals) hosting magnetic inclusions to record the first billion years of the geodynamo using single crystal paleointensity methods. This remains a grand challenge, requiring the most sensitive magnetometers and the development of methods to understand the effects of geologic history after zircon formation. 
Many of these ongoing instrument and technique developments will have continued application in the analysis of Martian rocks retrieved by future sample return missions.\ Acknowledgements ================ We thank Rory Cottrell, Richard Bono, Axel Hofmann, Ariel Anbar and David Sibeck for helpful conversations, and the editor and two anonymous reviewers for their comments. EGB acknowledges partial support from the Simons Foundation. This work was supported by NSF. **Supplementary Content** [*Reconstructing the Past Sun*]{} We combine a theoretical stellar evolutionary track for a 1 M$_{\odot}$ star with observational constraints on the time-evolution of various stellar parameters for Sun-like stars to produce a reconstruction of the Sun’s characteristics at geologically and astronomically relevant times ([**Table 2**]{}). The solar parameters are listed at the starts of various geological time periods, adopting recent ages from the Geological Society of America [@Walker13]. We adopt the 1 M$_{\odot}$ stellar evolutionary track from @Bressan12, which employs the recent @Caffau11 mixture for protosolar chemical composition. We make minor systematic shifts to the luminosity and effective temperature of the evolutionary track at the $\sim$1% level in order to match the current Sun at an adopted meteoritic age for the solar system [4568 Myr; @Bouvier10; @Amelin10]. We adopt the revised stellar parameters compiled by @Mamajek12: effective temperature T$_{\rm eff}$ = 5772 K, luminosity $L$ = 3.827 $\times$ 10$^{33}$ ergs$^{-1}$, and radius $R$ = 695660 km. Spectral types were estimated through the new main sequence effective temperature scale of @Pecaut13 (although in practice spectral types for G-stars are rarely quoted to better than $\pm$1 subtype precision). 
The X-ray luminosity evolution as a function of rotation was calibrated to the data from @Wright11, but was adjusted to pass through the Sun’s current combination of average X-ray luminosity and rotation period [parameterized via Rossby number; @Mamajek08]. We have included an estimate of the mean emission-measure-averaged coronal temperature log$\tilde{T}_X$ as a function of the Sun’s age, based on a custom fit to the data of @Telleschi05 and the modern Sun [@Peres00] as a function of mean X-ray luminosity in the ROSAT X-ray bandpass (L$_X$): log$\tilde{T}_X$ $\simeq$ -1.54 + 0.282logL$_X$ (L$_X$ in ergs$^{-1}$). We estimate the Sun’s current average solar wind mass loss as follows. Based on $\sim$15,000 daily solar wind measurements in the OmniWeb database[^1] between 1963 and 2014, we estimate a mean daily solar wind density of $n$ = 6.94 cm$^{-3}$ and mean solar wind velocity of $V_{SW}$ = 439 kms$^{-1}$. Extrapolating these values over 4$\pi$ steradians, one would estimate the solar wind mass loss rate to be 1.94$\times$10$^{-14}$ M$_{\odot}$yr$^{-1}$. Results from the Ulysses mission [@Goldstein96] show that at high heliographic latitude ($>$20$^{\circ}$) the solar wind has a product of density and velocity approximately half that at lower latitudes. We take this result into account and multiply our original estimate by $\sim$2/3, leading to a spherically-averaged mean solar mass loss via solar wind of $\dot{M}_{\odot}$ = 1.3$\times$10$^{-14}$ M$_{\odot}$yr$^{-1}$. Three mass loss rates are listed in Table 2: $\dot{M}_{W}$ is estimated following the observational trends from @Wood14, $\dot{M}_{S}$ is estimated following the simulations of @Suzuki13, and $\dot{M}_{CME}$ is the estimated mass loss due solely to coronal mass ejections from flares [@Drake13]. 
Magnetopause radii are estimated following @Tarduno10, assuming Earth magnetic field strengths equal to the current value (R$_{S,1}$) and half (R$_{S,1/2}$; similar to that measured for 3.45 Gya by Tarduno et al. 2010). Interplanetary magnetic field pressure was assumed to be negligible at all periods compared to the dynamical ram pressure of the wind. Several of the stellar parameters for the Sun and Sun-like stars (mostly related to magnetic activity) are observationally well correlated with rotation period [e.g. @Mamajek08], so we made a careful reassessment of the Sun’s likely rotational evolution through study of Sun-like main sequence stars of different ages. Based upon a literature review of measured rotation rates of $\sim$1 M$_{\odot}$ ($\pm$10%) stars in ten young star clusters[^2] and older field stars[^3] we derive a revised version of the Skumanich law [@Skumanich72]: P$_{rot}$ = 25.5($t/t_{\odot}$)$^{0.526\,\pm\,0.022}$ day, where $t$ is stellar age, $t_{\odot}$ is the Sun’s age (4568 Myr), and the relation is empirically constrained between $\sim$0.1-7 Gyr. A fit of Sun-like stars in young clusters and (older) field stars [*omitting*]{} the Sun, yields a nearly identical relation, [*predicting*]{} the modern Sun’s rotation period to be 25.4 day. We surmise that the Sun is a normal rotator (within $\pm$1 day) for its mass and age. Has mass loss via the solar wind had an impact on the Sun’s luminosity evolution since reaching the main sequence? An enhanced early solar wind has been proposed to be a potential solution to the Faint Young Sun paradox [e.g. @Sackmann03]. We have surveyed the recent literature for published mass loss estimates and trends for Sun-like stars, as a function of age and/or X-ray luminosity, as well as theoretical predictions [e.g. @Holzwarth07; @Cranmer11; @Drake13; @Suzuki13; @Wood14]. Thus far, these recent studies are consistent with a total solar main sequence mass loss in the range $\sim$0.01-0.4%. 
In the same period, the Sun has lost $\sim$0.03% of its mass due to radiative losses through converting mass to energy [@Sackmann03]. The @Bressan12 stellar evolutionary tracks are consistent with having zero-age main sequence luminosities of L$_{ZAMS}$ $\simeq$ 0.70 (M/M$_{\odot}$)$^{4.535}$ L$_{\odot}$ for solar composition stars within 10% of a solar mass. After 4.6 Gyr, the total predicted solar mass loss due to solar wind and radiative losses is in the range $\sim$0.04-0.4%, so the Sun could have plausibly been negligibly more luminous ($\sim$0.2-1.8%) early in its main sequence phase. Hence, current observational and theoretical constraints on the mass loss history of the Sun seem inconsistent with enhanced early stellar winds providing a parsimonious solution to the Faint Young Sun paradox [e.g. @Sackmann03]. -------- ------- --------------- ------- --------------- --------------- ---------- ------------- ---------- ----------------------- ----------------------- ----------------------- ------------- ------------- ------------------------------------- $\tau$ Age T$_{\rm eff}$ Spec. Lum. Rad. logR$_X$ logL$_{X}$ logT$_X$ $\dot{M}_{W}$ $\dot{M}_{S}$ $\dot{M}_{CME}$ R$_{S,1}$ R$_{S,1/2}$ Name of Starting Geological Period Gya Gyr K Type L/L$_{\odot}$ R/R$_{\odot}$ dex ergs$^{-1}$ K M$_{\odot}$ yr$^{-1}$ M$_{\odot}$ yr$^{-1}$ M$_{\odot}$ yr$^{-1}$ R$_{Earth}$ R$_{Earth}$ ... 
4.525 0.045 5630 G5.4V 0.686 0.871 -3.33 30.1 6.96 -12.6 -11.5 -10.1 4.7 3.7 [*Zero-Age Main Sequence (ZAMS)*]{} 4.450 0.120 5645 G5.2V 0.707 0.879 -3.92 29.5 6.79 -13.4 -11.9 -10.9 5.5 4.4 [*Pleiades Cluster Age*]{} 4.000 0.570 5660 G5.0V 0.735 0.891 -4.85 28.6 6.54 -12.0 -12.7 -12.3 7.0 5.6 Archaen Eon/Eoarchean Era 3.920 0.650 5662 G4.9V 0.739 0.893 -4.93 28.5 6.51 -12.1 -12.8 -12.4 7.2 5.7 [*Hyades Cluster Age*]{} 3.600 0.970 5672 G4.4V 0.756 0.900 -5.18 28.3 6.45 -12.4 -13.0 -12.7 7.7 6.1 Paleoarchean Era 3.450 1.120 5676 G4.2V 0.764 0.904 -5.27 28.2 6.42 -12.5 -13.0 -12.9 7.8 6.2 Barberton Greenstone Belt dacite 3.200 1.370 5684 G3.9V 0.777 0.909 -5.40 28.1 6.39 -12.7 -13.1 -13.1 8.1 6.4 Mesoarchean Era 2.800 1.770 5696 G3.6V 0.800 0.918 -5.56 27.9 6.35 -12.9 -13.2 -13.4 8.4 6.7 Neoarchean Era 2.500 2.070 5705 G3.4V 0.818 0.926 -5.67 27.8 6.32 -13.0 -13.3 -13.6 8.6 6.8 Proterozoic Eon 2.300 2.270 5710 G3.2V 0.830 0.931 -5.73 27.8 6.30 -13.1 -13.4 -13.7 8.8 6.9 Rhyacian Period 2.050 2.520 5718 G3.1V 0.846 0.937 -5.81 27.7 6.28 -13.2 -13.4 -13.8 8.9 7.1 Orosirian Period 1.800 2.770 5725 G2.9V 0.862 0.944 -5.87 27.6 6.27 -13.3 -13.5 -13.9 9.0 7.2 Stratherian Period 1.600 2.970 5731 G2.8V 0.876 0.949 -5.92 27.6 6.25 -13.3 -13.5 -14.0 9.1 7.2 Mesoproterozoic Era 1.400 3.170 5736 G2.7V 0.890 0.955 -5.97 27.6 6.24 -13.4 -13.5 -14.1 9.2 7.3 Ectasian Period 1.200 3.370 5742 G2.6V 0.904 0.961 -6.01 27.5 6.23 -13.4 -13.6 -14.2 9.3 7.4 Stenian Period 1.000 3.570 5747 G2.5V 0.919 0.967 -6.06 27.5 6.22 -13.5 -13.6 -14.2 9.4 7.5 Tonian Period 0.850 3.720 5751 G2.4V 0.930 0.971 -6.08 27.5 6.22 -13.5 -13.6 -14.3 9.4 7.5 Cryogenian Period 0.635 3.935 5757 G2.3V 0.947 0.978 -6.13 27.4 6.21 -13.6 -13.6 -14.4 9.5 7.6 Ediacaran Period 0.541 4.029 5759 G2.2V 0.954 0.981 -6.15 27.4 6.20 -13.6 -13.6 -14.4 9.6 7.6 Cambrian Period 0.485 4.085 5760 G2.2V 0.959 0.983 -6.15 27.4 6.20 -13.6 -13.7 -14.4 9.6 7.6 Ordovican Period 0.444 4.126 5762 G2.2V 0.962 0.985 -6.17 27.4 6.20 -13.6 -13.7 
-14.4 9.6 7.6 Silurian Period 0.419 4.151 5762 G2.2V 0.964 0.985 -6.17 27.4 6.20 -13.6 -13.7 -14.4 9.6 7.6 Devonian Period 0.359 4.211 5764 G2.2V 0.969 0.987 -6.18 27.4 6.20 -13.6 -13.7 -14.5 9.6 7.6 Carboniferous Period 0.299 4.271 5765 G2.1V 0.974 0.990 -6.19 27.4 6.19 -13.7 -13.7 -14.5 9.7 7.7 Permian Period 0.252 4.318 5766 G2.1V 0.978 0.991 -6.20 27.4 6.19 -13.7 -13.7 -14.5 9.7 7.7 Triassic Period 0.201 4.369 5767 G2.1V 0.983 0.993 -6.21 27.4 6.19 -13.7 -13.7 -14.5 9.7 7.7 Jurassic Period 0.145 4.425 5769 G2.1V 0.988 0.995 -6.22 27.4 6.19 -13.7 -13.7 -14.5 9.7 7.7 Cretaceous Period 0.066 4.504 5771 G2.0V 0.994 0.998 -6.23 27.3 6.18 -13.7 -13.7 -14.5 9.7 7.7 Paleogene Period 0.023 4.547 5771 G2.0V 0.998 0.999 -6.24 27.3 6.18 -13.7 -13.7 -14.6 9.7 7.7 Neogene Period 0.003 4.567 5772 G2.0V 1.000 1.000 -6.24 27.3 6.18 -13.7 -13.7 -14.6 9.7 7.7 Quaternary Period -------- ------- --------------- ------- --------------- --------------- ---------- ------------- ---------- ----------------------- ----------------------- ----------------------- ------------- ------------- ------------------------------------- **Figures**\ ![ Magnetosphere (not to scale) shaped by the interaction of the solar wind (with pressure $P_{sw}$ and Earth magnetic field ($M_{E}$)). The point where the solar wind pressure is balanced by the magnetic field is the standoff distance ($r_{s}$).](Tarduno_Figure1.pdf){width="\linewidth"} ![Example Thellier-Coe paleointensity experiment conducted on a single silicate crystal. a. Sample analyzed (quartz phenocryst with magnetite inclusions). b. Orthogonal vector plot of “field off" steps (see text). These define a component that trends toward the origin between approximately 515 and 570 $^{o}$C. Red is inclination, blue is declination (note absolute orientation is arbitrary because the sample is unoriented). (c) Natural remanent magnetization (NRM) lost versus thermoremanent magnetization gained (TRM). 
TRM was acquired using an applied field of 60 $\mu$T. The slope of the line, for the temperature range where field-off data show linear decay to the origin (b) defines the paleofield strength. Example from [@Tarduno2010].](Tarduno_Figure2.pdf){width="\linewidth"} ![ Archean paleointensity constraints based on single silicate crystal paleointensity and inner core growth. a. Field strength values are from Karelia (black, [@Smirnov2003] at $\sim$2.5 Ga, the Kaapvaal Craton at 3.2 Ga (red, blue, circles [@Tarduno2007] and 3.4-3.45 Ga (yellow, green, circles [@Tarduno2010]). Individual determinations (outlined circles) for the Kaapvaal craton are shown with mean (to left, circles without outlines). Green square is the 3.2 Ga data corrected for cooling rate. As discussed in the text, this cooling rate correction probably results in an underestimate of the true field value. Also shown are two models for field intensity history from [@Aubert2010] based on high/low values of present-day core-mantle boundary heat flow. IC is the time of onset of inner core growth for the models. Many recent models invoke the onset of inner core growth younger than 1 Ga (b). In nearly all models, a long-lived Paleoarchean geodynamo would need to be entirely thermally driven.](Tarduno_Figure3.pdf){width="\linewidth"} ![ Conglomerate test on $\sim$3.4 Ga rock unit from the Barberton Greenstone Belt (Kaapvaal Craton). a. Sketch of conglomerate formation. Key: “v" symbol, dacite; triangles, chert; arrows, imparted thermoremanent magnetization. b. Field photo of 3.4 Ga conglomerate. c. Orthogonal vector plot of stepwise thermal demagnetization of dacite clast from conglomerate; red is inclination, blue is declination. (b). Two components are defined at low unblocking temperatures (LT, green) and high unblocking temperatures (HT, red). d. Stereonet plots show that the LT component is well-grouped, as expected for a secondary magnetization. 
The HT component is scattered, as expected for a primary magnetization (cf. part a). (c) and (d) are from Usui et al. [@Usui2009].](Tarduno_Figure4.pdf){width="\linewidth"} ![Standoff versus time. Subsolar standoff distance in Earth radii (${\rm R_{E}}$) plotted versus age from solar mass loss Model A (see text). Contours are Earth’s dipole moment, with paleointensity data from single silicate crystals (cf. Figure 3). Figure modified from [@Tarduno2010].](Tarduno_Figure5.pdf){width="\linewidth"} ![Magnetic shielded versus unshielded planet. a. Planet lacking a core dynamo in the presence of a solar wind exhibiting atmospheric loss (shaded). b. Planet with a core dynamo exhibiting atmospheric ion loss and return.](Tarduno_Figure6.pdf){width="\linewidth"} ![Standoff for solar ages younger than 700 Myr. Subsolar standoff distance in Earth radii (${\rm R_{E}}$) plotted versus age from solar mass loss Model A (red contours) versus Model C (yellow contours; see text).](Tarduno_Figure7.pdf){width="\linewidth"} ![Conglomerate test on $\sim$3.0 Ga metasediments from the Jack Hills (Yilgarn Craton). a. Field photo of conglomerate with cobble-sized clasts. b. Orthogonal vector plot of stepwise thermal demagnetization of subsample from interior of clast from conglomerate; red is inclination, blue is declination. Two components are defined at relatively low-intermediate unblocking temperatures (LT, green arrows) and high unblocking temperatures (HT, red arrows ). Example from [@Tarduno2013]. Note that the E-W labels on the diagram were inadvertently inverted in the original publication. c. Stereonet plots show that a relatively well-grouped LT component from some clasts (left), as expected for a secondary magnetization. The HT component is scattered (right), as expected for a primary magnetization (from [@Usui2009]).](Tarduno_Figure8.pdf){width="\linewidth"} [^1]: Goddard Space Flight Center Space Physics Data Facility: http://omniweb.gsfc.nasa.gov/. 
[^2]: In age order: Pleiades, M50, M35, M34, M11, Coma Ber, M37, Praesepe, Hyades, and NGC 6811. [^3]: In age order: 18 Sco, Sun, $\alpha$ Cen A & B (mean), 16 Cyg B.
{ "pile_set_name": "ArXiv" }
Introduction {#S0001} ============ Hip and knee arthroplasty continue to be among the most common surgical procedures performed in the United States with an expected increase in utilization as indications expand and an aging population lives longer with an increased demand for more active, pain-free lifestyles.[@CIT0001] As a result, the likelihood of patients having ipsilateral hip and knee prostheses increases, as does the risk of periprosthetic complications, specifically, interprosthetic femur fractures (IFF), defined by a fracture of the femur between an ipsilateral hip and knee prostheses. Early reports on the treatment of these fractures were presented with significant reservation. The earliest report by Dave et al described successful treatment of a single patient who sustained an interprosthetic femur shaft fracture around a stemmed total knee and total hip implant with the use of a Mennen plate, iliac crest bone grafting, and a 3-month period of restricted weight-bearing.[@CIT0002] However, subsequent reports by Kenny et al demonstrated poor outcomes in treating similar fractures, with all four patients in that series failing initial treatment, two of whom required either above knee amputation or hip disarticulation.[@CIT0003] Since these early reports, advancements in treatment strategies, namely implant choice and the understanding and classification of the fracture pattern, have improved the outcomes for these complex injuries. 
Management and avoidance of treatment complications are dependent on understanding the patient, fracture pattern, intraoperative techniques, and the arthroplasty reconstructive options.[@CIT0004],[@CIT0005] Epidemiology and patient characteristics {#S0002} ======================================== The rising number of patients who are living longer and undergoing joint replacement, combined with technology advances and anesthetic protocols for rapid recovery have led to a dramatic increase in the number of patients living with joint replacement.[@CIT0006]--[@CIT0008] An estimated 620,000 of these patients have undergone both THA and TKA. Furthermore, about 19,200 Americans are living with ipsilateral hip and knee arthroplasties.[@CIT0009] Patients are remaining active as their quality of life improves and thus more demands are placed on their implants. As a result, the rates of periprosthetic fractures have also risen. While still uncommon, THA periprosthetic fracture rates are reported at 0.1--5% while TKA periprosthetic fractures occur at a rate of 0.3--5.5%.[@CIT0006],[@CIT0010]--[@CIT0012] A subset of these fractures known as IFFs was first described by Dave et al in 1995.[@CIT0002] Early estimates regarding the rate of IFFs were made by Kenny et al, reporting a rate of 1.25% in their series of over 300 patients.[@CIT0003] The actual number of IFFs is difficult to estimate, but more recent reports estimate the risk to be about 5--7% of all periprosthetic fractures.[@CIT0013] In some smaller series, Sah et al reported on 22 fractures over a 4-year period, while Platzer et al reported 23 patients in 16 years.[@CIT0014],[@CIT0015] Valle Cruz's group reported on 6 fractures over a period of 6 years in a cohort of 112 patients.[@CIT0016] While uncommon, these fractures can have significant implications for patient outcomes. Recognition and effective management of these injuries are paramount to maintaining preinjury quality of life for the patient. 
Certain implant construct characteristics increase the propensity for or pose management challenges for IFFs. Regarding interprosthetic distance, there is no clear consensus on how far apart hip and knee stems should be to mitigate fracture risk. Theoretically, a reduced distance may lead to higher stress concentration in the femoral shaft and thus an increased risk for fracture at this location. A biomechanical study by Soenen et al observed that gaps \<110 mm between stems increased risk of fracture; however, this study did not take into account cortical thickness.[@CIT0017] An alternative argument in the risk for IFF is cortical and medullary diameters. In a series of 23 patients, Lipof et al found that the IFF group was more likely to have significantly narrower femoral cortices at the isthmus compared with intact femurs, but larger medullary canals, suggestive of the typical biomechanical changes seen in patients of older age and those with osteoporosis.[@CIT0006] Similarly, Valle Cruz et al found a higher rate of IFF in areas distal to the hip stem tip which correlated to widening of the femoral canal and narrowing of the femoral cortices.[@CIT0016] Although diaphyseal stress risers are considered higher risk for fracture compared with metaphyseal ones, Mamczak's data showed a higher incidence of IFF in the supracondylar area.[@CIT0008] These changes in femoral architecture predisposing patients to periprosthetic and interprosthetic fractures more generally relate to overall bone health; thus, patient risk factors for IFF are similar to those for periprosthetic fractures. 
These include female gender, advanced age, revision surgery, osteoporosis, and inflammatory diseases such as rheumatoid arthritis.[@CIT0018] Implant-related factors include press-fit stems as a risk for early fracture and the development of osteolytic lesions with the use of conventional polyethylene as a late risk for fracture.[@CIT0019],[@CIT0020] Osteolysis surrounding total hip implants is far less prevalent today as a result of the use of highly cross-linked polyethylene (HXLPE) introduced in the late 1990s. However, patients who underwent hip arthroplasty prior to the use of HXLPE are at higher risk for the development of wear and osteolytic lesions leading to decreased femoral structural stability. Osteoporosis is additionally considered an independent risk factor for fracture. Modulus mismatch between the bone--implant interface contributes to stress shielding in already weakened bone which may predispose the patient to fracture in low energy mechanisms of injury. Further, careful consideration should be given to prolonged bisphosphonate use in these patients, as there may be a risk of increased, atypical fractures as a result of the combination of suppressed bone turnover and repetitive stress.[@CIT0021] Platzer et al reported on the presence of severe osteoporosis or rheumatoid arthritis in 73% of the patients treated for distal femoral periprosthetic fracture.[@CIT0022] Interprosthetic fracture classification {#S0003} ======================================= Classification of interprosthetic femur fractures has evolved from initial modifications of the Vancouver and Société Française de Chirurgie Orthopédique et Traumatologique classifications, traditionally used to describe periprosthetic femur and knee fractures, respectively, to an interprosthetic fracture specific classification system described by Pires et al ([Figure 1](#F0001){ref-type="fig"}).[@CIT0023]--[@CIT0025] In this classification system, interprosthetic fractures are divided into three 
main types: Type I which describes fractures around a femoral prosthesis, Type II which describes fractures around a knee prosthesis without a stem, and Type III which describes fractures around a knee prosthesis that contain a stem extension. Type I and II fractures are further subdivided into groups A (stable femoral and knee prosthesis), B (unstable femoral but stable knee prosthesis), C (stable femoral but unstable knee prosthesis), and D (unstable femoral and knee prostheses). For Type III fractures, the subgroup differs from Types I and II in that group Type IIIA represents stable prostheses with viable bone between the prostheses, Type IIIB describes stable femoral and knee prostheses with a nonviable fragment or lack of bone between prostheses ends, Type IIIC describes unstable prostheses (hip, knee, or both) with viable bone between the prostheses, and Type IIID represents unstable prostheses (hip, knee, or both) with a nonviable interval fragment due to lack of viable bone between prostheses' ends. By providing a description of the fracture location, identification of the type of arthroplasty prosthesis that is present, and delineating stability of the prosthesis, this classification system provides both descriptive utility and also aims to direct treatment strategies. For Type I and II fractures, treatment includes plate fixation in the case of a stable prosthesis (subtype A) or revision of an unstable prosthesis to a longer and/or stemmed prosthesis with or without the addition of supplemental plate fixation as needed for fracture fixation (subtypes B-C). For Type III fractures, stable implants without sufficient bone stock or unstable implants with insufficient bone stock can be treated with revision arthroplasty and/or plate fixation with or without bone grafting. 
Depending on the quality of bone and usability of the original arthroplasty prostheses, a total femoral replacement or augmentation of the femur with a strut allograft are available tools in the armamentarium of this treatment algorithm (Type ID, IID, or III B-D).[@CIT0025],[@CIT0026]Figure 1Interprosthetic femur fracture classification as described by Pires et al.**Note:** Data from Pires et al.[@CIT0025],[@CIT0026] Intraoperative considerations {#S0004} ============================= Operative management of IFFs poses significant surgical challenges. Navigating these difficult fractures should rely on preoperative patient optimization of medical conditions and an algorithmic, practical surgical approach with a focus on adherence to Arbeitsgemeinschaft für Osteosynthesefragen (AO) principles. Development of a preoperative surgical plan complete with any necessary implants, trays, and adjunct grafts is paramount to success in the operating room. When positioning patients for surgery, our preferred method is in the semi-lateral position on a beanbag over a fully radiolucent table. If a beanbag is not available, one may use an inflatable rapid infuser sleeve attached to a blood pressure cuff machine. The sleeve is placed under the patient's ipsilateral buttock and during various portions of the procedure the bag may be inflated or deflated to accommodate radiographic views or surgical visualization. Surgical approach may incorporate prior incisions but typically follows an extensile lateral exposure to the femur to facilitate exposure, fracture reduction, and fixation. Careful attention should be paid during dissection to limit periosteal stripping and soft tissue destruction as this may lead to a higher incidence of nonunion. In the case of a short lateral and large medial fracture fragment, a lazy "S" incision may be used that starts laterally, follows the lateral femur, and then tracks medially to expose the knee distally. 
The patient's prior medial parapatellar arthrotomy may be re-incised to limit any patellar devascularization with a parallel lateral parapatellar arthrotomy. This will provide an extensile exposure to both the medial and lateral aspects of the distal femur and more easily allow for supplemental neutralizing fixation on the medial side of the femur following lateral plate fixation if varus stress testing produces lateral opening. As noted earlier, the fracture classification has aided in elucidating fixation options for interprosthetic fractures, depending on the fracture pattern and location, stability of the prostheses, and the patient's bone quality. For patients with adequate bone stock, constructs may include locking plates, cables, intramedullary nails, or some combination of these. Currently, locking plates are the implant of choice in the treatment of IFFs ([Figure 2A](#F0002){ref-type="fig"}--[D](#F0002){ref-type="fig"}).[@CIT0014],[@CIT0027]--[@CIT0029] These implants provide stable fixation even in osteoporotic bone, help to resist varus collapse when placed on the tensile side of the femur, and are often applied overlying periosteal tissues to preserve the blood supply to the bone.[@CIT0005] The major treatment goals are adequate fixation with restoration of length, alignment, and rotation, early mobilization, and fracture union. Locking plates should be applied along the length of the femur, spanning the prior implant stem(s) by at least two cortical diameters. This serves to maximally disperse forces across the bone, protect the bone from further fracture, and decrease the overall stress concentration at implant--bone interfaces. Many modern locking plates are titanium with a similar modulus of elasticity as bone to limit modulus mismatch. 
In addition, strategically placed screw holes within the plate function to allow screws to be inserted around hip or knee stems and also prevent a postage stamping effect within the bone, thereby decreasing the risk of further fracture. Finally, to limit the stiffness of the construct and the potential for nonunion, screw density within the plate should be approximately 40--50% ([Figure 3A](#F0003){ref-type="fig"}--[D](#F0003){ref-type="fig"}). In areas where fixation may not be amenable to bicortical screws due to the presence of prior arthroplasty implants, the use of cerclage cables or unicortical locking screws may supplement fixation ([Figure 4](#F0004){ref-type="fig"}). Additionally, in the presence of a cruciate retaining TKA or one with an open box configuration, a retrograde nail may be used. This is typically used in the setting of a revision or interprosthetic nonunion where a supplemental plate is also used, as the nail is unable to overlap the hip stem and causes an area of stress concentration just distal to the hip prosthesis ([Figure 5](#F0005){ref-type="fig"}). A recent case series performed by Hussain et al reviewed 9 IFFs treated with a combination of a retrograde nail and laterally locked plate. Fixation proximal to the fracture included an average of 3 bicortical screws and one unicortical screw, and a minimum of four cortices of fixation. They observed a 100% union rate with immediate weight-bearing.[@CIT0030] The intramedullary nail functions both biologically and biomechanically. Reaming the canal for the nail allows for cancellous bone to be impacted into the fracture site after preliminary reduction with the plate. Biomechanically, an intramedullary nail imparts longitudinal and rotational stability and enhances fixation stability.Figure 2Preoperative radiographs (**A, B**) demonstrating a long spiral oblique interprosthetic fracture with apex posterior angulation. 
A laterally based femoral locking plate was used for internal fixation of this fracture (**C, D**) with the addition of interfragmentary lag and position screws.Figure 3Preoperative anteroposterior (**A**) and lateral (**B**) radiographs showing plate osteosynthesis with a lateral locking plate and high screw density and thus stiff construct, resulting in a failure of fixation. Anteroposterior (**C, D**) radiographs of the revision construct demonstrating interfragmentary screws, decreased screw density and construct stiffness, and appropriate prosthesis overlap.Figure 4Anteroposterior radiograph of the proximal femur demonstrating periprosthetic fracture fixation around a long, cemented hip stem. Unicortical screws (outlined by red box) and supplemental cables are useful in this situation with limited bone stock around the stem and a stiff cement mantle.Figure 5Anteroposterior (**A, B**) and lateral (**C**) radiographs of a distal periprosthetic femur fracture fixed with a short retrograde nail and supplemented with a long lateral locking plate in a patient with osteoporotic bone. The lateral radiograph demonstrates the staggered screw holes to maximize coverage around the nail and hip stem. (Image Courtesy: Derek J. Donegan, MD). Due to the relatively low incidence of IFFs, the clinical literature is primarily in the form of case reports and case series. Recently, however, Bonnevialle et al conducted a retrospective, multicenter study of 51 patients with a mean age of 82.5 years who suffered an IFF between 2009 and 2015. At a mean follow-up of 27 months, there were 6 mechanical complications, 2 surgical site infections, and 2 cases of loosening, illustrating the morbid nature of this injury. The overall mortality at final review was 31% (9 deaths in the first 6 months) with a median survival of 3.45 years.[@CIT0013] In a smaller series, Hoffman et al conducted a retrospective review of 27 IFFs, the majority of whom were females, over a 7-year period. 
They reported an 89% union rate with long, lateral, titanium plates. They advocated a submuscular plating technique to avoid soft-tissue stripping and adequate proximal fixation around the hip stem.[@CIT0009] One patient did develop hardware failure that required further treatment with dual plating. This serves as an additional option for fixation in cases of limited bone stock anterior or posterior to the stem. A smaller plate may be placed on the anterior surface of the femur to improve biomechanical stability with screws angled medially or laterally to the stem if possible. Sah et al evaluated 22 patients with IFFs treated with locked, condylar plating via a minimally invasive technique. All fractures healed within 14 weeks.[@CIT0014] Similarly, Platzer et al retrospectively evaluated 23 IFFs treated with locked plating and supplementary cerclage cables. By 6 months, 82% were radiographically healed. The four failures were attributed to poor reduction and fixation techniques.[@CIT0015] Moreover, while a treatment algorithm has not been universally adopted, adherence to fixation and surgical principles described earlier can maximize the chance of union and a satisfactory outcome in these difficult fracture patterns. Considerations in patients with poor bone stock {#S0005} =============================================== Special consideration should be given to circumstances where poor bone stock limits standard fixation or arthroplasty reconstruction methods. The initial management as mentioned previously requires assessment of the stability of the implants, which will dictate the ability to retain or need to revise the existing prostheses. 
For stable implants, fixation methods may include a plate with or without adjunct bone grafting or cortical strut augmentation; a long, lateral locking plate remains the implant of choice for osteoporotic or compromised bone stock.[@CIT0009],[@CIT0014],[@CIT0015],[@CIT0028],[@CIT0029],[@CIT0031]--[@CIT0033] When permitted by a knee prosthesis that allows for the introduction of an intramedullary device, dual fixation methods to include both intramedullary nail and plate fixation as previously described may be an option in the initial treatment.[@CIT0030] In the situation of an unstable knee prosthesis without a stemmed component, revision to a stemmed component with or without supplemental metaphyseal fixation may be required ([Figure 6](#F0006){ref-type="fig"}). For unstable hip prostheses, treatment first includes revision to a longer distally engaging prosthesis. Definitive fracture fixation with plate and supplemental bone augmentation as needed should follow arthroplasty component revision. Interprosthetic sleeves can be useful to help bypass insufficient diaphyseal bone between stemmed prosthesis.[@CIT0034] Cortical strut augmentation in the setting of interprosthetic fractures has not been explicitly described; however, experience in the setting of single joint periprosthetic fracture can be extrapolated and applied to treatment of interprosthetic fractures with deficient bone stock or nonunion. 
In these situations, the cortical struts can serve to restore bone stock for noncircumferential loss of cortical bone, bypass stress risers, and further provide biological stabilization at the allograft-host bone interface.[@CIT0031]--[@CIT0033] Adjunct autograft or allograft bone graft can further provide osteoinductive and osteoconductive support to fracture healing.[@CIT0035] In certain circumstances where revision reconstructive options are limited due to substantial bone loss around a loose femoral or knee prosthesis, or where both prostheses are loose and reconstruction of both is not a viable option, revision to a total femur prosthesis may be required.[@CIT0003],[@CIT0005],[@CIT0014],[@CIT0025] Similarly, in the situation of multiple failed fracture fixation or persistent fracture non-union, revision to a megaprosthesis may provide a route to definitive treatment.[@CIT0003],[@CIT0005]Figure 6Anteroposterior (**A**) and lateral (**B**) radiographs of a stemmed revision total knee arthroplasty following periprosthetic fracture around a loose total knee prosthesis. Fixation is supplemented with a long lateral locking plate and several adaption plates to maximize screw purchase around the stems. Abundant callus is noted around the stem junction indicating a robust healing response. Authors' preferred management strategy {#S0006} ====================================== Our preferred algorithm combines a treatment approach based on the classification by Pires et al with adherence to strict surgical principles to maximize bony healing and restoration of limb alignment ([Figure 7](#F0007){ref-type="fig"}). For IFFs with stable hip and knee implants, fixation includes a long, lateral femoral locking plate that spans both implants by at least two cortical diameters. If the total knee prosthesis does not have a stem, the flare of the locking plate extends into the condylar region with multiple locking screws clustered around the implant wherever possible. 
In cases of cemented implants, diamond tip drill bits may be used to pierce the cement mantle to allow more screw fixation. The plate should be applied with minimal damage to periosteal soft tissues and extended proximally beyond the hip stem and secured with both unicortical and bicortical locking screws as appropriate. Cerclage cables may be applied through reliefs in the plate to supplement fixation. In cases of a loose femoral hip or knee component and a fracture distant from the implant, the arthroplasty is revised to a diaphyseal engaging femoral implant or stemmed knee prosthesis, respectively, and supplemented with plate osteosynthesis. If the fracture closely surrounds one implant that is determined to be loose, such that the fracture is distant from the other implant and without diaphyseal extension of the fracture, that singular component is revised with the fracture bypassed with revision component. In general, however, one should be liberal in the use of supplemental plate fixation to disperse contact forces across the entire femur and limit stress risers, especially in areas of bone with high modulus mismatch. If both implants are loose, further evaluation of the bone quality will dictate more extensive treatment options. If the bone quality is adequate, both components may be revised and an interprosthetic sleeve placed as an internal strut. Cortical strut grafting may be supplemented for additional biologic and structural fixation and secured in place with cerclage cables. If bone stock is inadequate, the patient may obtain the most benefit and a more expedient surgery with conversion to a total femur replacement.Figure 7Authors' preferred management strategy for fixation of interprosthetic femur fractures based upon fracture location, implant stability, and bone stock. 
Postoperative rehabilitation {#S0007} ============================ Following fixation of interprosthetic fractures, allowing bony union and maintaining a stable construct is of paramount importance. Therefore, patients are typically kept toe-touch weight-bearing (10%) with a walker for the first 6 weeks after surgery. Multimodal pain management strategies with acetaminophen, minimal and judicious opioid use, and various gabapentinoids are used. Use of nonsteroidal anti-inflammatory medications remains controversial regarding their effects on fracture healing, but often three doses of intramuscular or intravenous ketorolac may be given in the immediate postoperative period. Physical therapy begins on the day of surgery or first postoperative day and focuses on range of motion, strengthening, and gait training. At the 6-week visit, follow-up radiographs are obtained to assess the degree of healing. At that time patients may be progressed to partial weight-bearing and transitioned to full weight-bearing as tolerated over the next 2--4 months. Conclusion {#S0008} ========== Interprosthetic femur fractures represent a difficult clinical problem with a growing incidence in the face of more patients living with ipsilateral total hip and knee arthroplasties. When treating a patient with an IFF, careful consideration of the patient's preoperative medical status, implant type and stability, and surrounding bone stock will help guide treatment options. Standard fixation options include locking plates and screws, and supplemental cerclage cables, with possible bone grafting. In cases of unstable or loose implants, techniques including intramedullary nails, interprosthetic sleeves, revision arthroplasty components, and graft material may be employed. Finally, total femur replacements or a megaprosthesis are typically reserved for patients with limited bone stock and loose implants. 
Disclosure {#S0009} ========== Dr Ran Schwarzkopf provided consultancy service to Smith & Nephew, holds stock options from Intelijoint, and was involved in the Gauss Surgical Research for Smith & Nephew. The authors report no other conflicts of interest in this work.
{ "pile_set_name": "PubMed Central" }
Speaker to present on medicinal plants Laurel Redmon, diplomat in Chinese herbology, will host the 6:30 p.m. Tuesday meeting at Adams County Community Center. Her topic will be Medicinal Plants: what they are; what they do; how to use them. Redmon has been featured on Wisconsin Gardener with Shelly Ryan and has spoken at numerous herbal conferences. She is the owner of Red Sage Health in Madison, where she sees patients for acupuncture and herbal treatments. She studied Chinese medicine, including acupuncture and herbology, at the American College of Traditional Chinese Medicine in San Francisco and Five Branches Institute in Santa Cruz, Calif. She has taught classes on subjects from cooking with invasive plants to pulse diagnosis to herbal alternatives to pharmaceuticals, as well as wild culinary/medicinal mushroom identification and use. She has also written for and edited herb and aromatherapy books, and was a regular herb writer for the Oriental Medicine Journal. Go to www.redsagehealth.com. ADVERTISEMENT ADVERTISEMENT ADVERTISEMENT Email this article Speaker to present on medicinal plants Laurel Redmon, diplomat in Chinese herbology, will host the 6:30 p.m. Tuesday meeting at Adams County Community Center. Her topic will be Medicinal Plants: what they are; what they do; how to use A link to this page will be included in your message. Join Our Team! If you are interested in working for an innovative media company, you can learn more by visiting:
{ "pile_set_name": "Pile-CC" }
19 Cal.App.3d 1019 (1971) 97 Cal. Rptr. 419 THE PEOPLE, Plaintiff and Respondent, v. HARVEY LEWIS, JR., Defendant and Appellant. Docket No. 20427. Court of Appeals of California, Second District, Division Five. September 13, 1971. *1021 COUNSEL Jeannette Christy for Defendant and Appellant. Roger Arnebergh, City Attorney, and Howard Fox, Deputy City Attorney, for Plaintiff and Respondent. OPINION STEPHENS, Acting P.J. By misdemeanor complaint in two counts filed in the Los Angeles Municipal Court, defendant was charged in count I with violation of Penal Code section 148 (resisting, delaying and obstructing a public officer in the attempted discharge of his duties), and in count II, with violation of Penal Code section 415 (disturbing the peace). Defendant pleaded not guilty and after a jury trial was found guilty as charged. After the filing of a formal probation report, defendant was sentenced on each count to pay a fine, or to serve a specified number of days in county jail. An appeal was taken to the appellate department of the superior court, which affirmed the judgment (People v. Lewis, No. CR A 9645). Defendant then moved in the trial court for a stay of execution on the ground of indigency. After a hearing, the motion was denied and the previously imposed sentence was declared to be in full force and effect. Defendant then appealed to the superior court appellate department from the post judgment order denying relief. He also filed a petition for writ of habeas corpus and posted bond. The appellate department affirmed the order and on its *1022 own motion certified the appeal to this court pursuant to rule 63(a) and (c), California Rules of Court, to settle new and important questions of law, and we accepted the transfer. The questions so presented are: (1) What constitutes indigency of a defendant under the ruling in In re Antazo (1970) 3 Cal.3d 100 [89 Cal. Rptr. 
255, 473 P.2d 999] that an indigent defendant cannot constitutionally be imprisoned for nonpayment of a fine? (2) Where the fact of indigency has been decided by the trial judge, does the appellate court reexamine the question de novo, or do the usual rules for reviewing a trial court's determination of fact apply? We agree with the conclusions and reasoning set forth in the opinion by Presiding Judge James G. Whyte concurred in by Associate Judges Martin Katz and Delbert E. Wong of the appellate department affirming the judgment; we therefore adopt that opinion here as our own. (1) "In re Antazo (1970) 3 Cal.3d 100 [89 Cal. Rptr. 255, 473 P.2d 999] establishes the rule that an indigent defendant cannot constitutionally be imprisoned for nonpayment of a fine. (See also Williams v. Illinois (1970) 399 U.S. 235 [26 L.Ed.2d 586, 90 S.Ct. 2018]; Tate v. Short (1971) 401 U.S. 395 [28 L.Ed.2d 130, 91 S.Ct. 668].) In all of these cases, the fact of defendant's indigency was conceded. "In the case at bench defendant was sentenced (on count I to pay a fine of $450 with an alternative of 45 days in jail, and on count II, to pay a fine of $25 with an alternative of two days in jail) on April 20, 1970. The judgment was affirmed on October 29, 1970 (Cr. A. 9645). No Antazo problems were raised on this appeal. "When the remittitur had been returned and the defendant was called into the trial court to satisfy the judgment, he made a motion to stay the alternative jail sentences on the ground that he was an indigent and therefore came under the Antazo rule. The trial court held a hearing and made an express finding `that the defendant is not an indigent at this time and was not an indigent at the time of sentence on April 20, 1970.' The court then reimposed the original sentence. "Defendant has again appealed, this time expressly limiting his appeal to that portion of the judgment which held he `was not an indigent person as defined in In re Antazo, 3 Cal.3d 100 [89 Cal. Rptr. 
255, 473 P.2d 999].' "The cases referred to above make it clear that the rule announced, i.e., *1023 against imprisonment for nonpayment, applies only to indigents. In Antazo, the court said, `We have no doubt that this practice may properly be used to compel payment of fines in proper cases.' (3 Cal.3d 100 at p. 114.) In Tate v. Short, supra, Justice Brennan, speaking for the court, said, `We emphasize that our holding today does not suggest any constitutional infirmity in imprisonment of a defendant with the means to pay a fine who refuses or neglects to do so.' (401 U.S. 395 at p. 400 [28 L.Ed.2d 130, at p. 134].) "This leaves for determination two issues. What is indigency as used in Antazo? How is the fact of indigency determined? It is not strange that, the basic rule having been so recently established, we find no authoritative answer to either question. "Williams refers to `any individual who, by definition, is without funds.' (399 U.S. 235 at p. 242 [26 L.Ed.2d at p. 593].) However, the words `without funds' cannot be taken literally. A man with $500 in his pocket is not without funds, yet if he has no other property or source of income, he is just as unable to pay the second $500 of a $1,000 fine as a man with nothing is unable to pay a total fine of $500.[1] "In the field of appointment of counsel, we have some discussions of the meaning of indigency. In Williams v. Superior Court (1964) 226 Cal. App.2d 666, 672 [38 Cal. Rptr. 291], the court quotes with approval from 13 Stanford Law Review (at p. 545): `The standard applied is flexible, and contemplates consideration of such factors as amount of income, bank accounts, ownership of a home or car, outstanding debts, the number of dependents and the seriousness of the charge.' In People v. Ferry (1965) 237 Cal. App.2d 880 [47 Cal. Rptr. 324], the same language appears. 
(2) "While indigency as to payment of a fine involves somewhat different considerations, we can paraphrase these holdings and say: The standard to be applied is a flexible one and contemplates such factors as amount of income, bank accounts, ownership of a home, car, or other property, tangible or intangible, the number of dependents, the cost of sustenance for defendant and his dependents, and the amount of the fine. *1024 "Nor do we feel that indigency as to payment of a fine is an all or nothing proposition. Penal Code section 1205 provides that in misdemeanor cases, `a judgment that the defendant pay a fine may also direct that he pay the fine within a limited time or in installments on specified dates and that in default of payment as therein stipulated he be imprisoned in the discretion of the court either until the defaulted installment is satisfied or until the fine is satisfied in full.[2] (3) "We hold that for the purpose of applying the rule of Antazo, indigency is a fact to be determined like any other fact, and that it may be total or partial and, if partial, may be of indefinite duration or temporary. (4) "We also feel that the original determination of this fact should be for the trial judge. Trial judges are in the best position administratively to determine these matters. (See Williams v. Superior Court, supra, 226 Cal. App.2d 666 at p. 672; Morris v. Schoonfield (1969) 301 F. Supp. 158 (vacated on other grounds, 399 U.S. 508 [26 L.Ed.2d 773, 90 S.Ct. 2232]); State v. MacGregor (1968) 5 Conn. Cir. 298 [250 A.2d 721]; State v. DeJoseph (1966) 3 Conn. Cir. 624 [222 A.2d 752].) Where a proper hearing has been conducted by the trial judge, if there is any substantial evidence to support his finding, it should be affirmed. This is the usual rule for appellate review and we see no reason for departing therefrom.[3] (5) "Tested by the foregoing rules, we affirm the decision of the trial court. A full hearing on the question of indigency was held. 
There was evidence that defendant had a job (he testified his income therefrom was approximately $490 per month, but he had previously admitted to his probation officer an income of around $1,000 per month); that he had a car in which there was some equity; that he had a small bank account; that for the last two years, his income tax returns showed an income of a little over $5,000; that he had paid his own bond premiums and attorney's fees in this litigation. While there was evidence of some payment for child support (the children lived with their mother, whose income was nearly *1025 as large as defendant's), and considerable indebtedness, the trial court was not obliged to allow him to prefer his creditors to his obligation to the state for payment of his fine, nor was it required to allow defendant the same standard of living to which he had become accustomed. Under these circumstances, although the trial judge could have allowed installment payments,[4] we cannot say his refusal to do so and his finding of ability to pay the fine was without evidentiary support." The order is affirmed. Aiso, J., and Reppy, J., concurred. NOTES [1] "In neither case should the presently unpayable portion of the fine be remitted (64 Mich. L.Rev. 938, 944.) `The State is not powerless to enforce judgments against those financially unable to pay a fine; indeed, a different result would amount to inverse discrimination since it would enable an indigent to avoid both fine and imprisonment for nonpayment whereas other defendants must always suffer one or the other conviction.' (Williams v. Illinois, supra, p. 595 [26 L.Ed.2d].) The fine became a lien against defendant (Pen. Code, § 1206). Should the defendant later become able to pay, it may be enforced by execution as a civil judgment (Pen. Code, § 1215). In an appropriate case, deferred or installment payments may be enforced (Pen. Code, § 1205). 
[2] "The sociological and penological advantages of delayed and installment payments is ably discussed in Fines, Imprisonment and the Poor: Thirty Dollars or Thirty Days, 57 Cal.L.Rev. 778, 816-819. [3] "We are aware of the cases holding that the `constitutional fact' of obscenity must be reexamined de novo at all levels, trial and appellate. However, we see no reason for extending this de novo examination of `constitutional fact' to all facts which may give rise to any claim of constitutional deficiency. While People v. Ferry, supra, (1965), 237 Cal. App.2d 880 [47 Cal. Rptr. 324] discusses the matter of indigency as applying to right to counsel as if it were for original determination by the appellate court, the question of which court should determine the fact of indigency was not raised. In any event, the entire discussion is dicta because the court determined that defendant had had counsel and by voluntarily releasing him, waived any further right to appointment. [4] "Counsel's argument was that defendant `really cannot afford to pay the entire fine at the present time.' The court was thus alerted to the possibility of deferred or installment payments. We cannot assume that the trial judge was unaware of the provisions of Penal Code section 1205 referred to above. His action can only be viewed as a refusal to adopt such procedure."
{ "pile_set_name": "FreeLaw" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>com.apple.security.app-sandbox</key> <true/> <key>com.apple.security.application-groups</key> <array> <string>$(TeamIdentifierPrefix)gifski_video_share_group</string> </array> <key>com.apple.security.files.user-selected.read-write</key> <true/> <key>com.apple.security.network.client</key> <true/> </dict> </plist>
{ "pile_set_name": "Github" }
Q: iTunes Connect Developer reject - Delete non needed build / binaries I uploaded a binary update to the new iTunesConnect, but had not submitted. I found a bug, and tried to remove the existing binary - could not find a way to do that. I then submitted for review but immediately rejected it. Still can't see a way to delete it and upload a new one. How do I upload my new version? OR how do I cancel my update and start a new one? A: You don't need to reject Binaries anymore, just change the BUILD number to something higher 1.1 or something like that. Leave the version number the same. Then it will let you upload it and you can select it from the list to add to your submission. It took me a while to figure out but it's actually a much better solution than all the rejecting nonsense. A: You can't delete binaries that are uploaded. I had to developer reject a couple of times and I had 6 of these binaries there. I selected one and then submitted for approval. There is no way to remove "non-needed" ones from this list
{ "pile_set_name": "StackExchange" }
Acidification of non-medicated and oxytetracycline-medicated cattle manures during anaerobic digestion. Possible adverse effects of a commonly used veterinary antibiotic, oxytetracycline (OTC), on acidogenic phase of anaerobic digestion of cattle manure along with optimum operating conditions were investigated. A standard veterinary practice of 50 ml OTC solution (20 mg/kg cattle weight) was injected into the muscles of cattle and then manure samples were collected for 5 days following the injection. The 5-day samples were equally mixed and used throughout digestion experiments. Preliminary batch tests were conducted to obtain the optimum pH range and observe volatile fatty acids (VFAs) production. In this regard, different sets of batch digesters were operated at pH ranging from 5.2 +/- 0.1 to 5.8 +/- 0.1 at mesophilic conditions with total solids content of 6.0 +/- 0.2%. The pH of 5.5 +/- 0.1 was found to be the optimum value for acidification for both non-medicated and OTC-medicated conditions. Under predetermined conditions, maximum total VFA (VFAtot) of 830 +/- 3 mg (as acetic acid)/L was produced and maximum acidification rate was evaluated as 11% for OTC-medicated cattle manure, whereas they were 900 +/- 6 mg (as acetic acid)/L and 12% for non-medicated manure. Digestion studies were further continued in a semi-continuous mode at pH 5.5 +/- 0.1 and SRT/HRT of 5 days. VFAtot concentrations and maximum acidification rate increased up to 2181 +/- 19 mg (as acetic acid)/L and 29% for non-medicated cattle manure. For OTC-medicated cattle manure, lower acidification rate of 18% was observed.
{ "pile_set_name": "PubMed Abstracts" }
Medical Professionals Simplex Health implements the functional medicine model into healthcare settings by administering nutrition protocols and behavioral therapies that align with The Institute of Functional Medicine. Simplex Health provides insurance-funded, anytime access to a personalized team of dietitians with physician supervision who guide your patients through evidence-based protocols. Simplex Health's services can be accessed nationally with virtual live telehealth appointments or in person at their offices located throughout greater Philadelphia.
{ "pile_set_name": "Pile-CC" }
[Clinical usefulness of troponin I in acute pulmonary embolism]. Troponin-I (cTp-I) is considered a sensitive biomarker of myocardial injury in acute pulmonary thromboembolism (PE) with prognostic implications, though abnormal levels vary among reports. cTp-I was measured in consecutive patients objectively diagnosed of PE by means of pulmonary angiography made with helicoidal CT. Patients were classified radiologically as central or peripheral PE and hemodynamically as massive, submassive or non-massive according to the pulmonary vessel occluded and systolic blood pressure and ProBNP levels respectively. We checked also the delay in diagnosis (DD) and 30-days all-causes mortality rate. We evaluated 164 patients; the mean age was 70 (15) years, males: 76 (46%). Median DD was 5 [interquartile range (IQ) 12) days. Median cTp-I in patients with DD>5 was 0.003microg/L (IQ 0.072)microg/L while in patients with DD<5 was 0.05microg/L (IQ 0.096) (p<0.05). cTp-I higher than 0.5microg/L occurred in 11 (7%) patients. Levels of cTp-I higher than 0.03microg/L were associated with central PE, (AUROC 0.7059 CI95% 0.6643-0.7475, sensitivity 0.75, specificity 0.69, PPV 0.75 and NPV 0.69) and massive and submassive PE (AUROC 0.7685, CI95% 0.7288-0.8082 sensitivity 0.86, specificity 0.66, PPV 0.72 and NPV 0.82), but they were not associated with mortality (AUROC 0.5394). In a multivariate analysis cTp-I did not show to be an independent predictor of central, massive and submassive PE or all causes death. In this study cTp-I was not a proper biomarker of the size of pulmonary vessel occluded, the degree of hemodynamic derangement or short-term mortality. The delay in diagnosis could influence the usefulness of cTp-I.
{ "pile_set_name": "PubMed Abstracts" }
Scott Harrison — Flipping Your Story 180 Degrees The story of a man who went from selling $800 bottles of champagne and $500 bottles of vodka as a New York City nightclub promoter to raising millions of dollars to provide clean water to people in need as the founder and CEO of charity: water. “Working in an organization with your vocation doesn’t need to be a bummer, i don’t think you need to walk around with a sad face all day because you’re working in conditions of extreme poverty.”
{ "pile_set_name": "Pile-CC" }
Since Version > 5.3.0 we follow semantic versioning. See the [releases](https://github.com/Vydia/react-native-background-upload/releases) page on GitHub for information regarding each release.
{ "pile_set_name": "Github" }
Developing procedures for students who refuse to care for a client. The Human Immunodeficiency Virus (HIV) epidemic raised the question of whether students should be required to provide nursing care for clients whose diagnosis for treatments may jeopardize students' health. Policies and procedures are necessary to provide a framework of functioning for faculty and students when students question their patient assignment. The author explains one institution's successful process for developing a procedure which can facilitate development of appropriate policies and procedures by other faculty.
{ "pile_set_name": "PubMed Abstracts" }
What to do if you have to switch doctors Insurance changes related to Obamacare are forcing some boomers to find new MDs Under the Affordable Care Act, many people will be getting regular access to doctors for the first time. But some others who’ve built relationships with doctors that have lasted years are finding that they won’t be able to keep seeing those same physicians, because of changes to their health coverage—and that can be unsettling. In an interview last week with the website WebMD, President Obama distanced himself from his earlier assertions (made before many provisions of the ACA took effect) that, if you like your doctor, you can keep your doctor under Obamacare. The president acknowledged that some folks may, in fact, have to switch physicians. Nobody knows exactly how many people will need to find new doctors, but reports suggest that thousands of patients may be in that boat, either now or in a few years. Many consumers found out last fall that their individual insurance policies didn’t comply with the ACA and would be canceled (though the Obama administration recently announced an extension that would give customers the option to renew those policies for two more years). Shutterstock.com Is your health coverage requiring you to switch physicians? If so, the more information you can give your new doctor about your health history, the better off you’ll be. When folks sign up for an individual insurance plan—either by the deadline of March 31, or whenever their existing plan expires—many will find their new plan options include fewer doctors and hospitals than they could access before. 
This is because many insurers have limited the number of health-care providers in their plans in an effort to keep premium rates down, a strategy known as “narrowing the network.” These changes are affecting even individual-plan customers who buy health insurance outside of the Obamacare exchanges—and they could eventually trickle down to people who get group health insurance through their jobs. While a healthy 20-something may have no qualms going to a new doctor for the occasional checkup, the story can be quite different for a boomer who has chronic conditions and has been seeing the same doctor for years. “Disrupting these long-term relationships can be like a divorce,” said Dr. Albert Wu, a practicing doctor of internal medicine and director of the Center for Health Services & Outcomes Research at the Johns Hopkins Bloomberg School of Public Health. HMOs v. PPOs Each state’s Obamacare insurance exchange includes a mix of insurers, some of which participated in that state’s pre-ACA market and some of which are new to the market. Of those that had been in the market previously, some built new networks of doctors and hospitals, while some relied on their prior networks. Nearly half of the plans offered by new entrants nationwide are health maintenance organizations, or HMOs, according to a report last fall by McKinsey & Company. HMOs typically restrict patients to a limited number of doctors and hospitals that belong to their network. (An exception is usually made in cases that require an emergency-room visit.) Among the established insurers on the exchanges, 35% of the plan offerings are HMOs and 35% are preferred provider organizations, or PPOs, which offer a wider choice of doctors and hospitals. HMO premiums are generally lower than PPO premiums. Some of the other, less-common options also involve restricted networks of providers or higher charges for out-of-network coverage. Narrow networks aren’t unique to the Obamacare exchanges, experts say. 
Choices in the outside market for individual and family insurance coverage largely mirror the offerings on the exchanges, said Carrie McLean, director of customer care for eHealth EHTH, -3.20% parent company of eHealthInsurance.com, the country’s largest private online health exchange for individual and family health insurance. Last year, 8.6% of all individual and family plans that eHealth sold were HMOs, versus 36% so far this year, McLean said. This sharp rise is largely a function of the changing composition of the market, McLean said. The outside market is changing because the ACA included provisions to ensure that the inside and outside markets remain similar, partly to prevent the exchanges from attracting only the sickest customers, a phenomenon known as “adverse selection.” Indeed, many Americans would be willing to pay more for a health plan that allowed them to visit a broader range of doctors and hospitals, according to last month’s Kaiser Health Tracking Poll. Wider choice appeals somewhat more to boomers: Of the poll respondents, 52% of those ages 50 to 64 preferred paying more for more choice, versus 47% of those ages 18 to 29. To be sure, most states have a mix of HMOs and PPOs for consumers to choose from. Consumers who face an ample range of choices need to weigh their priorities: a plan that allows you to keep your existing doctors might cost more than one that doesn’t. Double-checking your doctor Simply verifying that their doctor takes a new insurance plan has proved vexing for many consumers, experts say. Links to online provider directories have sometimes not worked on the state exchanges, or have reflected out-of-date information. What’s more, because of the way doctors’ contracts are written with insurers, it’s possible some doctors may not even realize they’re participating in a given plan, said Ken Steele, senior manager in San Francisco with ECG Management Consultants, a consulting firm to the health-care industry. 
Intraday Data provided by SIX Financial Information and subject to terms of use. Historical and current end-of-day data provided by SIX Financial Information. Intraday data delayed per exchange requirements. S&P/Dow Jones Indices (SM) from Dow Jones & Company, Inc. All quotes are in local exchange time. Real time last sale data provided by NASDAQ. More information on NASDAQ traded symbols and their current financial status. Intraday data delayed 15 minutes for Nasdaq, and 20 minutes for other exchanges. S&P/Dow Jones Indices (SM) from Dow Jones & Company, Inc. SEHK intraday data is provided by SIX Financial Information and is at least 60-minutes delayed. All quotes are in local exchange time.
{ "pile_set_name": "Pile-CC" }
[CA 125 kinetic pattern during chemotherapy]. CA 125 is a tumoral marker that may be elevated in non-mucinous epithelial ovarian tumours but is neither sensitive nor specific enough to be used as a diagnostic tool. However, CA 125 serum concentration variations can be used for monitoring chemotherapy efficiency. This study concerns 75 patients who underwent surgical cytoreduction followed by a first-line chemotherapy during which CA 125 serum variations were studied by calculation of mono-exponential regression curve slopes and half-lives. Persistence of tumour residues was evaluated by tomodensitometry and/or second look surgery during a post-chemotherapy check-up. A slope > - 0,0156 (half-life > 44,43 d) was a perfect predictor of a persistent tumour; conversely, a slope < - 0,0340 (half-life < 20,39 d) reliably predicted the absence of detectable tumour. We propose a personalised graphic representation of CA 125 variations to follow up chemotherapy. Its exploitation, in association with classical prognostic factors, could improve patient monitoring.
{ "pile_set_name": "PubMed Abstracts" }
The season-long battle between UCLA and USC for supremacy in beach volleyball ended Sunday with a dominant victory for the Bruins in Gulf Shores, Ala. In the sixth meeting of the year between the rivals, UCLA did not lose a set in a 3-0 victory that gave the Bruins their second NCAA championship in a row. UCLA and USC spent most of the season as the top-ranked teams in the nation. The Bruins finished at 35-3, their only losses coming to the Trojans. USC (32-6) lost its first two matches against UCLA before winning three in a row, including two last week in the Pac-12 Conference tournament. The Pac-12 championship earned the Trojans the top seed at the NCAA tournament. USC lost its first match, to eighth-seeded Stetson, forcing the Trojans to win four elimination matches over three days to reach the title game. After dropping the first two points to Louisiana State on Sunday morning, USC rallied for a 3-2 victory that put the Trojans in the title match. The Bruins, the only team to go unbeaten in the NCAA tournament, overpowered the Trojans in the final. UCLA got its first point at the No. 5 pairs, Izzy Carey and Lindsey Sparks beating Cammie Dorn and Mollie Ebertin 21-16, 21-15. Moments later, the Bruins’ No. 4 pair of Madi Yeomans and Savvy Simo defeated Joy Dennis and Maja Kaiser 21-16, 21-19. After a break, the top three pairs squared off. The Bruins won the first set on all three courts. The Bruins’ No. 3 pair, Abby Van Winkle and Zana Muno, clinched the title with a 22-20, 21-13 win over Haley Hallgren and Alexandra Poletto. In the four seasons that beach volleyball has been an NCAA sport, UCLA and USC are the only schools to win the national championship. The Trojans won the title in 2016 and 2017. 
Sign up for our daily sports newsletter » 📸🏆 Frame it, hang it, here are your 2019 NCAA Beach Volleyball National Champions, the UCLA Bruins!#NCAABeachVB pic.twitter.com/5is1TFtfL4 — NCAA Volleyball (@NCAAVolleyball) May 5, 2019 eric.sondheimer@latimes.com Twitter: @latsondheimer
{ "pile_set_name": "OpenWebText2" }
An independent biomechanical analysis revealed that for both Shillingford's standard off-break delivery and his doosra, the amount of elbow extension in his bowling action exceeded the 15 degrees' level of tolerance permitted under the ICC Regulations for the Review of Bowlers Reported with Suspected Illegal Bowling Actions. The 30-year-old is suspended from bowling in international cricket until such time that he has submitted to a fresh analysis, which concludes that he has remedied his bowling action. Meanwhile, the independent biomechanical analysis of the bowling action of Marlon Samuels concluded that his standard off-break delivery was bowled with a legal action, but that his quicker deliveries exceeded the 15 degrees’ level of tolerance and thus, were considered to be illegal. While Samuels is allowed to continue to bowl his standard off-break delivery in international cricket, he is not permitted to bowl his quicker deliveries. The independent analyses of Shillingford and Samuels were performed by Associate Professor Jacque Alderson and her team at the School of Sport Science, Exercise and Health, The University of Western Australia (UWA) in Perth on November 29. The players have the right to appeal against UWA’s conclusions to the Bowling Review Group. Should they choose to appeal, they must lodge written notification with the ICC within 14 days. Shillingford and Samuels were reported at the end of the second day’s play in the Mumbai Test against India last month by on-field umpires Richard Kettleborough and Nigel Llong, TV Umpire Vineet Kulkarni and Andy Pycroft of the Emirates Elite Panel of ICC Match Referees. Both bowlers have previously been reported and suspended from bowling in international cricket after their bowling actions were found to be illegal. They were, however, allowed to resume bowling in international cricket after they underwent remedial work on their bowling actions and further testing. 
Shillingford was reported in November 2010 and resumed bowling in June 2011, while Samuels was reported in February 2008 and resumed bowling in September 2011.
{ "pile_set_name": "OpenWebText2" }
Q: Xcode Discarded All Changes without committing I accidentally hit discard all changes without having "committed". It seems to have wiped all the info from the app when I open Xcode but when I go to the project file all the .swift files and icons are still there. I have tried what the other posts have said about all of the git commands but this seems to be different. Since discarding changes brought me to the starting part of the project but I still have all the files from before is there an alternative way I can restore my program? A: Can't you just re-add the files in Xcode? Xcode > File > Add files to... ... and reselect the .swift files / icons that you mentioned that are still there.
{ "pile_set_name": "StackExchange" }
Brilliant discussion with Annie this lunchtime about the role of the media. Lots for me to take in and regurgitate for you guys. All good. Expect a post or two tomorrow… however, for now, you might want to cast your eyes over this excellent little article on Politico.eu: The Press Has Lost It. If you wanted to make the link between Murdoch and the politics of The Sun, you’d be hard pressed to find a better quotation to explain the reason than this one: Miliband was the first to call for the resignation of Murdoch’s British CEO, Rebekah Brooks, and for News Corp’s billion dollar bid for the remaining shares in BSkyB to be withdrawn. This knowledge goes a long way to explain why the English edition has supported the Tories: …while the Scottish edition has supported the SNP: In essence, neither supports Labour… can you — having read the Politico.eu article — work out why? And a wee bit of historical info from the previous year when, presumably, the News of the World were still actively trying to find scoops by hacking. Can you think why they may have felt justified? (And which perspective is this related to?)
{ "pile_set_name": "Pile-CC" }
Looking for Q & A Questions This past week I hit 1000 subscribers on YouTube. I’m super excited and thankful to anyone who watches my videos. I’m going to be recording a Q&A video next weekend, if you have any questions you’d like me to answer you can leave them as a comment either here or on the video below.
{ "pile_set_name": "Pile-CC" }
Effects of blood pressure and sex on heart-vessel coupling in essential hypertension. This study aimed to investigate the impact of blood pressure and sex on heart-vessel coupling in patients with essential hypertension via ultrasound. We studied 76 patients with essential hypertension (48 males and 28 females) and 65 healthy controls (33 males and 32 females). Coupling parameters were obtained using ultrasound technology combined with brachial artery blood pressure measurement. The Ea and Ees were higher in the hypertension group than in the control group (P < 0.01), with no statistically significant difference in Ea/Ees between the two groups (P > 0.05). After subjects were classified by sex, the Ea and Ees of males and females in the hypertension group were higher than those in the control group (P < 0.05 or P < 0.01), while the Ea/Ees was lower in hypertensive females than in control females (P < 0.05). In female patients with essential hypertension, heart-vessel coupling was easily damaged, and systolic blood pressure was associated with heart-vessel coupling damage to some extent.
{ "pile_set_name": "PubMed Abstracts" }
A huge fire heavily damaged a historic church in New York City on Sunday. Authorities reported one minor injury in the blaze that started just before 7 p.m. at the Serbian Orthodox Cathedral of St. Sava in Manhattan. Three hours later the main body of the fire was knocked down, but firefighters were still putting out small pockets of flames, officials said. Authorities did not say what caused the fire. Father Djokan Majstorovic said he felt like he was “in a nightmare” as he tried to get to the fire scene that was blocked off by firefighters. The blaze completely destroyed the roof of the Gothic Revival-style building. It broke out on the same day Orthodox Christians around the world celebrated Easter. The church website listed services that morning and an Easter luncheon at 1 p.m. “Heartbroken. I was just inside that building three hours ago,” Dex Pipovic told PIX11 News. He said he had been attending the church for the last seven years. The church was designed by architect Richard M. Upjohn and built in the early 1850s. One of its earlier congregants was novelist Edith Wharton, who wrote “The Age of Innocence.” She was married in the church in 1885. The Serbian Orthodox Church purchased the building from the Episcopal Diocese of New York in 1943. The building was designated a city landmark in 1968. ALSO A weeping ‘miracle’ icon is drawing thousands of pilgrims to an Illinois church Why most of the freight engines that Metrolink is leasing to improve safety are sitting idle China’s plan for a modern Silk Road would cost even more than the Marshall Plan that rebuilt Europe
{ "pile_set_name": "OpenWebText2" }
Single-Page Applications (SPAs) improve user experience by offering rich UI interactions, fast feedback, and the relief of knowing you don’t need to download and install a traditional application. Browsers are now operating systems and websites are apps. While a SPA isn’t always the answer, for apps that rely on snappy user interaction they are increasingly common. To the end user, a well-designed SPA feels like rainbows and unicorns. From the developer perspective, reality can often be the opposite. Tough problems long-since solved on the backend like authentication, routing, state management, data binding, etc. all become time-consuming frontend challenges. Luckily for us, JavaScript frameworks like Vue, React, and Angular exist to help us craft powerful applications and allow us to focus our time on critical functionality and not reinventing the wheel. About Vue.js Who better to describe Vue than its creator, Evan You? Vue (pronounced /vjuː/ , like view) is a progressive framework for building user interfaces. It is designed from the ground up to be incrementally adoptable, and can easily scale between a library and a framework depending on different use cases. It consists of an approachable core library that focuses on the view layer only, and an ecosystem of supporting libraries that helps you tackle complexity in large Single-Page Applications. Here are some of Vue’s benefits: A gentle learning curve and low barrier to entry Provides the ability to bootstrap your app with vue-cli , saving you the hassle of setting up webpack and complex build pipelines , saving you the hassle of setting up webpack and complex build pipelines Explosive community growth! Vue now has more stars on GitHub than both React and Angular It’s flexible enough to adopt at a reasonable pace, component by component Create Your Vue + Go App In this tutorial, you’ll create a single-page application that allows users to show their love for open source projects on GitHub. 
For the frontend you’ll use Vue and popular tooling like vuex , vue-cli , vuetify , and vue-router . On the backend, you’ll use Go to build a REST API and persist your data in MongoDB. Authentication and user management can be a major pain, so you’ll use JSON Web Token (JWT) based authentication when making requests from the SPA and Okta’s Go JWT Verifier as a middleware on your backend to validate the user’s token on every request. Once complete, users will be able to authenticate via OpenID Connect (OIDC), search for projects on GitHub, favorite those projects, and even write notes about their favorite projects! PS: Don’t want to follow the tutorial? That’s fine! Feel free to check out the finished repository on GitHub. Create the Vue and Go Directory Structure For the sake of simplicity, let’s create the REST API and SPA in the same project, starting with a project directory in the Go workspace. Go projects live inside the directory the environment variable $GOPATH points to. In order to find the current $GOPATH value, run: go env GOPATH . To learn more about GOPATH, including how to set it yourself, refer to the official Go documentation on the topic. If you’re totally new to Go, check out this article in order to understand how projects are organized inside the GOPATH directory. With the GOPATH defined, you now can create a directory for your project: mkdir -p $GOPATH /src/github.com/ {{ YOUR_GITHUB_USERNAME } } /kudo-oos To get your SPA off the ground quickly you can leverage the scaffolding functionality of vue-cli. The CLI will prompt you with a series of options - pick the technology appropriate for this project: vue.js , vuex , and, webpack . Install vue-cli by running: yarn global add @vue/cli If you aren’t familiar with yarn, it’s an alternative to npm that many developers like. 
To install yarn you can run the command below: npm install -g yarn Next, create a new Vue project: mkdir -p pkg/http/web cd pkg/http/web vue create app You will be prompted with a series of questions about the project’s build details. For this app pick all the default choices. Congratulations, you have created your Vue.js SPA! Try it out by running: cd app yarn serve Open this URL: http://localhost:8080 in your browser and you should see the following. Next, let’s make your SPA modern and responsive using vuetify . Add Vuetify Vuetify is a collection of Vue.js components that abstract Material Design’s concepts. Vuetify provides out-of-the box features including a grid system, typography, basic layout, and also components like cards, dialogs, chips, tabs, icons and so on. Vuetify will pave your way to a rich UI! When installing vuetify, you will be prompted with a series of questions. For the sake of simplicity just go with the default choices again. vue add vuetify Spin up your SPA again to see vuetify in action. yarn serve Add Authentication to Your Vue App with Okta Writing secure user auth and building login pages are easy to get wrong and can be the downfall of a new project. Okta makes it simple to implement all the user management functionality quickly and securely. Get started by signing up for a free developer account and creating an OIDC application in Okta. Once logged in, create a new application by clicking “Add Application”. Select the “Single-Page App” platform option. The default application settings should be the same as those pictured. Next, install the Okta Vue SDK by running the following command: yarn add @okta/okta-vue@1.0.7 Install Your Vue App Dependencies You’ll also need to install a few dependencies in order to get your project running. 
Run the following command to install them all: yarn add vue-router@3.0.2 yarn add vuex@3.0.1 yarn add axios@0.18.0 vue-router is a popular routing tool you’ll be using to manage app routes vuex is what you’ll be using to manage page state axios is a popular HTTP library that lets you easily make REST calls Create Your Vue App Routes For this app, you need only 4 routes, all of which require authentication except for the login route. The root route / is our landing page where the login component will be rendered. Once the user has authenticated, we’ll redirect them to the /me route where the bulk of the functionality takes place: the user should be able to query for OSS projects via GitHub’s REST API, favorite projects returned from the query, see more details about the project, and leave a note describing why the project is important to them. Take note that both the /me and repo/:id have a meta: { requiresAuth: true } property specifying that the user must be authenticated to access that area of the app. The Okta plugin will use it to redirect the user the Okta login page if not authenticated. First, install the vue-router and vuex packages. Now create pkg/http/web/app/src/routes.js and define the following routes: import Vue from ' vue ' ; import VueRouter from ' vue-router ' ; import Auth from ' @okta/okta-vue ' import Home from ' ./components/Home ' ; import Login from ' ./components/Login ' ; import GitHubRepoDetails from ' ./components/GithubRepoDetails ' ; Vue . use ( VueRouter ); Vue . 
use ( Auth , { issuer : ' {{ OKTA_ORG_URL }}/oauth2/default ' , client_id : ' {{ OKTA_APP_CLIENT_ID }} ' , redirect_uri : ' http://localhost:8080/implicit/callback ' , scope : ' openid profile email ' }) export default new VueRouter ({ mode : ' history ' , routes : [ { path : ' / ' , component : Login }, { path : ' /me ' , component : Home , meta : { requiresAuth : true }}, { name : ' repo-details ' , path : ' /repo/:id ' , component : GitHubRepoDetails , meta : { requiresAuth : true } }, { path : ' /implicit/callback ' , component : Auth . handleCallback () } ] }); Calling Vue.use(Auth, ...) will inject an authClient object into your Vue instance which can be accessed by calling this.$auth anywhere inside your Vue instance. This is what you’ll use to make sure a user is logged in and/or to force the user to identify themself! Be sure to replace {{ OKTA_ORG_URL }} with your Okta ORG Url. This value can be found on the dashboard page of your Okta dashboard (pictured below). You’ll also need to replace {{ OKTA_APP_CLIENT_ID }} with your new Okta app’s Client ID. If you navigate back to your newly created application, you should see your Client ID value on the General tab (pictured below). Also, note the hard-coded value http://localhost:8080/implicit/callback . If you later change your app to run in production on a different URL, you’ll need to modify this value. A best practice is to store this value in an environment variable so that depending on where you’re running your application, the correct value will be used. Create the Vue Components The vue-router library contains a number of components to help developers create dynamic and rich UIs. One of them, router-view , renders the component for the matched route. In our case, when the user accesses the root route / , vue-router will render the Login component as configured in routers.js . Open ./kudo-oos/pkg/http/web/app/src/App.vue and copy in the following code. 
< template > < v - app > < router - view >< /router-view > < Footer /> < /v-app > < /template > < script > import Footer from ' @/components/Footer.vue ' export default { name : ' App ' , components : { Footer }, data () { return {} } } < /script > For every route other than the matched route component, Vue will render the Footer component. Create ./kudo-oos/pkg/http/web/app/src/components/Footer.vue and copy in the following code to create that footer component. < template > < v - footer class = " pa-3 white--text " color = " teal " absolute > < div > Developed with ❤️ by {{ YOUR_NAME }} & copy ; {{ new Date (). getFullYear () }} < /div > < /v-footer > < /template > Next, create the Login component. Create ./kudo-oos/pkg/http/web/app/src/components/Login.vue and copy in the following code. < template > < v - app id = " inspire " > < v - content > < v - container fluid fill - height > < v - layout align - center justify - center > < v - flex xs12 sm8 md4 > < v - card class = " elevation-12 " > < v - toolbar dark color = " teal " > < v - toolbar - title justify - center > Login < /v-toolbar-title > < /v-toolbar > < v - card - text > < v - btn @ click . prevent = " login " color = " primary " > Sign in with Okta < /v-btn > < /v-card-text > < /v-card > < /v-flex > < /v-layout > < /v-container > < /v-content > < /v-app > < /template > < script > export default { data () { return {}; }, async mounted () { const isAuthenticated = await this . $auth . isAuthenticated (); isAuthenticated && this . $router . push ( ' /me ' ); }, methods : { login () { this . $auth . loginRedirect ( ' /me ' ) } } } < /script > This Login component will eventually render a “sign in” button on the homepage of the website when a user visits the page who isn’t logged in. The home page will eventually look like the following: Once the login component is rendered, the user will be redirected to the login page after clicking the sign in button. 
And after a successful login, the user will be redirected back to your application to the configured route. In our app, that’s the /me route. The /me route was configured to render the Home component, which in turn renders the Sidebar, Kudos, and Search tabs. Each tab renders a specific set of GitHubRepo s. Let’s create these components now. Create the ./kudo-oos/pkg/http/web/app/src/components/Home.vue component. < template > < div > < SearchBar defaultQuery = ' okta ' v - on : search - submitted = " githubQuery " /> < v - container grid - list - md fluid class = " grey lighten-4 " > < v - tabs slot = " extension " v - model = " tabs " centered color = " teal " text - color = " white " slider - color = " white " > < v - tab class = " white--text " : key = " 2 " > KUDOS < /v-tab > < v - tab class = " white--text " : key = " 1 " > SEARCH < /v-tab > < /v-tabs > < v - tabs - items style = " width:100% " v - model = " tabs " > < v - tab - item : key = " 2 " > < v - layout row wrap > < v - flex v - for = " kudo in allKudos " : key = " kudo.id " md4 > < GitHubRepo : repo = " kudo " /> < /v-flex > < /v-layout > < /v-tab-item > < v - tab - item : key = " 1 " > < v - layout row wrap > < v - flex v - for = " repo in repos " : key = " repo.id " md4 > < GitHubRepo : repo = " repo " /> < /v-flex > < /v-layout > < /v-tab-item > < /v-tabs-items > < /v-container > < /div > < /template > < script > import SearchBar from ' ./SearchBar.vue ' import GitHubRepo from ' ./GithubRepo.vue ' import githubClient from ' ../githubClient ' import { mapMutations , mapGetters , mapActions } from ' vuex ' export default { name : ' Home ' , components : { SearchBar , GitHubRepo }, data () { return { tabs : 0 } }, computed : mapGetters ([ ' allKudos ' , ' repos ' ]), created () { this . getKudos (); }, methods : { githubQuery ( query ) { this . tabs = 1 ; githubClient . getJSONRepos ( query ) . then ( response => this . resetRepos ( response . items ) ) }, ... mapMutations ([ ' resetRepos ' ]), ... 
mapActions ([ ' getKudos ' ]), }, } < /script > < style > . v - tabs__content { padding - bottom : 2 px ; } < /style > SearchBar is the first component rendered in Home . When the user enters a query into the text input in the Sidebar , this component triggers a call to the Github API. SearchBar simply emits an event to its parent, Home , which contains the githubQuery . Create ./kudo-oos/pkg/http/web/app/src/components/SearchBar.vue and copy in the following code: < template > < v - toolbar dark color = " teal " > < v - spacer >< /v-spacer > < v - text - field solo - inverted flat hide - details label = " Search for your OOS project on Github + Press Enter " prepend - inner - icon = " search " v - model = " query " @ keyup . enter = " onSearchSubmition " >< /v-text-field > < v - spacer >< /v-spacer > < button @ click . prevent = " logout " > Logout < /button > < /v-toolbar > < /template > < script > export default { data () { return { query : null , }; }, props : [ ' defaultQuery ' ], methods : { onSearchSubmition () { this . $emit ( ' search-submitted ' , this . query ); }, async logout () { await this . $auth . logout () this . $router . push ( ' / ' ) } } } < /script > Thanks to @keyup.enter="onSearchSubmition" , whenever the user hits enter onSearchSubmition emits search-submitted with the query value. How do we capture this event you may ask? Simple! On the Home component, when you mounted the Sidebar component you also added a “listener” v-on:search-submitted="githubQuery" that calls githubQuery on every search-submitted event. The Sidebar is also responsible for logging the user out. Okta Vue SDK offers a handy method to clean up the session using the method this.$auth.logout() . Whenever the user logs out, they can be redirected to the login page. The second component rendered in Home is the GithubRepo . 
This component is used inside two tabs: the first tab Kudos represents the user’s favorites OSS projects and the Search tab renders the OSS projects returned from GitHub. Create ./kudo-oos/pkg/http/web/app/src/components/GithubRepo.vue and copy in the following code: < template > < v - card > < v - card - title primary - title > < div class = " repo-card-content " > < h3 class = " headline mb-0 " > < router - link : to = " { name: 'repo-details', params: { id: repo.id }} " > {{ repo . full_name }} < /router-link > < /h3 > < div > {{ repo . description }} < /div > < /div > < /v-card-title > < v - card - actions > < v - chip > {{ repo . language }} < /v-chip > < v - spacer >< /v-spacer > < v - btn @ click . prevent = " toggleKudo(repo) " flat icon color = " pink " > < v - icon v - if = " isKudo(repo) " > favorite < /v-icon > < v - icon v - else > favorite_border < /v-icon > < /v-btn > < /v-card-actions > < /v-card > < /template > < script > import { mapActions } from ' vuex ' ; export default { data () { return {} }, props : [ ' repo ' ], methods : { isKudo ( repo ) { return this . $store . getters . isKudo ( repo ); }, ... mapActions ([ ' toggleKudo ' ]) } } < /script > < style > . repo - card - content { height : 90 px ; overflow : scroll ; } < /style > Your SPA uses vuex to manage state in one centralized store accessible by all components. Vuex also ensures that access to the store is performed in a predictable fashion respecting a few rules. To read the state, you need to define getters , synchronous changes to the state must be done via mutations , and asynchronous changes are done via actions . You now need to create ./kudo-oos/pkg/http/web/app/src/store.js with actions , mutations and getters . Your initial data is { kudos: {}, repos: [] } . kudos holds all the user’s favorites OSS projects as a JavaScript Object where the key is the project id and the value is the project itself. repos is an array that holds the search results. 
There are two cases in which you may need to mutate state. First, when the user logs in you need to fetch the user’s favorites OSS projects from the Go server and set the repos in the store by calling resetRepos . Second, when the user favorites or unfavorites an OSS project, you need update the kudos in the store by calling resetKudos to reflect that change on the server. resetKudos is synchronous method that is called by actions inside asynchronous functions after each call to the Go server. The Home component uses the getters allKudos and repos to render the list of Kudos and SearchResults. In order to know whether a repo has been favorited or not, your app needs to call the isKudo getter. Create the ./kudo-oos/pkg/http/web/app/src/store.js file and copy in the code below: import Vue from ' vue ' ; import Vuex from ' vuex ' ; import APIClient from ' ./apiClient ' ; Vue . use ( Vuex ); const store = new Vuex . Store ({ state : { kudos : {}, repos : [], }, mutations : { resetRepos ( state , repos ) { state . repos = repos ; }, resetKudos ( state , kudos ) { state . kudos = kudos ; } }, getters : { allKudos ( state ) { return Object . values ( state . kudos ); }, kudos ( state ) { return state . kudos ; }, repos ( state ) { return state . repos ; }, isKudo ( state ) { return ( repo ) => { return !! state . kudos [ repo . id ]; }; } }, actions : { getKudos ({ commit }) { APIClient . getKudos (). then (( data ) => { commit ( ' resetKudos ' , data . reduce (( acc , kudo ) => { return {[ kudo . id ]: kudo , ... acc } }, {})) }) }, updateKudo ({ commit , state }, repo ) { const kudos = { ... state . kudos , [ repo . id ]: repo }; return APIClient . updateKudo ( repo ) . then (() => { commit ( ' resetKudos ' , kudos ) }); }, toggleKudo ({ commit , state }, repo ) { if ( ! state . kudos [ repo . id ]) { return APIClient . createKudo ( repo ) . then ( kudo => commit ( ' resetKudos ' , { [ kudo . id ]: kudo , ... state . kudos })) } const kudos = Object . entries ( state . 
kudos ). reduce (( acc , [ repoId , kudo ]) => { return ( repoId == repo . id ) ? acc : { [ repoId ]: kudo , ... acc }; }, {}); return APIClient . deleteKudo ( repo ) . then (() => commit ( ' resetKudos ' , kudos )); } } }); export default store ; Inside actions you are performing ajax calls to the Go server. Every request made to the server must be authenticated or the server will respond with a client error. When the user logs in, an access token is created and can be accessed by calling: await Vue.prototype.$auth.getAccessToken() . This asynchronous function returns an access token required to send authenticated requests to the server. The Go server exposes a REST API for the kudo resource. You will implement methods to make ajax calls in order to create with createKudo , update with updateKudo , delete with deleteKudo , and list all kudos with getKudos . Notice that these methods call the perform method by passing the endpoint and the HTTP verb. perform , in turn, populates the request Authorization header with the access token so the Go server can validate the request. Create ./kudo-oos/pkg/http/web/app/src/apiClient.js and copy in the code below. import Vue from ' vue ' ; import axios from ' axios ' ; const BASE_URI = ' http://localhost:4444 ' ; const client = axios . create ({ baseURL : BASE_URI , json : true }); const APIClient = { createKudo ( repo ) { return this . perform ( ' post ' , ' /kudos ' , repo ); }, deleteKudo ( repo ) { return this . perform ( ' delete ' , `/kudos/ ${ repo . id } ` ); }, updateKudo ( repo ) { return this . perform ( ' put ' , `/kudos/ ${ repo . id } ` , repo ); }, getKudos () { return this . perform ( ' get ' , ' /kudos ' ); }, getKudo ( repo ) { return this . perform ( ' get ' , `/kudo/ ${ repo . id } ` ); }, async perform ( method , resource , data ) { let accessToken = await Vue . prototype . $auth . 
getAccessToken () return client ({ method , url : resource , data , headers : { Authorization : `Bearer ${ accessToken } ` } }). then ( req => { return req . data }) } } export default APIClient ; Also, create ./kudo-oos/pkg/http/web/app/src/githubClient.js and copy in the code below. This file contains helper functions used to interact with the GitHub API. const API_URL = " https://api.github.com/search/repositories " export default { getJSONRepos ( query ) { return fetch ( ` ${ API_URL } ?q=` + query ). then ( response => response . json ()); } } Each GithubRepo has a router-link to /repo/:id that renders the GithubRepoDetails component. GithubRepoDetails shows details about the OSS project, like how many times the project has been of starred and the amount of open issues. The user can also leave a note describing why the project is special by clicking the Kudo button. The message is sent to Go server button by calling updateKudo . Create the ./kudo-oos/pkg/http/web/app/src/components/GithubRepoDetails.js file with the code below. < template > < v - container grid - list - md fluid class = " grey lighten-4 " > < v - layout align - center justify - space - around wrap > < v - flex md6 > <!-- < v - img : src = " repo.owner.avatar_url " : alt = " repo.owner.login " class = " grey darken-4 " width = " 200 " >< /v-img> -- > < h1 class = " primary--text " > < a : href = " repo.html_url " > {{ repo . full_name }} < /a > < /h1 > < v - chip class = " text-xs-center " > < v - avatar class = " teal " > < v - icon class = " white--text " > star < /v-icon > < /v-avatar > Stars : {{ repo . stargazers_count }} < /v-chip > < v - chip class = " text-xs-center " > < v - avatar class = " teal white--text " > L < /v-avatar > Language : {{ repo . language }} < /v-chip > < v - chip class = " text-xs-center " > < v - avatar class = " teal white--text " > O < /v-avatar > Open Issues : {{ repo . 
open_issues_count }} < /v-chip > < v - textarea name = " input-7-1 " label = " Show some love " value = "" v - model = " repo.notes " hint = " Describe why you love this project " >< /v-textarea > < v - btn @ click . prevent = " updateKudo(repo) " > Kudo < /v-btn > < router - link tag = " a " to = " /me " > Back < /router-link > < /v-flex > < /v-layout > < /v-container > < /template > < script > import { mapActions , mapGetters } from ' vuex ' ; export default { data () { return { repo : {} } }, watch : { ' $route ' : ' fetchData ' }, computed : mapGetters ([ ' kudos ' ]), created () { this . fetchData (); }, methods : { fetchData () { fetch ( ' https://api.github.com/repositories/ ' + this . $route . params . id ) . then ( response => response . json ()) . then (( response ) => { this . repo = Object . assign ( response , this . kudos [ this . $route . params . id ]) }) }, ... mapActions ([ ' updateKudo ' ]) } } < /script > < style > < /style > Now that your router, store, and components are in place, go ahead and modify ./kudo-oos/pkg/http/web/app/src/main.js to properly initialize your SPA. Copy in the following code and overwrite whatever is there. import ' @babel/polyfill ' import Vue from ' vue ' import ' ./plugins/vuetify ' import App from ' ./App.vue ' import store from ' ./store ' import router from ' ./routes ' Vue . config . productionTip = process . env . NODE_ENV == ' production ' ; router . beforeEach ( Vue . prototype . $auth . authRedirectGuard ()) new Vue ({ store , router , render : h => h ( App ) }). $mount ( ' #app ' ) Note that we are calling router.beforeEach(Vue.prototype.$auth.authRedirectGuard()) to look for routes tagged with meta: {requiresAuth: true} and redirect the user to the authentication flow if they are not logged in. Create a REST API with Go Now that users can securely authenticate on the frontend, you need to create a HTTP server written in Go to handle the requests, handle user authentication, and perform CRUD operations. 
The first code you need to create is a structure to represent a GitHub repository. Start by creating ./kudo-oos/pkg/core/kudo.go . mkdir pkg/core Now define the following struct to represent a “kudo” (someone giving kudos to a specific repo). Copy the code below into your newly created file, ./kudo-oos/pkg/core/kudo.go . package core // Kudo represents a oos kudo. type Kudo struct { UserID string `json:"user_id" bson:"userId"` RepoID string `json:"id" bson:"repoId"` RepoName string `json:"full_name" bson:"repoName"` RepoURL string `json:"html_url" bson:"repoUrl"` Language string `json:"language" bson:"language"` Description string `json:"description" bson:"description"` Notes string `json:"notes" bson:"notes"` } Next, create the ./kudo-oos/pkg/core/repository.go file and add the following interface to represent an API for any persistence layer you might want to use. In this article, we are going to use MongoDB. package core // Repository defines the API repository implementation should follow. type Repository interface { Find ( id string ) ( * Kudo , error ) FindAll ( selector map [ string ] interface {}) ([] * Kudo , error ) Delete ( kudo * Kudo ) error Update ( kudo * Kudo ) error Create ( kudo ...* Kudo ) error Count () ( int , error ) } Finally, create the MongoDB repository that implements the interface you’ve just created. First, create a new directory to hold this code. mkdir pkg/storage Then create the file ./kudo-oos/pkg/storage/mongo.go and add the following code. package storage import ( "log" "os" "github.com/globalsign/mgo" "github.com/globalsign/mgo/bson" "github.com/klebervirgilio/vue-crud-app-with-golang/pkg/core" ) const ( collectionName = "kudos" ) func GetCollectionName () string { return collectionName } type MongoRepository struct { logger * log . Logger session * mgo . Session } // Find fetches a kudo from mongo according to the query criteria provided. func ( r MongoRepository ) Find ( repoID string ) ( * core . 
Kudo , error ) { session := r . session . Copy () defer session . Close () coll := session . DB ( "" ) . C ( collectionName ) var kudo core . Kudo err := coll . Find ( bson . M { "repoId" : repoID , "userId" : kudo . UserID }) . One ( & kudo ) if err != nil { r . logger . Printf ( "error: %v " , err ) return nil , err } return & kudo , nil } // FindAll fetches all kudos from the database. YES.. ALL! be careful. func ( r MongoRepository ) FindAll ( selector map [ string ] interface {}) ([] * core . Kudo , error ) { session := r . session . Copy () defer session . Close () coll := session . DB ( "" ) . C ( collectionName ) var kudos [] * core . Kudo err := coll . Find ( selector ) . All ( & kudos ) if err != nil { r . logger . Printf ( "error: %v " , err ) return nil , err } return kudos , nil } // Delete deletes a kudo from mongo according to the query criteria provided. func ( r MongoRepository ) Delete ( kudo * core . Kudo ) error { session := r . session . Copy () defer session . Close () coll := session . DB ( "" ) . C ( collectionName ) return coll . Remove ( bson . M { "repoId" : kudo . RepoID , "userId" : kudo . UserID }) } // Update updates an kudo. func ( r MongoRepository ) Update ( kudo * core . Kudo ) error { session := r . session . Copy () defer session . Close () coll := session . DB ( "" ) . C ( collectionName ) return coll . Update ( bson . M { "repoId" : kudo . RepoID , "userId" : kudo . UserID }, kudo ) } // Create kudos in the database. func ( r MongoRepository ) Create ( kudos ...* core . Kudo ) error { session := r . session . Copy () defer session . Close () coll := session . DB ( "" ) . C ( collectionName ) for _ , kudo := range kudos { _ , err := coll . Upsert ( bson . M { "repoId" : kudo . RepoID , "userId" : kudo . UserID }, kudo ) if err != nil { return err } } return nil } // Count counts documents for a given collection func ( r MongoRepository ) Count () ( int , error ) { session := r . session . Copy () defer session . 
Close () coll := session . DB ( "" ) . C ( collectionName ) return coll . Count () } // NewMongoSession dials mongodb and creates a session. func newMongoSession () ( * mgo . Session , error ) { mongoURL := os . Getenv ( "MONGO_URL" ) if mongoURL == "" { log . Fatal ( "MONGO_URL not provided" ) } return mgo . Dial ( mongoURL ) } func newMongoRepositoryLogger () * log . Logger { return log . New ( os . Stdout , "[mongoDB] " , 0 ) } func NewMongoRepository () core . Repository { logger := newMongoRepositoryLogger () session , err := newMongoSession () if err != nil { logger . Fatalf ( "Could not connect to the database: %v " , err ) } return MongoRepository { session : session , logger : logger , } } Add the Go Backend Before you can create HTTP handlers, you’ll need to write code to handle incoming request payloads. First, create the necessary directory: mkdir pkg/kudo Then, create the file ./kudo-oos/pkg/kudo/service.go and insert the code below. package kudo import ( "strconv" "github.com/{{ YOUR_GITHUB_USERNAME }}/kudo-oos/pkg/core" ) type GitHubRepo struct { RepoID int64 `json:"id"` RepoURL string `json:"html_url"` RepoName string `json:"full_name"` Language string `json:"language"` Description string `json:"description"` Notes string `json:"notes"` } type Service struct { userId string repo core . Repository } func ( s Service ) GetKudos () ([] * core . Kudo , error ) { return s . repo . FindAll ( map [ string ] interface {}{ "userId" : s . userId }) } func ( s Service ) CreateKudoFor ( githubRepo GitHubRepo ) ( * core . Kudo , error ) { kudo := s . githubRepoToKudo ( githubRepo ) err := s . repo . Create ( kudo ) if err != nil { return nil , err } return kudo , nil } func ( s Service ) UpdateKudoWith ( githubRepo GitHubRepo ) ( * core . Kudo , error ) { kudo := s . githubRepoToKudo ( githubRepo ) err := s . repo . Create ( kudo ) if err != nil { return nil , err } return kudo , nil } func ( s Service ) RemoveKudo ( githubRepo GitHubRepo ) ( * core . 
Kudo , error ) { kudo := s . githubRepoToKudo ( githubRepo ) err := s . repo . Delete ( kudo ) if err != nil { return nil , err } return kudo , nil } func ( s Service ) githubRepoToKudo ( githubRepo GitHubRepo ) * core . Kudo { return & core . Kudo { UserID : s . userId , RepoID : strconv . Itoa ( int ( githubRepo . RepoID )), RepoName : githubRepo . RepoName , RepoURL : githubRepo . RepoURL , Language : githubRepo . Language , Description : githubRepo . Description , Notes : githubRepo . Notes , } } func NewService ( repo core . Repository , userId string ) Service { return Service { repo : repo , userId : userId , } } Define Go HTTP Handlers Your REST API exposes the kudo resource to support clients like your SPA. A normal SPA will expose endpoints so clients can create, update, delete, and list resources. For instance, when the user logs in, a request is made to fetch all the kudos for the authenticated user via GET /kudos . # Fetches all open source projects favorited by the user GET /kudos # Fetches a favorited open source project by id GET /kudos/:id # Creates (or favorites) an open source project for the logged in user POST /kudos # Updates a favorited open source project PUT /kudos/:id # Deletes (or unfavorites) a favorited open source project DELETE /kudos/:id To support this, you need to create a new file named ./kudo-oos/pkg/http/handlers.go and define your HTTP handlers using the fabulous httprouter library. package http import ( "encoding/json" "io/ioutil" "net/http" "strconv" "github.com/julienschmidt/httprouter" "github.com/{{ YOUR_GITHUB_USERNAME }}/kudo-oos/pkg/core" "github.com/{{ YOUR_GITHUB_USERNAME }}/kudo-oos/pkg/kudo" ) type Service struct { repo core . Repository Router http . Handler } func New ( repo core . Repository ) Service { service := Service { repo : repo , } router := httprouter . New () router . GET ( "/kudos" , service . Index ) router . POST ( "/kudos" , service . Create ) router . DELETE ( "/kudos/:id" , service . 
Delete ) router . PUT ( "/kudos/:id" , service . Update ) service . Router = UseMiddlewares ( router ) return service } func ( s Service ) Index ( w http . ResponseWriter , r * http . Request , params httprouter . Params ) { service := kudo . NewService ( s . repo , r . Context () . Value ( "userId" ) . ( string )) kudos , err := service . GetKudos () if err != nil { w . WriteHeader ( http . StatusInternalServerError ) return } w . WriteHeader ( http . StatusOK ) json . NewEncoder ( w ) . Encode ( kudos ) } func ( s Service ) Create ( w http . ResponseWriter , r * http . Request , params httprouter . Params ) { service := kudo . NewService ( s . repo , r . Context () . Value ( "userId" ) . ( string )) payload , _ := ioutil . ReadAll ( r . Body ) githubRepo := kudo . GitHubRepo {} json . Unmarshal ( payload , & githubRepo ) kudo , err := service . CreateKudoFor ( githubRepo ) if err != nil { w . WriteHeader ( http . StatusInternalServerError ) return } w . WriteHeader ( http . StatusCreated ) json . NewEncoder ( w ) . Encode ( kudo ) } func ( s Service ) Delete ( w http . ResponseWriter , r * http . Request , params httprouter . Params ) { service := kudo . NewService ( s . repo , r . Context () . Value ( "userId" ) . ( string )) repoID , _ := strconv . Atoi ( params . ByName ( "id" )) githubRepo := kudo . GitHubRepo { RepoID : int64 ( repoID )} _ , err := service . RemoveKudo ( githubRepo ) if err != nil { w . WriteHeader ( http . StatusInternalServerError ) return } w . WriteHeader ( http . StatusOK ) } func ( s Service ) Update ( w http . ResponseWriter , r * http . Request , params httprouter . Params ) { service := kudo . NewService ( s . repo , r . Context () . Value ( "userId" ) . ( string )) payload , _ := ioutil . ReadAll ( r . Body ) githubRepo := kudo . GitHubRepo {} json . Unmarshal ( payload , & githubRepo ) kudo , err := service . UpdateKudoWith ( githubRepo ) if err != nil { w . WriteHeader ( http . StatusInternalServerError ) return } w . 
WriteHeader ( http . StatusOK ) json . NewEncoder ( w ) . Encode ( kudo ) } Verify JSON Web Tokens (JWTs) with Go This is the most crucial component of your REST API server. Without this middleware, any user can perform CRUD operations against the database. In the event that no valid JWT is provided in the HTTP authorization header, the API call will be aborted and an error will be returned to the client. Create ./kudo-oos/pkg/http/middlewares.go and paste in the following code: package http import ( "context" "log" "net/http" "strings" jwtverifier "github.com/okta/okta-jwt-verifier-golang" "github.com/rs/cors" ) func OktaAuth ( h http . Handler ) http . Handler { return http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) { accessToken := r . Header [ "Authorization" ] jwt , err := validateAccessToken ( accessToken ) if err != nil { w . WriteHeader ( http . StatusForbidden ) w . Write ([] byte ( err . Error ())) return } ctx := context . WithValue ( r . Context (), "userId" , jwt . Claims [ "sub" ] . ( string )) h . ServeHTTP ( w , r . WithContext ( ctx )) }) } func validateAccessToken ( accessToken [] string ) ( * jwtverifier . Jwt , error ) { parts := strings . Split ( accessToken [ 0 ], " " ) jwtVerifierSetup := jwtverifier . JwtVerifier { Issuer : "{DOMAIN}" , ClaimsToValidate : map [ string ] string { "aud" : "api://default" , "cid" : "{CLIENT_ID}" }, } verifier := jwtVerifierSetup . New () return verifier . VerifyIdToken ( parts [ 1 ]) } func JSONApi ( h http . Handler ) http . Handler { return http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) { w . Header () . Set ( "Content-Type" , "application/json" ) h . ServeHTTP ( w , r ) }) } func AccsessLog ( h http . Handler ) http . Handler { return http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) { log . Printf ( "%s: %s" , r . Method , r . RequestURI ) h . ServeHTTP ( w , r ) }) } func Cors ( h http . Handler ) http . 
Handler { corsConfig := cors . New ( cors . Options { AllowedHeaders : [] string { "Origin" , "Accept" , "Content-Type" , "X-Requested-With" , "Authorization" }, AllowedMethods : [] string { "POST" , "PUT" , "GET" , "PATCH" , "OPTIONS" , "HEAD" , "DELETE" }, Debug : true , }) return corsConfig . Handler ( h ) } func UseMiddlewares ( h http . Handler ) http . Handler { h = JSONApi ( h ) h = OktaAuth ( h ) h = Cors ( h ) return AccsessLog ( h ) } As you can see, the middleware OktaAuth uses okta-jwt-verifier-golang to validate the user’s access token. Define Your Go REST API Entry Point Now create a new folder to hold the main Go application: mkdir cmd Then create the file ./kudo-oos/cmd/main.go and add the following code to spin up your Go web server. package main import ( "log" "net/http" "os" web "github.com/{{ YOUR_GITHUB_USERNAME }}/kudo-oos/pkg/http" "github.com/{{ YOUR_GITHUB_USERNAME }}/kudo-oos/pkg/storage" ) func main () { httpPort := os . Getenv ( "PORT" ) repo := storage . NewMongoRepository () webService := web . New ( repo ) log . Printf ( "Running on port %s " , httpPort ) log . Fatal ( http . ListenAndServe ( httpPort , webService . Router )) } Next, create a folder to hold your DB initialization code. mkdir cmd/db Then create a new file, ./kudo-oos/cmd/db/setup.go and insert the following code. package main import ( "log" "os" "github.com/globalsign/mgo" ) func main () { var err error mongoURL := os . Getenv ( "MONGO_URL" ) if mongoURL == "" { log . Fatal ( "MONGO_URL not provided" ) } session , err := mgo . Dial ( mongoURL ) defer session . Close () err = session . DB ( "" ) . AddUser ( "mongo_user" , "mongo_secret" , false ) info := & mgo . CollectionInfo {} err = session . DB ( "" ) . C ( "kudos" ) . Create ( info ) if err != nil { log . Fatal ( err ) } } Manage Dependencies I like using the dep tool to manage dependencies, so be sure to install it before continuing. 
Next, run the following commands to initialize the dep tool and create a Gopkg.lock and Gopkg.toml file (which will hold dependency resolution information). dep init dep ensure -add github.com/okta/okta-jwt-verifier-golang dep ensure -add github.com/rs/cors dep ensure -add github.com/globalsign/mgo Run the Go + Vue SPA There are many ways to run back-end and front-end apps. The simplest way (for development purposes) is to just use good old fashioned Make. A Makefile contains build instructions for your website. It’s like an old-school version of gulp , grunt , and the more hip Node tools. To get started, create a file named Makefile in the root of your project folder and copy in the following code. setup: run_services @go run ./cmd/db/setup.go run_services: @docker-compose up --build -d run_server: @MONGO_URL=mongodb://mongo_user:mongo_secret@0.0.0.0:27017/kudos PORT=:4444 go run cmd/main.go run_client: @/bin/bash -c "cd $$GOPATH/src/github.com/{{ YOUR_GITHUB_USERNAME }}/kudo-oos/pkg/http/web/app && yarn serve" Create a Dockerfile Next, you’ll want to create a Dockerfile. This file tells Docker how to run your application and spares you the effort of deploying a real MongoDB instance for testing purposes. If you don’t already have them installed, go install docker and docker-compose. Then create a file named docker-compose.yml and copy in the following code. version : ' 3' services : mongo : image : mongo restart : always ports : - " 27017:27017" environment : MONGO_INITDB_ROOT_USERNAME : mongo_user MONGO_INITDB_ROOT_PASSWORD : mongo_secret Your app is now ready to test! Run the following commands to get going. make setup make run_server make run_client Your Go webserver should be listening on 0.0.0.0:4444 and your SPA should be serving files from http://localhost:8080 . Visit http://localhost:8080 to play around with your new app! Learn More About Go and Vue Vue.js is a powerful and straightforward framework with phenomenal adoption and community growth. 
In this tutorial, you learned to build a fully-functional, secure SPA with Vue and Go. To learn more about Vue.js, head over to https://vuejs.org or check out these other great resources from the @oktadev team. If you have any questions, please let us know in the comments or follow and tweet us @oktadev.
{ "pile_set_name": "OpenWebText2" }
Evidence for a novel thioredoxin-like catalytic property of gonadotropic hormones. It has been proposed that dithiol-disulfide interchange and oxidation-reduction reactions may play a role in hormone-induced receptor activation. Inspection of the sequences of the gonadotropic hormones revealed a homologous tetrapeptide (Cys-Gly-Pro-Cys) between the beta subunit of lutropin (LH) and the active site of thioredoxin (TD). The beta subunit of follitropin (FSH) has a similar sequence (Cys-Gly-Lys-Cys). Thioredoxin is a ubiquitous protein serving as an electron donor for ribonucleotide reductase, but it also exhibits disulfide isomerase activity. The catalytic activity of TD was assayed by its ability to reactivate reduced and denatured ribonuclease. In this assay, the purified ovine FSH and bovine LH preparations tested were approximately 60 and approximately 300 times, respectively, as active as TD on a molar basis. This heretofore unsuspected catalytic property of FSH and LH may be important in understanding their mechanism of receptor activation and signal transduction.
{ "pile_set_name": "PubMed Abstracts" }
Q: Почему не работает проверка if? У меня простой вопрос, но я чет делаю не так. Приложение работает с БД и при открытии activity сохраняет инфу из БД в array, а из array выводит инфу в listview. Так вот каждый раз при открытии этого activity вызывается метод onCreate и естественно каждый раз при открытии одна и та же инфа сохраняется в array и естественно дублируется в listview. Я написал простую проверка "флаг" она вроде называется, но она почему то не работает и каждый раз список одних и тех же item в listview становиться все больше... Вот код : public class ListOfMyWords extends AppCompatActivity { private ProgressDialogClass dialog; private MyAdapterForWords adapterForWords; private boolean wasUpdate; //<===== @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_list_of_my_words); ListView lv = (ListView) findViewById(R.id.lvOfMyWords); adapterForWords = new MyAdapterForWords(getApplicationContext()); lv.setAdapter(adapterForWords); lv.setOnItemClickListener(new EditAndDelete()); dialog = new ProgressDialogClass(this); if (!wasUpdate) { //<===== saveFromParse(); } } private void saveFromParse() { dialog.showProgressDialog(); new Thread(new Runnable() { @Override public void run() { ParseQuery<ParseObject> pq = ParseQuery.getQuery("MyDictionary"); pq.findInBackground(new FindCallback<ParseObject>() { @Override public void done(List<ParseObject> list, ParseException e) { if (e == null) { for (int index = 0; index < list.size(); index++) { Word word = new Word( list.get(index).getString("hebrewWord"), list.get(index).getString("wordTranslation"), list.get(index).getString("wordTranslate"), list.get(index).getString("objectID")); SingletonForWords.getSingletonForWords().getArrayWords().add(word); } } else { Toast.makeText(getApplicationContext(), "sorry", Toast.LENGTH_SHORT).show(); } wasUpdate = true; //<===== dialog.hideProgressDialog(); adapterForWords.notifyDataSetChanged(); } }); } 
}).start(); } A: Во первых, создавать новый поток здесь не нужно, т.к. название метода findInBackground говорит о том, что работа и так выполняется в отдельном потоке, и результат приходит в callback done. Во вторых, при создании активити, создается новый экземпляр класса, следовательно всем полям класса присваиваются значения по умолчанию, если не определено явное инициализирование полей. Для типа boolean значением по умолчанию является false. Поэтому и происходит загрузка данных при старте активити. Отсюда ваша проверка флага выглядит абсолютно бесполезной. Выходом из этой ситуации может стать сохранение данных например в бд, а при старте смотреть есть ли данные или нет, и если их нет то загружать. В третьих, давайте методам, классам, переменным имена, которые смогут дать ответ на вопрос - "Что делает этот метод/класс/...?". Это может избавить от части комментариев, т.к. из названия будет понятно для чего это нужно. Ваш метод saveFromParse предполагает какое то сохранение, хотя на самом деле он что-то загружает. Ну и в четвертых, никогда не создавайте явно Thread, используйте хотя бы AsyncTask, хотя и его лучше заменить на что-то получше. Иначе это приведет к утечкам, непонятным ошибкам, неправильной работе и т.д.
{ "pile_set_name": "StackExchange" }
Comparison of statistical significance criteria. We study and compare two classes of statistical criteria to assess the significance of exceptional words. Indeed, the Z-score-like criteria, or the normal approximation that is a strict equivalent, suffer from several drawbacks in terms of sensitivity and specificity. Thanks to the combinatorial structure of words, a computation of the exact P-value has been made possible by recent mathematical results. We study here the drawbacks of the Z-score, the choice of the threshold and the tightness to the P-value. A major conclusion is that the normal approximation is always very poor and overestimates statistical significance.
{ "pile_set_name": "PubMed Abstracts" }
Dr. Thomas Deerinck/Visuals Unlimited/Corbis Many cell lines in laboratories around the world are mislabelled or contaminated, yet scientists seldom run checks to verify their cells’ true identity. In an effort to encourage scientists to authenticate cells, researchers at the biotech firm Genentech said on 15 April that they have created a faster, cheaper way to identify cell lines, and have also created an improved list of reliable cell lines.1 The work was published in the journal Nature, which accompanied the piece with an editorial announcing a renewed effort to get authors to authenticate their cell lines for papers submitted to Nature research journals2. The Global Biological Standards Institute, a non-profit group in Washington DC, is also launching a social media campaign this week, #authenticate, to publicize the problem of misidentified cell lines. “The fact that Genentech has chosen to invest in dealing with this problem gives a clear signal that it needs to be dealt with,” says Jon Lorsch, head of the US National Institute of General Medical Sciences. Genentech developed the authentication system to keep a better track of its cell-based activities, says Richard Neve, a cancer researcher at the firm, which is based in South San Francisco, California, and is a subsidiary of the Swiss pharmaceutical giant Roche. When the label lies Biologists have known for decades about the cell-line contamination problem: a vial marked as containing, say, neural stem cells, can quickly end up containing only cancer cells, because cell lines can get swapped or mixed, or stray contaminant cells can invisibly outgrow their neighbours. The non-profit International Cell Line Authentication Committee (ICLAC) has identified 475 cell lines known to be misidentified or contaminated with other cells. One 2007 review of material in biorepositories suggested that a third or more of cell lines are misidentified or contaminated3. 
Lorsch estimates that far fewer than one-third of scientists actually check their cells. Mislabelled cell lines can foil researchers’ attempts to reproduce their own and others’ work, and lead to incorrect conclusions about the activity of tissues such as cancers. Genentech researchers began by cleaning up the existing lists of cell lines. Standard tests identify cells by short, repeated sequences of DNA. Neve and his colleagues gathered DNA profiles for cell lines from seven databases. By cross-referencing profiles and accounting for naming inconsistencies, they whittled down a collection of 8,577 DNA profiles to a set of 2,787 unique profiles. Neve says the firm has uploaded its data to the US National Center for Biotechnology Information, and is also working to make it available through ICLAC. Neve’s team also created a way to profile cell lines by looking at variations in single nucleotides from DNA, a diagnostic test that can be performed in a fraction of the time and cost of the standard procedure. The time needed is about 5.5 hours, as opposed to 21 hours for the older method, says Neve. The new method should also work for non-human cell lines (such as those from highly inbred laboratory mice, which are challenging to identify with standard tests), adds Amanda Capes-Davis, chair of the ICLAC. “It’s a great advance,” says Lorsch. “Hopefully, it will be one of the things that helps change the landscape and convinces people that they need to be rigorous.” Capes-Davis is most excited about the fact that, as part of formulating its method, Genentech also developed a system for more logically annotating and organizing cell lines that other scientists can use. “That framework has the potential to make online cell-line searches more reliable, all the way from Google searches to hunting through data sets and catalogues for mutations and cell-line stocks,” she says. 
Testing time Laboratories that already perform genomic analysis should not abandon the old test, because it has become a standard way of comparing samples from lab to lab, says Capes-Davis. Genentech will continue to do standard tests whenever it removes cells from an original vial, but will use the new test for assessing working stock for experiments, Neve says. Lorsch says that more work is needed to create a test that most scientists will be able to run themselves. For now — as before — most biologists will still need to send out their cell lines to a DNA-testing lab for analysis, a use of time and money that has been a major factor behind scientists’ reluctance to check their materials. The Nature editorial noted that specialist journals such as the International Journal of Cancer are already systematically asking authors to identify their cells. Nature said that it had been asking authors to authenticate cells since 2013, although most had not done so. However, with new resources available, it would now be asking all authors of papers involving cell lines to check their lines against publicly available lists of cells known to be problematic. “What makes the time ripe for action is a combination of a rising awareness among scientists … the availability of proper tests and resources … and the willingness of some funders to tackle the matter,” the journal said. [Nature's News team is editorially independent of its research editorial team.]
{ "pile_set_name": "OpenWebText2" }
Q: Question involving proof by contrapositive of: If $n$ is even, then $nk$ is even, where $k \in \mathbb Z$. The contrapositive of the statement is: If $nk$ is odd, then $n$ is odd; where $k \in \mathbb Z$. I am using the cases where $k$ is odd and $k$ is even. If $k$ is odd: $$nk=2l+1$$ $$n(2m+1)=2l+1$$ $$2mn+n=2l+1$$ $$2mn-2l-1=-n$$ $$2(mn-l)-1=-n$$ $$2(l-mn)+1=n$$ So, $n$ is odd. Now, if $k$ is even, then $$nk=2l+1$$ $$n(2m)=2l+1$$ $$2(mn)=2l+1$$ So, we have even is equal to odd. So, $k$ cannot be even. Does that change the domain in the contrapositive, hence making the two statements not equal? A: When we make the claim 'if n is even, then nk is even', we are not restricting the domain to where $n$ is even: the claim as a whole is about the whole domain. As another example: If I say 'All even integers greater than 2 are the sum of two prime numbers', I am making that claim within the domain of all numbers ... I am just claiming that some of the numbers from that domain have some interesting property. So in your case, once we specify that $n$ and $k$ are integers, we have fixed our domain, and that is not going to change. Even if the 'if' part of a conditional claim like seems to restrict the domain .. it does not. Likewise, the contrapositive 'if nk is odd, then n is even' still assumes that the domain for $n$ and $k$ is all integers. And otherwise, this is indeed a perfectly good proof by contraposition ... Maybe clean it up a little bit by first showing that $k$ cannot be even, and then showing that $n$ has to be odd (all under the assumption that $nk$ is odd, of course). But again, none of that changes the domain that the claims were about.
{ "pile_set_name": "StackExchange" }
On Saturday 11th February, Quad had the wrap party for its awesome experimental audio visual exhibition, v.01, which included collaborations between regular nights at Quad and local creatives. For the opening portion of the event, LongPlayer (an album playback night) chose ‘Cerulean’ by Baths, Five Lamps Films submitted videos for some of the tracks and Pixel Synthesis got Pictographik in to do a live visual remix of the cover artwork in the duration of the album. Above is the original artwork by Jesselisa Moretti, and below are the two remixs the pictobrain spewed forth. The album lasts about 45 minutes but after recently completing a qualifier for the Cut & Paste tournament which gives you 15 minutes to complete a piece of work, I finished pretty quickly, so did two! Enjoy. Blimey, another year passsed and what a productive year it was. It began with cd artwork for the A Plastic Rose guys (with more underway at present); picking up a new client and receiving regular artwork requests from Ground Level records main man and legend Andy McAllister; doing a spread for From Dusk 2 Dawn magazine and getting artwork in too; making it into the first edition of the highly contentious C A R S O N magazine that was designed/not designed by David Carson (now known as ‘Untitled magazine‘, making the first edition more collectable I guess!?); curating a live digital art event called Pixel Synthesis [blog here] in Quad [website here] (thanks for your continuous support team Quad); a ukulele themed charity tee shirt design for local music shop Foulds and finishing with getting a mention in the ComputerArts magazines December inspiration list on their blog. Phew. And this year is already off to a good start with a new client! Happy New Year folks, lets hope its a good one for all of us, eh?!
{ "pile_set_name": "Pile-CC" }
Determination of (+)-alpha-tocopherol in environmental tobacco smoke. A high-performance liquid chromatographic method is described for the quantitation of (+)-alpha-tocopherol in the particulate phase of environmental tobacco smoke (ETS) collected on a 1-micron pore size Fluoropore membrane. A methanol (MeOH) extract of the membrane, which can be used for four other ETS procedures, is analyzed for (+)-alpha-tocopherol on a reversed-phase column with fluorescence detection at selective wavelengths of 280 nm excitation and 330 nm emission. A mobile phase of MeOH and water is used. The method is reproducible with a relative standard deviation (%) of about 12. Recovery is 88%, and the procedure is capable of detecting greater than 0.04 microgram/m3 (+)-alpha-tocopherol in ETS. A comparison of the ETS from five commercially available cigarettes shows similar (+)-alpha-tocopherol concentrations. A cigarette that primarily heats tobacco yields about 6% of that amount of (+)-alpha-tocopherol found in ETS from tobacco-burning cigarettes. (+)-alpha-Tocopherol can be used as a marker for ETS respirable suspended particles (RSP) because it is found at a consistent amount in ETS RSP of 0.29%. However, sufficient amounts of RSP would have to be generated in order to detect (+)-alpha-tocopherol.
{ "pile_set_name": "PubMed Abstracts" }
Reread-adapt and answer-comprehend intervention with Deaf and hard of hearing readers: effect on fluency and reading achievement. The researchers investigated the effect of the Reread-Adapt and Answer-Comprehend intervention (Therrien, Gormley, & Kubina, 2006) on the reading fluency and achievement of d/Deaf and hard of hearing elementary-level students. Children in the third, fifth, and sixth grades at a state school for d/Deaf and hard of hearing students received a fluency intervention that was supplemental to their regular reading instruction. Significant improvement was found on a generalized measure of reading fluency after intervention. Though the researchers found no significant improvement in performance on a generalized measure of comprehension after intervention, the students demonstrated consistently good comprehension on both literal and inferential questions during the intervention sessions. The findings support the importance of incorporating a comprehension monitoring strategy in fluency instruction.
{ "pile_set_name": "PubMed Abstracts" }
Businesses are always hiring freelancers because they can instantly provide the services needed without having to find and onboard a full-time employee. You might have hired a freelancer before, perhaps on platforms like Upwork and Freelancer. The problem is that you’re not only paying the freelancer for their services, you are also paying the platform you’re using, just for using it. Wouldn’t it be better if you could search for and hire freelancers without additional costs? Or better yet, without any costs at all? We present to you: Hubstaff Talent. Hubstaff Talent is a zero-fee freelance website that connects businesses with great talent across the globe. It officially launched on September 30, 2016, and the platform now has more than 50,000 freelancers that specialize in everything from design and development to writing and marketing. Businesses can use Hubstaff Talent to find great talent at competitive prices, while freelancers can use it to find work that fits their schedule and meets their salary expectations. Time tracking for freelancers With invoicing, payments, and more Try it now The main advantages of Hubstaff Talent over most freelance websites It does not charge any fees to use It has over 50,000 freelance profiles to make sure you find the right person for the job Other freelance platforms require businesses and freelancers to use only their site for transactions. This is very limiting, especially when you already have payment and communication tools you prefer using. Hubstaff Talent helps you find ideal candidates for your business easier, and gives you full freedom to communicate and hire however you want—all without any transaction costs or hidden fees. No fees. No markups. No middlemen. This is the promise of Hubstaff Talent. Power up your workday Reach your goals faster with time tracking and work management. 
Get free demo How Hubstaff Talent works Before contacting a freelancer or applying for a job, businesses and freelancers must create a profile. This is free to do and takes 5-10 minutes. After you create your profile, using the site is straightforward. Here’s how to get started: Sign up and create a profile for free Post or apply for a job Communicate using Hubstaff Talent or your preferred means of communication Discuss terms and begin contract Send and receive payment through any payment platform or method of your choice. Freelancers can use Hubstaff to track their hours and export timesheets. There’s even an easy invoice generator so you can send clients exactly how much is due based on hours worked and set rates. Business owners can also create a Hubstaff account to manage their team of freelancers. Hubstaff will then be able to handle all kinds of admin work from monitoring their freelancers’ work progress to processing payments. Hubstaff offers a set of productivity features, as well, that makes it easy to work with freelancers no matter where they are. Note: A Hubstaff time tracking account is not needed to use Hubstaff Talent, but it is highly recommended as it streamlines freelancer management and payments. How it compares with other freelance websites Depending on the freelancer website you’re using, you may encounter fees for simply using the platform, or when you have successfully earned money on their site. Hubstaff Talent will charge you nothing. On top of being more flexible and affordable than the other freelance websites, Hubstaff Talent has tens of thousands of freelancers to choose from. Businesses have access to a global talent pool, and there are no restrictions to how they want to manage freelancers. You can use whatever software or apps you already use for communicating, paying, tracking time, and more. 
Track time, get paid An easier way for clients and freelancers to work together Try it free How easy Hubstaff Talent is to use If you have been to other freelance websites, you’ll have no trouble getting started using and navigating through the thousands of freelancers on Hubstaff Talent. If you’re a business owner and need to hire a freelancer, you simply type a keyword in the search bar that matches your project criteria, and a list of freelancers will appear. You can then narrow down freelancer results by skills, availability, pay rate, years of experience, language, location, and age. There is also an option to view agencies offering their services as well as freelancers. Looking at profiles, you will see their experience and languages, and links to their websites or portfolios. You will also see a button that lets you invite them to a job that you posted, and links to their social accounts so you can easily reach out to them. For freelancers, the process for finding jobs works very similarly to how businesses go about finding freelancers. If you’re a freelancer or agency looking for work, you can type keywords in the search bar based on the types of services you offer. All job opportunities related to your search query will then populate the results page. You can filter jobs by posting date, required skills, required experience, pay rates, project budgets, location, and language. Pay freelancers in a matter of clicks Timesheet approvals and automated payroll Try it free Help us make Hubstaff Talent even better The number of remote opportunities is continuously growing, and we want to help freelancers find the remote jobs they’re searching for. That’s exactly what Hubstaff Talent is for—to connect freelancers and employers through a free and simple platform. We already have the first part nailed down, but we’re always open to suggestions for how to make the site more relevant and easy to use. What about you? 
Is there a Hubstaff Talent feature that you really love, or are there any improvements you’d like to see in a future release? We’d be happy to hear about them in the comments below. This post was originally published January 16, 2017, and updated March 2019.
{ "pile_set_name": "OpenWebText2" }
Q: Show that the function is well-defined and find its derivative and a closed form Explain why the function $$f(x):=\int_0^x{\sin t \cdot \cos (xt) \over t }dt$$ is well-defined and compute its derivative $\;f'(x)$ in a closed form. I am a bit confused and don't know where I should start at. Seems to me, that calculating this integral is not an option to explain that the function is well-defined. I have tried to find its derivative using differentiation under an integral sign. However I am not sure if it is allowed, since $x$ stays not only under the integral sign, but also is an integration border. How do I tackle the problem? A: The functions $f(t)=\frac{\sin t}{t}$ and $g(t)=\cos(xt)$ are both continuous and bounded, hence $f(x)$ is a $C^1$ Lipschitz function. Moreover, you can remove the $x$ from appearing in the integral by substituting $xw$ in place of $t$: $$\begin{eqnarray*}\int_{0}^{x}\frac{\sin(t)\cos(xt)}{t}\,dt &=& \int_{0}^{1}\frac{\sin(xw)\cos(x^2 w)}{w}\,dw\\ &=& \frac{1}{2}\int_{0}^{1}\frac{\sin((x-x^2)w)+\sin((x+x^2)w)}{w}\,dw.\end{eqnarray*}$$
{ "pile_set_name": "StackExchange" }
Q: Could not load file or assembly Exception from HRESULT: 0x80131040 I created my first MVC 4 project and it works perfectly on local server. But when I publish it on to local folder and upload the folder contents to hosting server. I try to run it and I get this error: Could not load file or assembly 'DotNetOpenAuth.Core, Version=4.0.0.0, Culture=neutral, PublicKeyToken=2780ccd10d57b246' or one of its dependencies. The system cannot find the file specified. Can any one help me please? Web.config: <assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1"> <dependentAssembly> <assemblyIdentity name="DotNetOpenAuth.Core" publicKeyToken="2780ccd10d57b246" /> <bindingRedirect oldVersion="1.0.0.0-4.0.0.0" newVersion="4.1.0.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="DotNetOpenAuth.AspNet" publicKeyToken="2780ccd10d57b246" /> <bindingRedirect oldVersion="1.0.0.0-4.0.0.0" newVersion="4.1.0.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="System.Web.Helpers" publicKeyToken="31bf3856ad364e35" /> <bindingRedirect oldVersion="1.0.0.0-2.0.0.0" newVersion="2.0.0.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="System.Web.Mvc" publicKeyToken="31bf3856ad364e35" /> <bindingRedirect oldVersion="1.0.0.0-4.0.0.0" newVersion="4.0.0.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="System.Web.WebPages" publicKeyToken="31bf3856ad364e35" /> <bindingRedirect oldVersion="1.0.0.0-2.0.0.0" newVersion="2.0.0.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="WebGrease" publicKeyToken="31bf3856ad364e35" /> <bindingRedirect oldVersion="1.0.0.0-1.3.0.0" newVersion="1.3.0.0" /> </dependentAssembly> </assemblyBinding> P.S The same project in Web Forms works on hosting server. A: Finally found the answer!! 
Go to References --> right-click on the dll file that is causing the problem --> select the properties --> check the version --> match the version in properties to web config <dependentAssembly> <assemblyIdentity name="YourDllFile" publicKeyToken="2780ccd10d57b246" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-YourDllFileVersion" newVersion="YourDllFileVersion" /> </dependentAssembly> A: What worked for me immediately was: I located the bin folder (picture below shows). moved all dlls to another folder for safety. then rebuilt the project. after it solved the issue, I deleted the old dll files. A: If your solution contains two projects interacting with each other and both using the same reference, and if the version of the respective reference is different in both projects, then such errors can also occur. Keep updating all references to the latest one.
{ "pile_set_name": "StackExchange" }
When it comes to job interviews, we often see it as a one-way street, with the interviewer holding all the cards. In reality, though, it’s a two-way interaction. You are also interviewing them to see if their company is the right fit for you. Sure, sometimes desperation means you don’t have that luxury, but hopefully at some point you’ll have options and you’ll get to choose the company that’s best for you. A large part of determining that is the questions you ask at the end of the interview. Beyond that, asking questions shows your interest in the job and the company. Q&A often only consists of a few minutes at the end of an hour-long interview, but it’s the final impression you’ll make, and according to one-third of HR managers, it can make or break your chances of getting the gig. When they inevitably ask you if you have questions, not having any indicates that you don’t really care about the position and are seemingly only going through the motions of an interview; conversely, asking good, incisive questions shows you’re knowledgeable about the field and sincerely curious about the job. The goal with your own questions is to just get a better picture of the company as a whole and your potential role in it. You don’t want to get too detailed — save that for the follow-up interview, or when they offer you the job. For instance, you don’t want to ask about salary or benefits right off the bat; that will make it seem like you’re only interested in money, and not the position. Elsewhere online, you can find lengthy lists of 30-50 questions to ask at the end of an interview. That’s far too many, however, and makes you pick and choose out of your head based on the scenario. In this post, we’ll give you just a few options from a few different categories that we think are the most important. You want to have at least 3 questions to ask, so come prepared with at least 6 just in case some get answered in the course of the interview. 
Questions About the Position What is a day or week in the life of this position like? Can you show me an example of a project I’d be working on? — This is fairly straightforward. You obviously want to know what the daily/weekly workflow and tasks will be. For many jobs, it’s hard to nail down what a consistent day/week looks like, so the answer you get may be vague. But hopefully it’s enough to get a feel for whether you’re a good fit for the position. This is one that is often answered before the end of the interview, so be sure to have a back-up. What is the history of this position? Is it newly created? If not, why did the previous person leave it? — It’s beneficial to know the history of the position you’re interviewing for. Is it newly created? If so, you have the opportunity to set the standard. Has the position seen 5 employees in 5 years? You may want to think twice about taking it. This can be uncomfortable to ask, but is necessary on your end to know what kind of role you’re getting into. Questions About the Future Is there room for advancement or career training in this position? — If the answer is no, you may not want the position. If the answer is yes, it’s helpful to know what you can aspire to. It also signals to the interviewer that you have ambition and that you set your sights high. Is there the opportunity for mentorship within this position? — This is somewhat dependent on the individual. For some folks, it’s very important to have career mentorship from a manager or executive; if this is important to you, ask away. This will signal to the interviewer that you are interested in growth — nobody wants a static employee who plateaus in their first week. Questions About Success How will you define success for this position? — When expectations are vague, feedback is hard to come by, and you may be held to standards you didn’t know existed. You want to know exactly what they think a successful employee will accomplish in this position. 
There should be specific goals, too, versus something broad like, “Increase sales through marketing and advertising.” What are the most important objectives for this position in the first few months? — This is a follow-up question to the previous, and is important because how you kick off a new job is crucial in determining your future at that company. Will you immediately establish yourself as a go-getter, or as mediocre and inefficient? Knowing some immediate objectives will help you make sure you’re on the right course. You can also determine if the expectations are reasonable; if you’re asked to do too much in the first few months, it may be an unfortunate sign of things to come. Questions About the Company What are the 5- and 10-year goals of the company? — This tells the interviewer that you’re thinking about the future, and that you care about where the company is going. You’ll get an idea of whether this is a company you want to stick around with or not. What’s the company culture like? Do co-workers eat lunch together? Do you have regular team events? — You see this question a lot in lists like this, but it’s often too vague. Asking simply “What’s the company culture like?” leaves a lot of wiggle room for the interviewer, and can be hard to answer. Asking some specific questions along with it helps you get a better understanding of the specific environment. You can also ask about after-work activities, about collaborating on projects, etc. The culture of where you work will go a long way in determining your satisfaction with the job. Questions For the End Do you have any concerns about my qualifications? — This is a tough question to ask, but one that really sets you apart from other candidates. It may even throw off the interviewer, but in a good way, and will hopefully get them to voice some honest thoughts they have about your resume. If they bring up a couple problem areas they see, you can address them confidently and ease their fears. 
Hopefully you can go into the interview anticipating any concerns they may have, and be prepared to reassure them that you’re the right candidate. What are the next steps in the interview process? — This should always be your last question. This is simply for logistical purposes, and hopefully outlines whether there are more interviews, any homework for you (like writing or design tests), and what the timeline is like for hiring. What questions have you had good luck with using in job interviews?
{ "pile_set_name": "OpenWebText2" }
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <link rel="stylesheet" type="text/css" href="../styles/main.css" /> <script language="JavaScript" src="../javascript/main.js" type="text/javascript"> </script> <link rel="icon" type="image/png" href="/lib/axe/assets/img/docs-icon.png" /> <link href="http://fonts.googleapis.com/css?family=Droid+Sans" rel="stylesheet" type="text/css" /> <title></title> </head> <body class="PopupSearchResultsPage" onload="NDOnLoad()"> <script language="JavaScript" type="text/javascript"> //<![CDATA[ <!-- if (browserType) {document.write("<div class=" + browserType + ">");if (browserVer) {document.write("<div class=" + browserVer + ">"); }}// --> //]]> </script> <div id="Index"> <div class="SRStatus" id="Loading"> Loading... </div> <div class="SRResult" id="SR_list_perasp"> <div class="IEntry"> <a href="../files/lib/axe/classes/Utilities/list-asp.html#list.asp" target="_parent" class="ISymbol">list.asp</a> </div> </div> <div class="SRResult" id="SR_logger_perasp"> <div class="IEntry"> <a href="../files/lib/axe/classes/Utilities/logger-asp.html#logger.asp" target="_parent" class="ISymbol">logger.asp</a> </div> </div> <table border="0" cellspacing="0" cellpadding="0"></table> <div class="SRStatus" id="Searching"> Searching... 
</div> <div class="SRStatus" id="NoMatches"> No Matches </div><script type="text/javascript"> //<![CDATA[ <!-- document.getElementById("Loading").style.display="none"; document.getElementById("NoMatches").style.display="none"; var searchResults = new SearchResults("searchResults", "HTML"); searchResults.Search(); --> //]]> </script> </div><script language="JavaScript" type="text/javascript"> //<![CDATA[ <!-- if (browserType) {if (browserVer) {document.write("<\/div>"); }document.write("<\/div>");}// --> //]]> </script><script src="http://ajax.googleapis.com/ajax/libs/mootools/1.11/mootools-yui-compressed.js" type="text/javascript"> </script><script type="text/javascript"> //<![CDATA[ function ToggleMenu(c){}; window.addEvent("domready", function(){ $$(".CTitle").each(function(heading, i){if(i>0)new Element("a",{"href":"#MainTopic","class":"toTop"}).setHTML("top").injectBefore(heading.getFirst());});new SmoothScroll({transition:Fx.Transitions.Cubic.easeInOut,duration:1000}); var selected = $("MSelected"); var current = (selected) ? selected.getParent().getParent() : false; $$("div.MGroupContent").setStyle("display","block"); $$("div.MGroup").each(function(div){ var link = div.getElement("a"); var block = link.getNext(); var fx = new Fx.Slide(block); if(block != current) fx.hide(); link.addEvent("click", function(){fx.toggle();}); }); }); //]]> </script> </body> </html>
{ "pile_set_name": "Github" }
Brushing My Way! May 23, 2012 In 2010, Arm & Hammer Spinbrush launched My Way! Girls — a customizable toothbrush designed for girls 6-12. The team needed a bold strategy to differentiate as well as launch a product completely new to the category. Ask The Expert “There is a tremendous value in being part of the ANA’s strong network of brands… The Ask the Expert service validated the research that my team had already done [and] saved us time.” Michael Harvin, Senior Manager, Global Agency Relations at American Express
{ "pile_set_name": "Pile-CC" }
Introduction {#s1} ============ Chemotherapies are highly effective in treating most cancers, but their use is limited by the potential for cardiotoxicity. All these drugs have a wide range of adverse effects, the most serious of which is cardiotoxicity; the severity of these effects is related to the chemotherapy regimens, patient populations and duration. The occurrence of clinical heart failure seems to be in the range of 1% to 5%, and asymptomatic decrease in left ventricular function is in the range of 5% to 20%. Toxicity can occur early (within 1 year) or late (particularly among children, where late cardiac abnormalities are detectable in two thirds of surviving patients). Many trials address the role of ACE inhibitors and beta-blockers, effective therapies for established LVSD, in preventing chemotherapy-induced cardiotoxicity. In this article we discuss the types of the cardiomyopathy, diagnosis, prognosis, prevention and management. Chemotherapy induced cardiomyopathy {#s2} =================================== The survival rate of cancer patients has greatly increased over the last 20 years. However, to achieve this result, a considerable price has been paid in terms of the side effects associated with the intensive anticancer treatment. Cardiotoxicity may compromise the clinical effectiveness of chemotherapy, affecting the patient\'s survival and quality of life independently of the oncological prognosis. As a result of the increasing number of long-term cancer survivors and of the tendency to use higher doses of cytotoxics and combined treatments with synergistic cardiotoxic effects, the magnitude of this problem is growing. Accordingly, the onset of cardiac dysfunction, even if it is asymptomatic, not only negatively affects cancer patients\' cardiac outcomes, but also seriously limits their therapeutic opportunities. 
There are 2 types of cardiac toxicities, type I which is more serious and result in permanent damage to the myocardium and type II which is usually reversible. Features and risk factors of both types summarised in [Table 1](#T1){ref-type="table"}. Anthracyclines, even after three decades, continue to play a prominent role in the treatment of a wide variety of both hematologic and solid tumors; it is now well established that anthracycline cardiotoxicity is a cumulative dose-related effect, suggesting that each administration constitutes additive or sequential damage. ###### Chemotherapy related cardiac dysfunction -------------------------------------------------------------------- TYPE I\ TYPE II\ e.g. Doxorubicin e.g. Trastuzumab --------------------------------- ---------------------------------- Cellular death\ Cellular dysfunction Damage starts with first\ administration Biopsy changes No typical biopsy changes Cumulative dose related Not cumulative dose related Permanent damage\ Predominately reversible\ (Myocyte death) (Myocyte dysfunction) Risk factors Risk factors   Prior/concurrent radiotherapy   Prior/concurrent anthracycline   Combination chemotherapy   Paclitaxel   Age   Age   Previous cardiac disease   Previous cardiac disease   Hypertension   Obesity (BMI \> 25) -------------------------------------------------------------------- As early as 1967, there were reports of heart failure in children treated with doxorubicin for leukemia; from that time concerns regarding chemotherapy starts. Aggressive and combination chemotherapy has achieved remission in most types of cancers. However, concerns for, or manifestations of, cardiac adverse events may result in discontinuation of or reluctance to use a particular agent at an effective dose. Cytostatic antibiotics of the anthracycline class have been clearly associated with cardiotoxicity. 
However, there are a number of other chemotherapy agents that cause cardiotoxicity and yet are not well recognized. Cardiac events associated with chemotherapy vary in incidence and may occur acutely (during or shortly after treatment), sub-acutely (within days or weeks after completion of chemotherapy) or chronically (weeks to months after drug administration). They may also occur as late sequelae, many years after the end of treatment. Cardiac events associated with chemotherapy may consist of mild blood pressure changes, thrombosis, electrocardiographic (ECG) changes, arrhythmias, myocarditis, pericarditis, myocardial infarction, cardiomyopathy, cardiac failure (left ventricular failure), and congestive heart failure (CHF). The substantial limitations of using only changes in LVEF are compromised further by our knowledge that approximately half of all heart failure occurs in patients who maintain a normal LVEF; their overall cardiac outcomes are similar to those who exhibit a low LVEF \[[@R01]\]. Cardiotoxicity may depend on the dose administered during each course or on the total cumulative dose, or may be completely independent of the dose, like anthracycline-induced cardiotoxicity, which has been recognized for more than 20 years. It has been described as 3 distinct types of cardiotoxicity. Acute or sub-acute injury is a rare form of cardiotoxicity that may occur immediately after a single dose or a course of anthracycline therapy, with clinical manifestations occurring within a week of treatment. These may be in the form of transient electrophysiological abnormalities, a pericarditis-myocarditis syndrome or acute left ventricular failure. The electrophysiological abnormalities may present as nonspecific ST and T wave changes, T wave flattening, decreased QRS voltage and prolongation of QT interval. Sinus tachycardia is the most common rhythm disturbance. ECG changes may be seen in 20 to 30% of the patients \[[@R02]\]. 
Arrhythmias, including ventricular, supraventricular and junctional tachycardia, are seen in 0.5 to 3% of patients with an overall incidence of 0.7% \[[@R02]\]. More serious arrhythmias, such as atrial flutter or atrial fibrillation, are rare. Sub-acute cardiotoxicity has resulted in acute failure of the left ventricle, pericarditis or a fatal pericarditis-myocarditis syndrome in some rare cases. The ECG changes or arrhythmias do not seem related to chronic cardiomyopathy. Early onset chronic progressive cardiotoxicity: anthracyclines can also induce early onset progressive chronic cardiotoxicity resulting in cardiomyopathy. This is a more common and clinically important type of cardiotoxicity \[[@R03]\].Chronic anthracycline-induced cardiomyopathy usually presents within a year of treatment. It may persist or progress even after discontinuation of anthracyclines therapy, and may evolve into a chronic dilated cardiomyopathy in adult patients and restrictive cardiomyopathy in pediatric patients \[[@R04]\]. Late onset chronic progressive anthracycline cardiotoxicity causes ventricular dysfunction \[[@R05]\], heart failure and arrhythmias \[[@R06]\] years to decades after chemotherapy has been completed. This suggests that patients who have received anthracyclines chemotherapy and survived their cancer may have undetected increases in morbidity and mortality due to cardiotoxicity. There may be a period of time, after completion of treatment, during which patients may experience no symptoms of left ventricular dysfunction or arrhythmia and cardiac function may appear normal. After the initial acute myocardial insult, there is a progressive decrease in ventricular function leading to late onset decompensation. An increased incidence of severe echocardiographic abnormalities has been seen with increased duration of follow-up. 
An 18% incidence of reduction in fractional shortening on resting echocardiogram was observed 4 to 10 years after completion of anthracycline therapy \[[@R06]\]. Cumulative doses of doxorubicin as low as 228 mg/m^2^ have shown to increase after-load or decrease contractility, or both, in 65% of patients with leukaemia up to 15 years after treatment with anthracyclines \[[@R07]\]. Late onset arrhythmia and sudden death has occurred more than 15 years after anthracycline treatment \[[@R08]\]. This could mean that more anthracyclines induced cardiotoxicity may appear in the future in patients who are presently asymptomatic. Patients may remain in a compensated state for many years until stressors such as acute viral infection \[[@R09]\] or cardiovascular stressors such as weight lifting, pregnancy and surgery \[[@R06]\] could possibly trigger a cardiac event. Pathogenesis {#s3} ============ The cause of anthracycline-induced cardiotoxicity is probably multi-factorial. Free radicalmediated myocyte damage is one of the most thoroughly studied mechanisms by which anthracyclines have been proposed to cause cardiotoxicity \[[@R10]\]. The myocardium is more susceptible to free radical damage than other tissues because it has comparatively less superoxide dismutase and catalase activity, and its major defense against free radical damage, glutathione peroxidase, is suppressed by doxorubicin. The superhydroxide free radicals accumulate and cause severe lipid peroxidation, leading to extensive destruction of the mitochondrial membranes, endoplasmic reticulum and nucleic acid. Circulating pro-inflammatory cytokines have also been implicated in anthracycline cardiotoxicity. Doxorubicin induces the release of histamine and tumour necrosis factor-α from macrophages and interleukin-2 from monocytes \[[@R11]\]. These cytokines have functional receptors on the myocardium and their release may result in dilated cardiomyopathy. 
Adrenergic dysfunction and down regulation of myocardial histamine and β-adrenergic receptors has also been proposed as a cause for an evolving and established anthracyclineinduced ventricular dysfunction. Risk factors for cardiotoxicity {#s4} =============================== Some of the risk factors relating to early and late (but not acute) cardiotoxicity have been reported. These include cumulative dose, rate of drug administration, mediastinal radiation, advanced age, younger age, female gender, pre-existing heart disease and hypertension. A multivariate analysis of these factors based on histological evidence of anthracycline-induced cardiac damage concluded that higher rates of administration and previous cardiac irradiation were independent risk factors. At a cumulative total dose of \< 400 mg/m^2^ body surface area, the incidence of CHF was found to be 0.14%. This increased to 7% at a dose of 550 mg/m^2^ and to 18% at a dose of 700 mg/m^2^ \[[@R04]\]. There is a formula to calculate the cardiac toxicity as follow Y= (X)^2^/a, Y is the likelihood of developing congestive heart failure, X equals to number of cycles of anthracycline-containg regimen administered, a equals to correction constant determined by cycle dose and the duration between the cycles, so if a patient receives 9 cycle of anthracycline in a dose of 50 mg/m^2^ every 21 days, so it will be Y= 81/16 X100, the Y will be equal to 5%, this is the risk of developing congestive heart failure \[[@R04]\]. Serial and post-therapy cardiac monitoring is necessary to reduce morbidity due to anthracycline-induced cardiotoxicity. Patients should be monitored for clinical signs of cardiomyopathy by physical examination, chest x-rays, ECG, echocardiogram, endomyocardial biopsy if feasible and radionuclide angiography before initiation of treatment and at periodic intervals during therapy. Physical examination alone may miss over 50% of cases of early and reversible chemotherapy-induced CHF \[[@R12]\]. 
Acute ECG changes and arrhythmias following doxorubicin therapy occur in 0 to 14% of patients. Serial measurements of LVEF and fractional shortening are the most common indices monitored to assess left ventricular systolic function and cardiotoxicity. This can be achieved by 2-dimensional, M-mode and color Doppler echocardiographic examination. Baseline LVEF estimation is recommended before the start of doxorubicin therapy. If LVEF is ≤ 30%, starting chemotherapy is not recommended. Patients with LVEF ≥ 30% but \<50% can receive doxorubicin, but measurements should be repeated before each dose. For patients with baseline LVEF ≥ 50%, evaluations should be repeated after a cumulative dose of 250 to 300 mg/m^2^ and thereafter at 450 mg/m^2^ if they have no risk factors. If patients have known cardiovascular disease, prior radiation treatment to the chest, abnormal ECG changes or concomitant cardiotoxic chemotherapy, LVEF measurement should be repeated at 400 mg/m^2^ instead of 450 mg/m^2^. It should be monitored with each dose thereafter. Doxorubicin therapy should be stopped if there is a ≥ 10% absolute drop in the ejection fraction associated with a decrease in LVEF to ≤ 50% in patients with baseline LVEF ≥ 50%, and to ≤ 30% in patients with baseline LVEF \< 50% but \> 30% \[[@R13]\]. Biomarkers such as B-type natriuretic peptide and troponins (I and T) are increasingly being used to stratify patients into higher and lower risk categories. This process is well established in the cardiology literature and recently has been reported in oncology patients. In fact, an elevated troponin during chemotherapy seems to correlate with increased risk for the development of cardiac toxicity \[[@R14]\]. Cardiac troponins as a biological marker for myocardial damage can be used for monitoring in patients received anthracyclines. 
Troponin I (TnI) soon after high-dose chemotherapy (HDC) is a strong predictor of left ventricular dysfunction and poor cardiac outcome, particularly in patients showing a persistent TnI increase \[[@R15]\]. Some cardio-protective agents are used which are associated with a decrease in cardiotoxicity and facilitate the use of higher cumulative doses of anthracyclines, but this is beyond the scope of this article. Also, the cardiotoxicity of anthracyclines can be minimized by using analogues that may be less cardiotoxic. Compounds such as epirubicin and idarubicin exhibited decreased cardiotoxicity in preclinical trials. The use of anti-oxidant agents or iron chelators has found its way into the prevention of cardiac toxicities. Probucol, vitamin E (as anti-oxidants) and carvedilol have shown promise in animal studies. Angiotensin-converting enzyme (ACE) inhibitors (ACEIs) have been shown to slow the progression of left ventricular dysfunction in several different clinical settings, including anthracycline-induced cardiomyopathy \[[@R16]\]. Furthermore, data referring to experimental models suggest that the cardiac renin-angiotensin system (RAS) plays an important role in the development of anthracycline-induced cardiomyopathy and that treatment with ACEIs protects against chemotherapy-induced cardiotoxicity. A recent study showed that \[[@R17]\] early treatment with enalapril in patients with evidence of myocardial cell injury (TnI increase) after HDC seems to prevent the development of cardiotoxicity and the occurrence of associated adverse clinical events. The benefits of and clinical indications for ACEIs have been clearly defined in many cardiovascular conditions such as chronic heart failure, asymptomatic left ventricular dysfunction, acute myocardial infarction, and hypertension and in patients at increased risk of cardiovascular events. 
In cardiomyopathy, because of anthracycline-induced cardiotoxicity, the use of enalapril has been proved to be beneficial in prolonging survival and in preventing further deterioration of cardiac function. In conclusion, frequent monitoring and follow-up of patients who took anthracyclines, using troponins and echocardiograms, together with early administration of carvedilol, enalapril and possibly anti-oxidants like probucol and vitamin E, will be beneficial. Conflict of Interest {#s5} ==================== The authors declare no conflict of interest related to this article.
{ "pile_set_name": "PubMed Central" }