text
stringlengths
14
5.77M
meta
dict
__index_level_0__
int64
0
9.97k
package org.neo4j.visualization; import java.util.Collections; import java.util.HashMap; import java.util.Map; public enum PropertyType { /** * Represents a String property. */ STRING( null, "String", String.class ) { @Override <T> T apply( ValueFormatter<T> formatter, Object value ) { return formatter.formatString( ( String ) value ); } }, /** * Represents an integer property. */ INT( null, "int", Integer.class, int.class ), /** * Represents a long property. */ LONG( null, "long", Long.class, long.class ), /** * Represents a boolean property. */ BOOLEAN( null, "boolean", Boolean.class, boolean.class ), /** * Represents a short property. */ SHORT( null, "short", Short.class, short.class ), /** * Represents a character property. */ CHAR( null, "char", Character.class, char.class ), /** * Represents a byte property. */ BYTE( null, "byte", Byte.class, byte.class ), /** * Represents a float property. */ FLOAT( null, "float", Float.class, float.class ), /** * Represents a double property. */ DOUBLE( null, "double", Double.class, double.class ), // Array types /** * Represents an array of Strings. */ STRING_ARRAY( String.class, "String[]", String[].class ) { @Override <T> T apply( ValueFormatter<T> formatter, Object value ) { return formatter.formatStringArray( ( String[] ) value ); } }, /** * Represents an array of integers. */ INT_ARRAY( Integer.class, "int[]", Integer[].class, int[].class ), /** * Represents an array of longs. */ LONG_ARRAY( Long.class, "long[]", Long[].class, long[].class ), /** * Represents an array of booleans. */ BOOLEAN_ARRAY( Boolean.class, "boolean[]", Boolean[].class, boolean[].class ), /** * Represents an array of shorts. */ SHORT_ARRAY( Short.class, "short[]", Short[].class, short[].class ), /** * Represents an array of characters. */ CHAR_ARRAY( Character.class, "char[]", Character[].class, char[].class ), /** * Represents an array of bytes. 
*/ BYTE_ARRAY( Byte.class, "byte[]", Byte[].class, byte[].class ), /** * Represents an array of floats. */ FLOAT_ARRAY( Float.class, "float[]", Float[].class, float[].class ), /** * Represents an array of doubles. */ DOUBLE_ARRAY( Double.class, "double[]", Double[].class, double[].class ), /** * Represents an undefined type. */ UNDEFINED( null, "Object" ) { @Override <T> T apply( ValueFormatter<T> formatter, Object value ) { return formatter.formatUnknownObject( value ); } }; /** * Get the {@link PropertyType} representing the type of a value. * @param propertyValue * the value to get the type of. * @return the type of the given value. */ public static PropertyType getTypeOf( Object propertyValue ) { return getTypeFor( propertyValue.getClass() ); } static PropertyType getTypeFor( Class<? extends Object> type ) { PropertyType result = typeMap.get( type ); if ( result != null ) { return result; } else { return UNDEFINED; } } <T> T apply( ValueFormatter<T> formatter, Object value ) { if ( scalarType != null ) { PropertyType type = getTypeFor( scalarType ); if ( value instanceof Object[] ) { return formatter.formatBoxedPrimitiveArray( type, ( Object[] ) value ); } else { return formatter.formatPrimitiveArray( type, value ); } } else { return formatter.formatBoxedPrimitive( this, value ); } } /** * Apply a formatter to a value and return the result. * @param <T> * the type of the result. * @param formatter * the formatter to apply to the value. * @param propertyValue * the value to apply the formatter to. * @return the value as produced by the formatter. */ public static <T> T format( ValueFormatter<T> formatter, Object propertyValue ) { return getTypeOf( propertyValue ).apply( formatter, propertyValue ); } /** * Format a given value to a String by applying a string formatter. * @see #format(ValueFormatter, Object) * @param propertyValue * the value to turn into a string. * @return the given value formatted as a string. 
*/ public static String format( Object propertyValue ) { return format( ValueFormatter.DEFAULT_STRING_FORMATTER, propertyValue ); } private final Class<?>[] types; private final Class<?> scalarType; /** * Specifies the name of the type. */ public final String typeName; private PropertyType( Class<?> scalarType, String descriptor, Class<?>... types ) { this.typeName = descriptor; this.types = types; this.scalarType = scalarType; } private static final Map<Class<?>, PropertyType> typeMap; static { Map<Class<?>, PropertyType> types = new HashMap<Class<?>, PropertyType>(); for ( PropertyType type : values() ) { for ( Class<?> cls : type.types ) { types.put( cls, type ); } } typeMap = Collections.unmodifiableMap( types ); } }
{ "redpajama_set_name": "RedPajamaGithub" }
4,472
Saturday Night! – The Album est le deuxième album studio de Schoolly D, sorti en 1986. L'album s'est classé au Top R&B/Hip-Hop Albums Liste des titres Notes et références Album de Schoolly D Album musical sorti en 1986 Album publié par Jive Records
{ "redpajama_set_name": "RedPajamaWikipedia" }
1,432
Каба́нов Павло́ Олексі́йович ( , Санкт-Петербург — , Москва) — радянський військовий діяч, Герой соціалістичної праці (1943), начальник залізничних військ СРСР (1945–1968), генерал-полковник технічних військ. Біографія Народився в Санкт-Петербурзі в сім'ї робітника каменетесної майстерні. Закінчив початкову трирічну школу, працював разом з батьком, потім учнем бляхаря. У 1916 році призваний до російської армії, служив у броньовий дивізіоні в Петрограді. Учасник бурхливих революційних подій у Петрограді з лютого по жовтень 1917 року і Жовтневої революції. У 1918 році одним з перших вступив у Червону Армію, і в тому ж році — в РКП (б). Служив у Першій окремій залізничний роті. У роки Громадянської війни в складі роти брав участь у відновленні залізничних мостів і шляхів в Прибалтиці і Білорусі, брав участь у забезпеченні бойових дій на Західному фронті проти польської армії. Одночасно був парторгом роти і помічником комісара залізничного дивізіону. Після Громадянської війни закінчив курси при Училищі військових сполучень РСЧА. Командував взводом залізничних військ, будував залізничну лінію Орша—Лепель в Білорусі, потім — Чернігів-Овруч в Україні з мостами через Дніпро і Прип'ять. У 1930 році направлений на навчання на факультет військових сполучень при Ленінградському інституті інженерів залізничного транспорту, в 1932 році переведений у Військово-транспортну академію РСЧА, яку успішно закінчив у 1936 році. Призначений помічником командира залізничного полку з технічної частини, з 1937 року — командир залізничного полку. З 1939 року командував 5-ю залізничною бригадою Особливого корпусу залізничних військ на Далекому Сході. Бригада побудувала залізничну лінію Мензовка-Варфоломеєвка через хребет Сіхоте-Алінь. У березні 1941 року бригада була перекинута на західний кордон в район Львів—Тернопіль—Перемишль для будівництва залізничних шляхів в прикордонній смузі. Там 22 червня 1941 року бригада і її командир полковник Павло Кабанов вступили у Німецько-радянську війну. 
У перші місяці війни бригада виконувала завдання з відновлення зруйнованих ворожою авіацією об'єктів залізничного транспорту, забезпечення безперебійної подачі фронту ешелонів з озброєнням, паливом і боєприпасами, знищенням і евакуацією залізничного майна при відході Червоної Армії. У січні 1942 року призначений начальником Управління військово-відновних і загороджувальних робіт № 3 (УВВР-3), яке відповідало за всі роботи з будівництва та відновлення залізничних магістралей в смузі Південного і Південно-Західного фронтів. Особливо важкими видалися літо і осінь 1942 року, коли саме на цих фронтах противник перейшов в генеральний наступ. До складу цього потужного формування входило 4 бригади і десятки частин залізничних військ. Кабанов особисто відповідав за залізничні перевезення вантажів для військ, які обороняли Сталінград. Коли в районі міста були знищені всі мости через Волгу, запропонував спорудити наплавний залізничний міст, зібраний зі старих барж. Постановою Раднаркому СРСР від 4 серпня 1942 року полковнику П. О. Кабанову присвоєно військове звання «генерал-майор технічних військ». У ході зимового наступу радянських військ на початку 1943 року бійці УВВР-3 відновили за 18 діб залізничний міст через Дон у міста Лиски і 2 мости у міста Липки. Тим самим була успішно вирішена задача подачі військових вантажів для наступаючих військ. Навесні 1943 року під командуванням Кабанова за 2 місяці побудована нова залізнична магістраль Старий Оскол—Сараєвка, яка прийняла на себе все постачання військ Воронезького фронту в ході Курської битви. При підготовці битви і під час неї ця дорога, як і інші, піддавалася щоденним бомбардуванням ворожої авіацією. У найважчих умовах бійці відновлювали всі пошкодження. Перервати рух ворогові не вдалося. 
Указом Президії Верховної Ради СРСР від 5 листопада 1943 року «за особливі заслуги у забезпеченні перевезень для фронту і народного господарства і видатні досягнення у відновленні залізничного господарства у важких умовах воєнного часу» генерал-майору технічних військ Павлу Олексійовичу Кабанову присвоєно звання Героя Соціалістичної Праці з врученням ордена Леніна і Золотої медалі «Серп і Молот». У розпал битви за Дніпро на початку листопада Кабанов отримав наказ спорудити залізничний міст через Дніпро для забезпечення військ фронту. Зумівши правильно організувати використання наявних у його розпорядженні сил і засобів, активно використовуючи допомогу місцевого населення та ініціативу підлеглих, наказ був виконаний наказ за 14 діб. У цей термін був споруджений низьководний Дарницький залізничний міст у Києві протяжністю понад 1 кілометр. Фактично на цьому мосту «висіло» все постачання 1-го українського фронту при проведенні Київської оборонної та Житомирсько-Бердичівської наступальних операцій радянських військ. А в лютому 1944 року було завершено спорудження висоководного Дарницького залізничного моста, завдяки якому ешелони йшли до військ фронту і в період бурхливого дніпровського паводку. Продовжуючи шлях на захід, частини УВВР-3 забезпечували успішний наступ в ході битви за Правобережну Україну, Львівсько-Сандомирської, Вісло-Одерської і Берлінській операцій. За 3 роки війни УВВР-3 під командуванням генерал-лейтенанта технічних військ П. О. Кабанова (це звання присвоєно 13 серпня 1944 року) відновили понад 8500 кілометрів шляхів, 114 великих і середніх мостів, тисячі споруд та об'єктів залізничного господарства. На початку квітня 1945 року генерал Кабанов призначений начальником Головного Управління військово-відновлювальних робіт Народного комісаріату шляхів сполучення СРСР. Одночасно він був призначений начальником Залізничних військ Міністерства оборони СРСР. Цей високий пост він займав до виходу у відставку в 1968 році. 
Кілька років одночасно був заступником Міністра транспортного будівництва СРСР. У післявоєнні роки під його командуванням воїни-залізничники побудували залізничні магістралі Перм—Кізел, Усть-Каменогорськ—Зиряновська, Абакан—Тайшет, а також Західно-Карельську і Трансмонгольскую магістралі. З 1968 року генерал Кабанов у відставці. До самої смерті був головою Ради ветеранів Залізничних військ. Автор кількох робіт по тактиці та історії залізничних військ. Жив у Москві. Помер 27 лютого 1987 року. Похований на Кунцевському кладовищі Москви (ділянка 9). Джерела Біографія П. О. Кабанова на сайті «Мой фронт» П. О. Кабанов на сайті «Железнодорожные войска» Учасники Громадянської війни в Росії Учасники польсько-радянської війни Учасники Другої світової війни
{ "redpajama_set_name": "RedPajamaWikipedia" }
3,038
Climate Leaders What do Algae and Beer have to do with Climate Change? 01.05.20 By Climate Council When a few mates started a brewery in Sydney's Inner West back in 2012, they had no idea that eight years later it would become home to a 400 litre micro-algae bioreactor that glows fluro green and sucks carbon dioxide from the air. "We sometimes pinch ourselves and think 'wow, how did we end up here?'" said Richard Adamson, one of the three co-owners of the Young Henrys Brewery in Newtown, Sydney. Brewing beer isn't the first culprit that comes to mind when you think of carbon dioxide emitters. And while the process isn't a huge contributor to Australia's overall greenhouse gas emissions, the team at Young Henrys are showing what true local legends look like when it comes to taking climate action. "We just took the position that as leaders in our industry, we've got to show that leadership. For us, it was a matter of thinking 'well, we can't wait for someone else to come up with the solutions.'" The Young Henrys co-owners with the 400L microalgae bioreactor. From left: Richard Adamson, Dan Hampton and Oscar McMahon. Image credit: supplied/Yeah Rad Climate conscious beginnings Richard Adamson, Oscar McMahon and Dan Hampton have been conscious of their environmental impact ever since they launched the brewery. They started with returnable bottles to reduce waste, then settled on cans "as the best packaging option due to their high recyclability factor and low weight for transport," said Adamson. From there, they worked with a community solar farm and had solar panels installed on their roof, which are owned by the community. "It ends up costing us less, and the community gets a return from the investment, as well as reducing our greenhouse gas emissions on that front," said Adamson. But it wasn't until they met the scientists from the University of Technology Sydney Climate Change Cluster (C3), that their sights became set on algae. Is algae the solution? 
Algae is a powerful climate solution as it is extremely effective at converting carbon dioxide into oxygen. Carbon dioxide is greenhouse gas, and too much of it is being released into the atmosphere, which is driving climate change. Wondering what is climate change, and what can we do about it? Click here. Interestingly for the team at Young Henrys, yeast — which is the primary ingredient in beer — has an almost inverse relationship with algae, which is what provided the initial sparks of inspiration in terms of reducing their brewery's carbon emissions. Yeast converts sugar into alcohol and carbon dioxide, while algae take carbon dioxide and sunlight, and produce sugar and oxygen. "So we thought, if we can work these two things together, we could have a net — or maybe even net-negative — impact, in terms of actually reducing CO2 [carbon dioxide] emissions," said Adamson. The inverse relationship between yeast — one of the main ingredients in beer — and algae is what forms the basis of the algae experiment. In a partnership with the C3 group, the team at Young Henrys has set up two 400 litre bioreactors in the brewery, to apply the reverse relationship logic and counteract the emissions of the brewing process. Each bioreactor takes up about a metre squared of space, yet produces the same amount of oxygen as one hectare of Australian bush, according to the C3 team. So the small reactor punches well above its size. The life of the algae grown in the Young Henrys bioreactors doesn't end there – the C3 group take it back to the lab and use it in a whole bunch of other applications, including the production of pharmaceuticals. "In terms of numbers, the emissions reductions aren't hugely significant but it's really going to be about the application of the algae and how we can scale it up in the urban environment. That's where we'll really start to see the impact." Richard Adamson says 'there are huge economic opportunities in reducing emissions' in Australia. 
Image credit: supplied/Yeah Rad. Like many other Australian companies and entrepreneurs, the Young Henrys team have realised that there are great economic opportunities in developing, implementing and marketing climate solutions. They are looking into ways to package the project up to make it commercially viable for other breweries — or any producers of fugitive carbon dioxide emissions in general — to reduce their emissions. There's also the potential for a commercial avenue for the algae after it's grown, but this is all a few years down the track. Across the board, the Young Henrys Team are taking action on climate change into their own hands, despite a lack of action from some political bodies. "I think in Australia there's a bit of fear around the economic impacts," Adamson explained. "But with those challenges come massive opportunities. We're pretty well placed as a nation to take advantage of a low carbon economy, especially with our access to solar. "There are huge economic opportunities in reducing emissions." Would you like to learn more about climate solutions? Sign up to our mailing list, and we'll keep you in the loop. By Climate Council / 01 May 2020 More Climate Leaders More Climate Science More The Facts Cleaner energy: a revolution that ordinary people can drive State of the Climate Report Solar Panels in Melbourne's Western Suburbs Have Doubled NEW REPORT | From Paris to Glasgow: a World on the Move Renewable Energy Cleaner energy: a revolution that ordinary people can drive The Facts State of the Climate Report Cities/Towns Solar Panels in Melbourne's Western Suburbs Have Doubled
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
7,580
El Gerry Weber Open 2016 es un torneo de tenis jugado en césped al aire libre. Esta fue la 24ª edición del Gerry Weber Open, y forma parte de la gira mundial ATP World Tour 2016 en la categoría ATP 500 series. Se llevó a cabo en Halle, Alemania, del 13 de junio al 19 de junio de 2016. Cabezas de serie Individual Ranking del 6 de junio de 2016 Dobles Campeones Individual masculino Florian Mayer venció a Alexander Zverev por 6-2, 5-7, 6-3 Dobles masculino Raven Klaasen / Rajeev Ram vencieron a Lukasz Kubot / Alexander Peya por 7-6(5), 6-2 Véase también Torneos ATP en 2016 Torneo de Halle Referencias Enlaces externos Web oficial Torneo de Halle 2016
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,656
layout: post title: "Let team decide: hire or fire" subtitle: "One more decision your team should take over" date: 2013-06-17 author: Lena Barinova id: 181 categories: - Know-how --- In our company we have a recommended questionnaire to gather feedback about a new team member at the end of his/her probationary period. It's a standard form with very smart and useful questions such as > How well does the employee possess job-related knowledge and skills? > > How well does he/she work as part of a team? > > How well does the employee finish what he is committed to? and so on. Overall there are only 10 such questions and you may answer either by putting a number or by adding a more comprehensive comment. Recently I have tried something different. Instead of sending this form and asking guys to fill it in, I've sent a short email to team members with only 2 questions: > Do you want to continue working with John in one team? > > Why? The response rate was exactly as I expected: 100% of team members answered my questions (with the original questionnaire the response rate is approximately 40% after the first call and 60% after the reminder). Another positive aspect of this is that the decision was made by the team, not the manager. The manager in this case is only a facilitator. One thing you need to be ready for - properly reacting to answers. If you ask a straightforward question and get "No" - be ready to act straightforward as well: find another team for the guy or fire him. Do you still have probationary periods? How do you handle them?
{ "redpajama_set_name": "RedPajamaGithub" }
4,617
Follow Making it to the Makeover! What Does My OTTB Already Know? To contact Lindsay or for more information about training services including training, lessons and sales, please click here. Or follow us on Facebook!
{ "redpajama_set_name": "RedPajamaC4" }
9,944
Brincos são peças de joalheria ou bijuteria que servem para adornar as orelhas. Podem ser usados tanto por mulheres como por homens. Os brincos são uma espécie simples de piercing, atravessando o lóbulo ou qualquer outra parte externa das orelhas, ou podem ser presos externamente, por uma espécie de clipe. Os brincos podem ser feitos de inúmeros materiais, como metal, plástico, vidro, pedras preciosas ou contas. O tamanho é geralmente limitado pela capacidade física do lóbulo em levar o brinco sem sofrimento físico. Os brincos são utilizados em todo o mundo, nas mais variadas culturas, e é comum colocá-los nas meninas logo após seu nascimento. Brincos e os homens Embora seja mais popular o uso de brincos entre as mulheres, tem-se tornado popular o seu uso entre os homens também. Nos anos 70, o uso de brincos entre os homens voltou pelo underground por conta do movimento Punk. Na década de 80, o cantor George Michael popularizou o uso do brinco em apenas uma orelha. O uso de brincos entre os homens era comum na Pérsia e na Grécia antiga, como pode ser observado nas ruínas de pinturas e estátuas da Antiguidade. Piratas também são frequentemente associados ao uso de brincos. Os índios também usam o brinco como adorno. Não podemos esquecer que antes, nos tempos bíblicos, as mulheres de Israel tinham brincos que eram usados nos narizes. Isso ocorreu devido à cultura da época, em que isso era necessário para dar importância e valor à mulher daquela época. Ver também Moça com brinco de pérola, no Brasil, ou Rapariga com brinco de pérola, em Portugal (em holandês: Het meisje met de parel) – uma pintura do século XVII, de Johannes Vermeer. Girl with a Pearl Earring – um romance de Tracy Chevalier, inspirado na pintura. Moça com brinco de pérola – um filme de 2003, baseado no romance. Leitura de apoio van Cutsem, Anne, A World of Earrings: Africa, Asia, America, Skira, 2001. 
ISBN 88-8118-973-9 Holmes, Anita, Pierced and Pretty: The Complete Guide to Ear Piercing, Pierced Earrings, and How to Create Your Own, William Morrow and Co., 1988. ISBN 0-688-03820-4 Jolly, Penny Howell, "Marked Difference: Earrings and 'The Other' in Fifteenth-Century Flemish Artwork," in Encountering Medieval Textiles and Dress: Objects, Texts, Images, Palgrave Macmillan, 2002, pp. 195–208. ISBN 0-312-29377-1 Mascetti, Daniela and Triossi, Amanda, Earrings: From Antiquity to the Present, Thames and Hudson, 1999. ISBN 0-500-28161-0 McNab, Nan, Body Bizarre Body Beautiful, Fireside, 2001. ISBN 0-7432-1304-1 Mercury, Maureen and Haworth, Steve, Pagan Fleshworks: The Alchemy of Body Modification, Park Street Press, 2000. ISBN 0-89281-809-3 Steinbach, Ronald D., The Fashionable Ear: A History of Ear Piercing Trends for Men and Women, Vantage Press, 1995. ISBN 0-533-11237-0 Vale, V., Modern Primitives, RE/Search, 1989. ISBN 0-9650469-3-1 Joalharia Acessórios de moda Orelha
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,271
Q: Find file name not present in a directory I have a list of file names (not paths) in a List<string>. I have to find a List<string> of files that are NOT present in a directory. Right now I am iterating the files one by one and checking each one of them against all files in the folder. Is there any LINQ way of achieving the same thing? A: You can use Enumerable.Except like: List<string> compareList = new List<string>(); //.... items in the list DirectoryInfo di = new DirectoryInfo("C:\\"); var fileArray = di.GetFiles().Select(r => r.Name).ToArray(); var filesNotPresent = compareList.Except(fileArray); A: Depending on how many names you have in your list (and how many files are in the destination), you may find that the iterative approach is still the most efficient (but with some LINQ enhancements): var missingFiles = names.Where(x => !File.Exists(x));
{ "redpajama_set_name": "RedPajamaStackExchange" }
1,803
En linguistique, le terme prosodie (du latin prosodia, à son tour du grec ancien prosōidía « chant pour accompagner la lyre ; variation de hauteur de la voix »), tel qu'il est entendu en français, dénomme la branche de la phonétique et de la phonologie qui étudie ce qu'on appelle les « traits prosodiques » de la langue, nommés aussi « traits suprasegmentaux ». Ce sont principalement l'accent, le ton, l'intonation, la jointure, la pause, le rythme, le tempo et le débit. Le terme français a plusieurs correspondants en anglais. Prosody désigne l'ensemble des traits prosodiques, et la discipline qui les étudie est nommée par plusieurs termes : prosodics, suprasegmental phonetics (« phonétique suprasegmentale ») ou prosodic phonology (« phonologie prosodique »). Avant d'être adopté par la phonétique et la phonologie, « prosodie » était un terme utilisé dans la métrique (voir l'article Prosodie). Il a acquis dès l'Antiquité les sens « prononciation régulière des mots, qui respecte l'accent et la quantité (durée) des sons » et « organisation vocalique et consonantique d'un texte ». Le premier de ces sens a constitué la base de la métrique, la prosodie devenant l'ensemble des règles de versification qui concernent la quantité des voyelles, les faits accentuels et mélodiques. Le domaine de recherche de la prosodie est vaste et hétérogène, sa littérature se caractérisant par une grande diversité d'approches divergeant quant à ce qu'on peut prendre en compte en tant que traits prosodiques, à leur définition et à l'établissement de ce qui leur est commun. 
Généralités sur les traits prosodiques On considère généralement comme des traits prosodiques des phénomènes phoniques ayant pour caractéristique commune de se manifester simultanément avec les segments de la chaîne parlée, qui sont leurs supports : sons (phones du point de vue de la phonétique, phonèmes du point de vue de la phonologie) et segments qui en sont formés (syllabes, affixes, mots, syntagmes, propositions, phrases complexes, groupes de phrases correspondant aux paragraphes, discours). Bien que certains traits prosodiques (l'accent, le ton) se manifestent sur le son, ils ne peuvent être saisis sur le son isolé, mais seulement sur un segment plus grand, par rapport à leur manifestation sur d'autres sons. C'est pourquoi on dit que ces traits, ainsi que tous les autres, affectent des segments plus grands qu'un son. Les traits prosodiques se manifestant sur le son sont appelés intensifs, et ceux qui se manifestent sur des segments plus grands – extensifs, par exemple l'intonation. Certains traits ont un statut ambigu ou controversé. Ainsi, la quantité (durée) du son est abordée comme un trait d'entité segmentale, mais elle apparaît aussi en tant que l'un des traits prosodiques. Bien que ce soit une composante de l'accent, certains auteurs prennent en compte séparément l'intensité du son. Une autre composante de l'accent, la hauteur du son, est elle aussi considérée à part par certains auteurs. D'autres traitent la hauteur sans la dissocier du ton, qu'ils opposent à l'accent comme une alternative de celui-ci dans les langues dites à tons, où l'accent n'a qu'un rôle marginal selon eux. De l'avis de certains auteurs, on peut considérer comme prosodiques des traits telles la nasalité, dans les langues où une consonne nasale nasalise la voyelle qui la précède ou qui la suit, ou bien l'ouverture des voyelles, dans les langues où l'harmonie vocalique a un rôle important. 
Le ton, le rythme ou les pauses peuvent être des éléments appelés paralinguistiques, lorsqu'ils sont caractéristiques pour des façons de parler personnelles. Le timbre des sons peut aussi être un tel élément, que certains auteurs mentionnent parmi les traits prosodiques. Les traits prosodiques peuvent être abordés du point de vue phonétique (leurs caractéristiques acoustiques, par exemple), mais aussi du point de vue phonologique, dans la mesure où ces traits aussi remplissent la fonction de différencier des sens. Par analogie avec les phonèmes en tant qu'objets d'étude de la phonologie, les traits prosodiques sont appelées « prosodèmes » dans cette perspective, par certains auteurs. Les traits prosodiques sont liés non seulement aux unités segmentales, mais aussi entre eux. Ainsi, l'intonation est liée à la hauteur des sons, étant donnée par la variation de celle-ci d'un son à un autre. D'un autre côté, l'intonation est liée à la pause et à l'accent. Devant une pause qui sépare deux segments constituant des unités syntaxiques, l'intonation est à un certain niveau de hauteur. Par exemple, dans la phrase Il n'est pas parti, parce qu'il avait peur, l'intonation atteint sa hauteur maximale sur le i de parti, voyelle accentuée, le mot étant suivi d'une pause. Traits prosodiques L'accent Articles détaillés : Accent tonique, Accent de hauteur L'accent est un trait prosodique intensif, par lequel une syllabe est mise en relief par rapport à d'autres syllabes. C'est valable tout d'abord pour un mot polysyllabique, mais le mot à accent peut être mis ainsi en relief par rapport aux autres mots de son syntagme, un syntagme dans une proposition ou une proposition dans une phrase complexe. Les composantes principales de l'accent sont l'énergie (l'intensité) du son accentué et son niveau de hauteur (appelé aussi ton), ayant pour composante secondaire la quantité (la durée) de ce son. 
Selon le rôle fonctionnel plus ou moins important de l'une ou de l'autre des composantes principales de l'accent, on distingue plusieurs types de langues : Dans les langues à accent d'énergie (ou d'intensité ou dynamique ou expiratoire), que certains auteurs appellent « accent tonique », l'intensité du son accentué à un rôle primordial par rapport à sa hauteur. Le français entre dans cette catégorie. Dans les langues à accent de hauteur ou musical, la composante de hauteur a un certain rôle fonctionnel, à côté de l'intensité. De telles langues sont celles du diasystème slave du centre-sud ou le suédois, par exemple. Dans les langues à tons, la composante de hauteur a un rôle primordial par rapport à l'intensité. C'est le propre de langues comme le chinois ou le vietnamien. Certains linguistes ne distinguent que deux catégories, opposant d'un côté langues à accent, dans lesquelles ils incluent celles à accent d'énergie et celles à accent de hauteur, et, d'un autre côté, langues à tons. Le ton Le ton est un trait prosodique intensif qui consiste en le niveau de hauteur du son ou en sa variation, limités à la syllabe d'un mot. Dans les langues à tons (le chinois, par exemple), les tons ont une fonction phonologique prépondérante, étant nommés « tonèmes » de ce point de vue. Le ton est aussi présent, étant d'habitude appelé « hauteur », en tant que composante de l'accent appelé d'énergie ou d'intensité. Dans certaines langues, ce n'est qu'un corollaire de l'intensité, sans valeur fonctionnelle (par exemple en français), dans d'autres il a une fonction phonologique limitée par rapport à l'intensité, par exemple en norvégien. L'intonation L'intonation est un trait prosodique extensif qui consiste en la variation de la hauteur des sons dans un segment de la chaîne parlée, donnant à ce segment une certaine ligne mélodique. 
Elle peut avoir une fonction grammaticale, par exemple dans la distinction des phrases selon le but de la communication (déclaratif, interrogatif ou exclamatif) et/ou dans l'expression de l'état affectif du locuteur, de son attitude ou de ses intentions de communication. Selon sa direction, l'intonation peut être principalement ascendante, descendante ou uniforme. La jointure La jointure ou joncture est une limite entre deux segments de la chaîne parlée : syllabes, morphèmes, mots, syntagmes, propositions ou phrases. Celle de l'intérieur d'un mot est appelée jointure interne et celle entre deux mots – jointure externe. D'un autre point de vue, l'un des types de ce trait est la jointure effective. La pause aussi en fait partie. L'une des jointures effectives se réalise sous forme de coup de glotte, par exemple en anglais, entre une syllabe terminée en voyelle avant une syllabe à initiale vocalique, ex. co-operate « coopérer ». Parfois, la jointure interne entre deux syllabes ou la jointure externe est marquée par un groupe de consonnes qui ne peuvent pas se suivre à l'intérieur d'une syllabe, par exemple, en roumain, bp, mh, nm, sz, etc. Il y a, par exemple, jointure externe effective dans le syntagme merg repede « je vais vite », à cause du voisinage des consonnes rgr. Il y a également jointure externe entre un mot avec la même consonne finale que la consonne initiale du mot suivant, groupe qui à l'intérieur d'un mot se prononcerait comme une seule consonne, ex. Good day! [ɡʊddeɪ] « Bonjour ! » vs sudden [sʌdən] « brusque ». Le type de jointure opposé au précédent est la jointure virtuelle. Externe, c'est une jointure entre mots qui forment un syntagme qu'on peut confondre dans la parole avec un autre syntagme, ex. l'essence [l#esãs] vs les sens [le#sãs], a name [ə#neɪm] « un nom » vs an aim [ən#eɪm] « une cible ». 
La jointure virtuelle peut être interne aussi, par exemple entre un morphème racine qui existe en tant que mot également, et un morphème affixe, lorsqu'on sait que ce sont des morphèmes à part, ex. farouchement [faruʃ#mã] (< farouche + suffixe formateur d'adverbes). La pause La pause est une interruption dans le flux articulé de la parole, étant une partie intégrante de ce flux. Il y a plusieurs types de pauses, établis de plusieurs points de vue. De l'un, la pause peut être voulue ou non voulue par le locuteur. L'une des pauses non voulues est celle provoquée par l'inspiration en tant que phase de la respiration. Une autre de ce genre est la pause d'hésitation, quand le locuteur ne sait pas quoi dire ou comment dire ce qu'il veut exprimer. Certains auteurs distinguent pause d'hésitation silencieuse et pause d'hésitation remplie par une séquence sonore non articulée, telle euh. La pause voulue peut avoir une motivation pragmatique, par exemple celle utilisée par un orateur pour faire de l'effet sur son auditoire. La pause a aussi des fonctions linguistiques, délimitant des segments de diverses longueurs : syntagmes, propositions, phrases, groupes de phrases. La pause est d'autant plus longue, qu'elle délimite des segments plus longs, la plus longue étant celle entre groupes de phrases. Ces pauses sont en général rendues à l'écrit par la ponctuation, et les groupes de phrases constituant une unité de sens – par des alinéas. Les fonctions syntaxiques de la pause sont importantes. Dans le cadre de la phrase complexe, elle délimite les propositions appelées isolées, certains types de propositions l'étant facultativement, d'autres – obligatoirement. Exemples : la proposition incise : Qu'allons-nous faire, demanda-t-il ? 
; certaines propositions subordonnées, surtout antéposées à la principale : Comme nous n'avions pas beaucoup de temps, nous avons dû prendre un taxi ; la proposition relative ou circonstantielle intercalée dans la principale : La réforme des diplômes, qui a été préparée pendant plusieurs années […], entrera en vigueur l'an prochain. Dans la proposition, l'apposition explicative est toujours délimitée par des pauses : M. Guérin, directeur de la cave coopérative, nous a fait visiter la cave. La pause peut distinguer la fonction syntaxique d'un élément d'une construction, ex. Problema dificilă a rămas nerezolvată « Le problème difficile est resté sans solution » (épithète déterminative d'identification/qualification) vs Problema, dificilă, a rămas nerezolvată « Le problème, difficile, est resté sans solution » (épithète détachée, explicative, à nuance circonstantielle de cause). En roumain, la pause peut parfois tenir lieu de verbe de la proposition : Eu, acasă « Moi, je suis rentré (chez moi) » (littéralement « Moi, chez moi »). Le russe est une langue dans laquelle la pause correspond à la copule « être » au présent de l'indicatif, d'ordinaire obligatoirement omise : Женщина красавица (Jenchtchina krassavitsa) « La femme est une beauté ». La pause distingue cette proposition du mot composé женщина-красавица « belle femme ». L'absence, respectivement la présence de la pause peut différencier les sens d'un segment. Exemples : : Il n'est pas parti parce qu'il avait peur vs Il n'est pas parti, | parce qu'il avait peur ; : háromnegyed négy felé « vers quatre heures moins le quart » (litt. « trois quarts quatre vers ») vs három, | negyed négy felé « vers trois heures, trois heures et quart » (litt. « trois, quart quatre vers »). Parfois, c'est la place de la pause qui remplit cette fonction. 
Par exemple, le segment [sø ki sav lœ(ː)ʁ sufləʁɔ̃] peut être divisé différemment en deux groupes, chacun étant marqué par l'accent unique du groupe et l'intonation de celui-ci, qui culmine sur la voyelle accentuée : Ceux qui savent | leur souffleront vs Ceux qui savent l'heure | souffleront. La pause est liée à d'autres traits prosodiques, à l'intonation notamment. En roumain, par exemple, devant la pause de la fin d'une phrase déclarative, l'intonation descend. À la fin d'une phrase interrogative totale (sans mot interrogatif), l'intonation monte, ainsi qu'à la fin d'une subordonnée antéposée à la principale. Le rythme Dans la parole, le rythme est donné par la répétition à des intervalles approximativement réguliers d'un facteur de proéminence d'une syllabe (accent, durée plus grande du noyau de syllabe, ton plus haut) après un certain nombre de syllabes atones, plus brèves, à ton moins haut, respectivement. Il est donc directement dépendant de ces facteurs. Il s'agit souvent d'une combinaison de ceux-ci. Le rythme est réalisé de façon consciente en poésie et parfois en prose littéraire. Dans la langue habituelle on réalise du rythme dans des expressions figées de façon semblable à celle de la poésie, associé à d'autres éléments de celle-ci, telle l'allitération, la rime ou l'assonance : sans feu ni lieu, à la queue leu leu. Dans la langue habituelle, le rythme est perceptible en fonction du poids des intervalles réguliers dans la langue donnée. En français, par exemple, les groupes de mots constituant des unités de sens et délimités par des pauses, appelés justement « groupes rythmiques », tendent à avoir un nombre ni trop petit ni trop grand de syllabes, entre trois et sept environ. Les groupes les plus fréquents sont de trois et de quatre syllabes, par conséquent les rythmes prédominants sont : le rythme ternaire : Vous avez | certainement | répondu | qu'il viendrait | en voiture et le rythme quaternaire : Vous avez dit | qu'il arriverait | mardi matin. 
À part celles-ci, il y a encore de nombreuses autres combinaisons possibles de groupes de deux à quatre syllabes, par exemple : 4 | 2 | 4: Dépêchez-vous | d'écrire, | je vous attends ; 2 | 3 | 4: Il dit | qu'il viendra | un peu plus tard.
Celles-ci sont d'autant plus longues et plus nombreuses que le tempo est plus lent. Notes et références Sources bibliographiques (BDL) (PDF à télécharger) Articles connexes Accent de hauteur Accent tonique Intonation prosodique Ton (linguistique) Tonème Phonétique suprasegmentale Pragmatique
{ "redpajama_set_name": "RedPajamaWikipedia" }
8,993
This very real technology forms the backbone of Bitcoin, the mysterious cryptocurrency released in 2009.
"To take over the network, an attacker would have to control more than 50% of its total computing power," Augier explains.
"It's used to carry out faster, cheaper and more secure financial transactions," explains Andreas Lenzhofer, a partner at PwC Strategy&. "Customers don't notice any significant change. It's the infrastructure suppliers that are more directly impacted." Most payment systems are centralised. Transactions between financial services companies, for example, go through several intermediaries and are cleared by central banks. The firms then have to synchronise their internal ledgers. The whole process is time-consuming, increases risk and requires capital. In a decentralised system, transactions can be settled in a matter of minutes or seconds. This could save firms up to $20 billion a year by 2022, according to Santander bank. Large banks do not want to miss that opportunity. UBS, Goldman Sachs, JP Morgan and 22 other institutions have invested in R3 CEV, a start-up that is developing a standardised architecture for private ledgers. Nasdaq, the U.S. tech-sector stock exchange, has introduced a system built on blockchain technology to record trading in unlisted companies. The first transactions using the new platform were carried out at the end of 2015. Electronic voting: a matter of security Security remains the main hurdle to online voting. Decentralised ledgers are safe and cannot be tampered with. And those are key advantages when applying the technology to e-voting systems. "We definitely need to move in that direction," says Eric Dubuis, a professor at the University of Applied Sciences and Arts in Bern. "But the Bitcoin mechanism can't simply be replicated. We need to create a new system specifically for online voting." That would mean distributing the virtual "display table" of voting results on multiple computers that are connected but independent. All ledgers would have to be synchronised to eliminate any discrepancies in databases. 
"When you archive a music file on the network," says Roussel, "you define how it will be shared – pay-per-view, tip, etc.
Automating the process will allow authors to circumvent the complex companies that manage rights." The mysterious Mr Nakamoto First released in 2009, Bitcoin still holds a few secrets. The creator of the cryptocurrency – and blockchain technology –who goes by the pseudonym Satoshi Nakamoto has never revealed his, her or their identity. Journalists have sworn they have found the inventor. In December 2015 Wired magazine believed it had its man, Craig Steven Wright, an unknown 44-year old Australian computer scientist with a collection of degrees. But other sources claim that it's a hoax. The mystery remains to be solved. Maxime Augier (EPFL), Alexis Roussel (Bity), Andreas Lenzhofer (PwC Strategy&.), Eric Dubuis (University of Bern), alexandria.media The perils of ranking Global lists are a key source of information for students choosing a university. But how relevant are they to the learning experience? No super-vegetables for Europe BY Lorène Mesot The gene-editing tool CRISPR could help farmers overcome the challenges of malnutrition. But European legislation has closed the door to that technology.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
7,978
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Copyright 2016-present Open Networking Laboratory
  ~
  ~ Licensed under the Apache License, Version 2.0 (the "License");
  ~ you may not use this file except in compliance with the License.
  ~ You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <artifactId>onos-dependencies</artifactId>
        <groupId>org.onosproject</groupId>
        <version>1.10.0-SNAPSHOT</version>
        <relativePath>../../lib/pom.xml</relativePath>
    </parent>

    <artifactId>onos-incubator-protobuf-dependencies</artifactId>
    <!-- Aggregator/parent POM only: no artifact of its own is built. -->
    <packaging>pom</packaging>

    <!-- NOTE(review): fixed typo "dependecies" -> "dependencies". -->
    <description>ProtoBuf dependencies</description>
    <url>http://onosproject.org</url>

    <properties>
        <!-- Single source of truth for the protobuf runtime and protoc versions. -->
        <protobuf.version>3.0.0</protobuf.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java</artifactId>
            <version>${protobuf.version}</version>
        </dependency>
    </dependencies>

    <build>
        <extensions>
            <!-- Detects the build platform so the matching protoc binary
                 can be resolved via ${os.detected.classifier} below. -->
            <extension>
                <groupId>kr.motd.maven</groupId>
                <artifactId>os-maven-plugin</artifactId>
                <version>1.4.1.Final</version>
            </extension>
        </extensions>
        <pluginManagement>
            <plugins>
                <plugin>
                    <groupId>org.apache.karaf.tooling</groupId>
                    <artifactId>karaf-maven-plugin</artifactId>
                    <version>3.0.8</version>
                    <extensions>true</extensions>
                </plugin>
                <!-- Generates Java sources from .proto files in child modules,
                     using a protoc executable matching the detected OS. -->
                <plugin>
                    <groupId>org.xolstice.maven.plugins</groupId>
                    <artifactId>protobuf-maven-plugin</artifactId>
                    <version>0.5.0</version>
                    <configuration>
                        <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}</protocArtifact>
                    </configuration>
                    <executions>
                        <execution>
                            <goals>
                                <goal>compile</goal>
                            </goals>
                        </execution>
                    </executions>
                </plugin>
                <plugin>
                    <groupId>org.apache.felix</groupId>
                    <artifactId>maven-scr-plugin</artifactId>
                    <executions>
                        <execution>
                            <id>generate-scr-srcdescriptor</id>
                            <goals>
                                <goal>scr</goal>
                            </goals>
                        </execution>
                    </executions>
                    <configuration>
                        <!-- avoid searching into wrong source path -->
                        <scanClasses>true</scanClasses>
                        <supportedProjectTypes>
                            <supportedProjectType>bundle</supportedProjectType>
                            <supportedProjectType>war</supportedProjectType>
                        </supportedProjectTypes>
                    </configuration>
                </plugin>
                <!-- Registers the protobuf-generated sources directory with the
                     compiler so generated classes are part of the build. -->
                <plugin>
                    <groupId>org.codehaus.mojo</groupId>
                    <artifactId>build-helper-maven-plugin</artifactId>
                    <version>1.12</version>
                    <executions>
                        <execution>
                            <id>add-source</id>
                            <phase>generate-sources</phase>
                            <goals>
                                <goal>add-source</goal>
                            </goals>
                            <configuration>
                                <sources>
                                    <source>${project.build.directory}/generated-sources/protobuf/java</source>
                                </sources>
                            </configuration>
                        </execution>
                    </executions>
                </plugin>
            </plugins>
        </pluginManagement>
    </build>
</project>
{ "redpajama_set_name": "RedPajamaGithub" }
3,282
/**
 * Package header for the OrderBook storage layer.
 * <p>
 * Created by Andrey on 30.05.2017.
 * <p>
 * NOTE(review): Javadoc before a {@code package} declaration is only picked up
 * by the javadoc tool when this file is named {@code package-info.java} — verify.
 * The mixed-case package name also departs from the all-lowercase Java
 * convention, but renaming it would break every import elsewhere.
 */
package ru.apetrov.OrderBook.Storage;
{ "redpajama_set_name": "RedPajamaGithub" }
1,578
Overview In Depth Galleries Comets are cosmic snowballs of frozen gases, rock and dust that orbit the Sun. When frozen, they are the size of a small town. When a comet's orbit brings it close to the Sun, it heats up and spews dust and gases into a giant glowing head larger than most planets. The dust and gases form a tail that stretches away from the Sun for millions of miles. There are likely billions of comets orbiting our Sun in the Kuiper Belt and even more distant Oort Cloud. The current number of known comets is: Go farther. Explore Comets in Depth › Key Science Targets Kid-Friendly Comets Comets orbit the Sun just like planets and asteroids do, except a comet usually has a very elongated orbit. As the comet gets closer to the Sun, some of the ice starts to melt and boil off, along with particles of dust. These particles and gases make a cloud around the nucleus, called a coma. The coma is lit by the Sun. The sunlight also pushes this material into the beautiful brightly lit tail of the comet. Visit NASA Space Place for more kid-friendly facts. NASA Space Place: All About Comets › NASA's Planetary Photojournal: Asteroids & Comets National Space Science Data Center: Comets To celebrate the 10th anniversary of the spacecraft's launch, the mission team has gathered their top 10 images and graphics based on WISE and NEOWISE data. Celebrating 10 Years of the WISE Spacecraft Using data from NASA's TESS, astronomers have captured an image of an explosive emission of dust, ice and gases from comet 46P/Wirtanen in late 2018. NASA's Exoplanet-Hunting Mission Catches a Natural Comet Outburst in Unprecedented Detail NASA's Hubble Space Telescope has given astronomers their best look yet at an interstellar visitor — comet 2I/Borisov. Hubble Observes 1st Confirmed Interstellar Comet Comet C/2019 Q4 (Borisov) has excited the astronomical community because it appears to have originated from outside the solar system. 
Newly Discovered Comet Is Likely Interstellar Visitor Twenty-five years ago, humanity first witnessed a collision between a comet and a planet. How Historic Jupiter Comet Impact Led to Planetary Defense After nearly 16 years of exploring the cosmos in infrared light, NASA's Spitzer Space Telescope will be switched off permanently on Jan. 30, 2020. How NASA's Spitzer Has Stayed Alive for So Long The mystery of why Earth has so much water, allowing our "blue marble" to support an astounding array of life, is clearer with new research into comets. Comet Provides New Clues to Origins of Earth's Oceans Five years of data have significantly advanced scientists' knowledge of asteroids and comets in our solar system, as well as the stars and galaxies beyond. NEOWISE Celebrates Five Years of Asteroid Data ESA's Rosetta mission revealed geological stress sculpted comet 67P/Churyumov–Gerasimenko over billions of years. Rosetta's Comet Sculpted by Stress As the brilliant comet 46P/Wirtanen streaked across the sky, NASA telescopes caught it on camera from multiple angles. NASA Telescopes Take a Close Look at the Brightest Comet of 2018 On Sunday, Dec. 16, the comet known as 46P/Wirtanen will make one of the 10 closest comet flybys of Earth in 70 years, and you may even be able to see it without a telescope. See a Passing Comet This Sunday On New Year's Day 2019, NASA's New Horizons will fly by a distant Kuiper Belt Object—and open a new chapter in how we define our place in the cosmos. To Ultima and Beyond: Our Solar System's Small Worlds December brings the Geminids, a visible comet, and a fond farewell. What's Up - December 2018 Here's a lineup of the various kinds of small bodies that orbit the Sun and sometimes impact planets and each other. The Usual Suspects: a Rogues Gallery of Asteroids, Comets and Other Witnesses to History Small worlds witnessed dramatic changes in our solar system that occurred long before humans. Here's how we're studying them. 
Cosmic Detective Work: Why We Care About Space Rocks November brings planets, an asteroid, a comet and the Leonids meteor shower. What's Up - November 2018 A mission of firsts makes history at a comet despite setbacks. How We Saved the Deep Space 1 Spacecraft The path through the solar system is a rocky road. Asteroids, comets, Kuiper Belt Objects—all kinds of small bodies of rock, metal and ice are in constant motion as they orbit the Sun. But what's the difference between them? Why do these miniature worlds fascinate space explorers so much? 10 Things: What's That Space Rock? To most of us, dust is an annoyance. But these tiny particles that float about and settle on surfaces play an important role across the solar system. 10 Things: Dust in the Wind (on Mars and Well Beyond) Five things we know and five things we don't know about the first confirmed interstellar object to pass through our solar system. 10 Things: Mysterious 'Oumuamua 'Oumuamua, the first known interstellar object to travel through our solar system, got an unexpected boost in speed and shift in trajectory last year, a new study shows. Our Solar System's First Known Interstellar Object Gets Unexpected Speed Boost Asteroid 2018 LA entered Earth's atmosphere at about 9:44 a.m. PDT (12:44 p.m. EDT, 16:44 UTC), and later disintegrated in the upper atmosphere near Botswana, Africa. Tiny Asteroid Discovered Disintegrates Hours Later Comets that take more than 200 years to make one revolution around the Sun are notoriously difficult to study. NASA's WISE spacecraft, scanning the entire sky at infrared wavelengths, has delivered new insights about these distant wanderers. Large, Distant Comets More Common Than Previously Thought NASA has selected two finalist concepts for a robotic mission planned to launch in the mid-2020s: a comet sample return mission and a drone-like rotorcraft that would explore potential landing sites on Saturn's largest moon, Titan. 
NASA Invests in Concept Development for Missions to Comet, Saturn Moon Titan
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
6,118
namespace CornerFlag.Data
{
    using System.Data.Entity;
    using System.Data.Entity.Infrastructure;
    using CornerFlag.Data.Models;
    using CornerFlag.Data.Models.Entities;
    using CornerFlag.Data.Models.People;

    /// <summary>
    /// Abstraction over the application's Entity Framework context, exposing one
    /// <see cref="IDbSet{TEntity}"/> per persisted entity plus the context-level
    /// operations (save, entry tracking, generic set access, disposal).
    /// Intended to decouple consumers from the concrete DbContext, e.g. for testing.
    /// </summary>
    public interface ICornerFlagDbContext
    {
        /// <summary>Identity users. The only settable set here — presumably required
        /// by ASP.NET Identity integration; confirm against the implementing context.</summary>
        IDbSet<ApplicationUser> Users { get; set; }

        /// <summary>Countries lookup set.</summary>
        IDbSet<Country> Countries { get; }

        /// <summary>Stadiums (match venues).</summary>
        IDbSet<Stadium> Stadiums { get; }

        /// <summary>Matches. NOTE(review): the property is named "Games" while the
        /// entity type is <see cref="Match"/> — possibly to avoid clashing with
        /// another "Matches" member; verify before renaming for consistency.</summary>
        IDbSet<Match> Games { get; }

        /// <summary>Players.</summary>
        IDbSet<Player> Players { get; }

        /// <summary>Teams.</summary>
        IDbSet<Team> Teams { get; }

        /// <summary>Clubs.</summary>
        IDbSet<Club> Clubs { get; }

        /// <summary>Competitions (e.g. leagues or cups — semantics defined by the model).</summary>
        IDbSet<Competition> Competitions { get; }

        /// <summary>Competition rounds.</summary>
        IDbSet<Round> Rounds { get; }

        /// <summary>Seasons.</summary>
        IDbSet<Season> Seasons { get; }

        /// <summary>The underlying <see cref="DbContext"/> instance, for callers that
        /// need raw EF context access not covered by this interface.</summary>
        DbContext DbContext { get; }

        /// <summary>Persists all pending changes.</summary>
        /// <returns>The number of state entries written to the store.</returns>
        int SaveChanges();

        /// <summary>Releases the underlying context's resources.</summary>
        void Dispose();

        /// <summary>Gives access to change-tracking information for <paramref name="entity"/>.</summary>
        /// <typeparam name="TEntity">The entity type being tracked.</typeparam>
        /// <param name="entity">The entity whose entry is requested.</param>
        DbEntityEntry<TEntity> Entry<TEntity>(TEntity entity) where TEntity : class;

        /// <summary>Returns the set for an arbitrary entity type <typeparamref name="T"/>.</summary>
        IDbSet<T> Set<T>() where T : class;
    }
}
7,733
Q: Dynamic stored Procedure pulling data from 3 different table i have three different sql query, and i want to convert it in single stored Procedure. based on passing parameter we can get output from either 1st, 2nd or 3rd. 1st query: SELECT NAME, ADDRESS, CITY FROM LOCATION WHERE REGION <> 3 2nd query: SELECT NAME, ADDRESS, CITY FROM LOCATION WHERE REGION = 3 3rd query: SELECT NAME, OFFICE_ADDRESS, RES_ADDRESS,PROPERTY_TAX FROM COUNTRY WHERE REGION = 'ZUR' May anyone please help me how to acheive this. Thanks a ton A: To the first two selects it makes sense to make a IF statement. But not to the last because the number of fields is different. Even if most of the columns on LOCATION and COUNTRY have the same meaning that can add problems in maintenance and add complexity to your code. Also using more than a table for same purpose is a symptom of bad design. IF (@FLAG = 1) BEGIN SELECT NAME, ADDRESS, CITY FROM LOCATION WHERE REGION != 3 END ELSE BEGIN SELECT NAME, ADDRESS, CITY FROM LOCATION WHERE REGION = 3 END Create another method/SP to fetch the data on COUNTRY Note: for the last MS SQL Server versions IF is the recomended way for improve performance. Also using CASE statements can mean it need to eval the REGION column for each row in the table. Note: Learn about parameter sniffing to avoid it. Conditionals messing with SP behaviour are a good way to get parameter sniffing problems.
{ "redpajama_set_name": "RedPajamaStackExchange" }
1,026
Q: in Redux RTK getting " non-serializable value was detected in an action" for a Date? With Redux RTK I'm getting this error "non-serializable value was detected in an action", however I just have Date's here, and they are serializable right? So wondering why this is? A: Redux does not serialize objects for you You are currently putting the class instance of the date so it's not serializable (because it's the class instance) however you can serialize a Date in Javascript Are dates serializable? You can call new Date().toISOString() Which will produce a string in the store
{ "redpajama_set_name": "RedPajamaStackExchange" }
2,487
Q: I have to create a 2 dimensional list for a board game with n rows and p columns in a rectangular shape without numpy and GUI I succeeded in creating the empty board. But the problem is that I want to display it in a rectangular shape without the use of numpy or any other libraries. I tried the 'join.str' but i cannot manipulate the list afterwards. Is there a way to do it. Here is the code n = 3#number of rows p = 2#number of columns board = [] def newBoard(n, board, p): board = [[0] * p for i in range(n)] print(board) newBoard(n, board, p) The output of the code is displayed in this form. [[0,0],[0,0],[0,0]] The display that i want to obtain should be in this form. [0,0] [0,0] [0,0] A: Iterate over the rows and print them individually: >>> board = [[0, 0], [0, 0], [0, 0]] >>> for r in board: ... print(r) ... [0, 0] [0, 0] [0, 0] Or you could convert each row to a string, and then join with new-line (\n) characters: >>> print('\n'.join(str(r) for r in board)) [0, 0] [0, 0] [0, 0] Or if you wanted it took look more like that of numpy arrays: >>> print('['+'\n '.join(str(r) for r in board)+']') [[0, 0] [0, 0] [0, 0]] A: This is one way: n = 3 #number of rows p = 2 #number of columns def newBoard(n, board, p): board = [[0] * p for i in range(n)] for i in range(len(board)): print(board[i]) newBoard(n, board, p) # [0, 0] # [0, 0] # [0, 0]
{ "redpajama_set_name": "RedPajamaStackExchange" }
9,517
"use strict";

// Registers the pattern(s) used to recognize "skip navigation" links by their
// link text, e.g. "skip to content", "jump to main", "skip to post"
// (case-insensitive). Stored as an array, so additional locales/phrasings can
// be appended. NOTE(review): assumes `quail.strings` is already defined by the
// surrounding Quail library before this file loads — confirm load order.
quail.strings.skipContent = [/(jump|skip) (.*) (content|main|post)/i];
{ "redpajama_set_name": "RedPajamaGithub" }
7,322
Home › Christmas in Pennsylvania, 50th Anniversary Edition Christmas in Pennsylvania, 50th Anniversary Edition By Alfred L. Shoemaker and Don Yoder - Afterword by Don Yoder The return of a bestselling classic with new material. Full-color vintage images for the first time. A new selection of recipes from Pennsylvania's Christmas past. Alfred L. Shoemaker and Don Yoder were founders of the Pennsylvania Folklife Society and both served as editor of its serial, Pennsylvania Folklife. The late Shoemaker wrote several ground-breaking books on the Pennsylvania Dutch. Don Yoder was cofounder of the Pennsylvania Folklife Society, longtime editor of its journal Pennsylvania Folklife, and Professor of Folklife Studies at the University of Pennsylvania from 1956 to 1996. He currently resides in Devon, Pennsylvania.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,623
En variabel rente er en rente, der bevæger sig op og ned med den generelle markedsrentes udvikling. Den fastsættes for ind- og udlån typisk enten hver 3., 6. eller 12. måned eller op til hvert 10. år. Lån med variabel rente kaldes også for et tilpasningslån, fordi renten tilpasses løbende. Lån med variabel rente Fordele Som regel vil man have en lavere rente på lånet end ved fast forrentede lån, og derfor bliver ydelsen lavere. Når renten falder, falder ydelsen automatisk svarende hertil. Den lavere rente betyder, at man i starten afdrager relativt mere på lånet i forhold til et tilsvarende lån med samme løbetid og med en (højere) fast rente. Risici Renten på et lån med variabel rente tilpasses løbende, og der er ikke nogen øvre eller nedre grænse. Det betyder at man er helt afhængig af markedsrenten, og det kan der være usikkerhedselementer i. Hvis man ønsker at minimere sin risiko for stigende renter, findes der forskellige produkter med renteloft, så renten aldrig kan blive højere end det loft, der er aftalt med banken eller realkreditinstituttet. Variationer Man kan vælge lån med delvis rentetilpasning, således at kun dele af lånet tilpasses ved hver rentetilpasning. Eksempel: Udgangspunktet er et lån med en rente før rentetilpasning på 4%, og markedsrenten er 5% på tilpasningstidspunktet. På et tilpasningslån med fuld rentetilpasning hvert år (et såkaldt F1-lån), bliver lånets rente i det kommende år 5%. På et tilpasningslån med 30% delvis rentetilpasning hvert år (et såkaldt P30-lån), er lånets rente i det kommende år 4,3% (0,30*5%+0,70*4%). Se også Renteswap Renteformer
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,119
\section{Introduction} The BL Lac object H\,2356-309 (z=0.165) is a High-frequency-peaked BL Lac (HBL), also called High-Synchrotron-Peaked blazar (HSP, \citep{2lac}). It is usually bright in the X-ray band, and during BeppoSAX observations in 1998 it presented a synchrotron peak in the spectral energy distribution (SED) above few keV, which is the defining characteristic of the so-called ``extreme BL Lacs" \citep{extreme}. At VHE, H\,2356-309 was detected for the first time in 2004 by HESS \citep{nature,hess2356}. The VHE spectrum was found to be significantly harder than expected from a source at that redshift, considering the softening effects of $\gamma$-$\gamma$ absorption on the diffuse extragalactic background light (EBL), in the detected energy range. Together with other even harder sources, this fact lead to the discovery of a low intensity of the EBL in the component produced by the direct starlight \citep{nature}. Since 2004, H\,2356-309 has been monitored by HESS for several years, reaching a total detection of $\sim$13 $\sigma$ in the timespan 2004-2007, with an average flux at the level of $\sim$1.6\% of the Crab flux (above 240 GeV). In this epoch, three simultaneous multi-wavelength campaigns were performed, in 2004 with RXTE and 2005 with XMM-Newton \citep{hess2356,icrc08}, which allowed the characterization of its SED. The results of these campaigns, the VHE monitoring and a synchrotron self-Compton (SSC) modeling of the data were published by the HESS Collaboration in Abramowski et al. 2010 \citep{2356mwl}. In that paper, however, while all the data analysis is correct, the HESS Collaboration has apparently misinterpreted the VHE data, providing a wrong assessment of the intrinsic VHE properties of the source. It is also not consistent with previous publications on the same results. 
This contribution presents the arguments against the interpretation in \citep{2356mwl} and tries to provide a more accurate description of the actual gamma-ray properties (from the GeV to the TeV band) of this BL Lac object. \section{Wrong assessment of the intrinsic VHE properties by HESS} Fig. \ref{f1} shows the overall SED of H\,2356-309 during the HESS multiwavelength campaigns in 2004 and 2005 (from \citep{2356mwl}). The two lines show the single-zone SSC modeling proposed by the HESS Collaboration, with the dashed lines representing the source intrinsic emission (i.e. before absorption effects on the EBL calculated according to \cite{franceschini}). This SSC model is claimed by HESS to represent well the gamma-ray SED properties, with a Compton peak around 50-100 GeV and a steep intrinsic VHE spectrum: the local slope of the intrinsic SSC model (dashed line) in the detected VHE range is $\Gamma_{int}\approx2.65-2.7$. However, Fig. \ref{f2} shows this not to be the case. Accounting for EBL absorption --either by correcting the observed data points, as done here, or by fitting an absorbed model-- the intrinsic spectrum is in fact much harder. Fig. \ref{f2} presents the overall 2004-2006 average HESS data from \citep{2356mwl}, corrected for EBL absorption with the same EBL calculation \citep{franceschini}. Using a power-law model, the intrinsic photon index of the absorption-corrected data is $\Gamma_{int}=1.97\pm0.16$ (statistical error only), or $\Gamma_{int}=1.91\pm0.18$ excluding the last point (which is more like an upper limit, see \citep{2356mwl}). The discrepancy with the slope of the SSC model is large and highly significant: the $\Delta\chi^2$ needed to recover an index of 2.65 is 15, corresponding to a probability of $\sim$1E-4 (for 1 parameter). The HESS SSC model, therefore, is excluded by the data at $\sim$99.99\% confidence level. Note that the 2004-2006 overall spectrum is mostly dominated --90\% of the excess signal-- by the 2004-2005 data set. 
Moreover, the spectrum in 2006 seems to have been steeper than in 2004-2005 (see Table 4 in \cite{2356mwl}), thus the 2004-2005 spectrum should actually be slightly harder than the shown average, increasing the discrepancy. The importance of this discrepancy is not merely quantitative, but qualitatively: it changes dramatically the luminosity and the location of the gamma-ray peak. The latter is in fact at energies one-to-two orders of magnitude higher than claimed by HESS, and at lower apparent fluxes (see Fig. \ref{f3}). This changes completely the physical parameters and the character of the SED. It cannot therefore be accepted the explanation in the HESS paper about the ``slight difference between model and data" as due to ``the inclusion of all the multiwavelength data in the curved SSC fits", because therein lies the core of the problem: precisely because constrained by the observed shape of the synchrotron emission, a one-zone SSC model (with these parameters) is NOT able to reproduce the intrinsically hard HESS data. \begin{figure} \includegraphics[angle=-90,width=85mm]{hesspapersed.eps} \caption{The SED of H\,2356-309 in different epochs, from the HESS paper (Fig. 8 in \citep{2356mwl}). The XMM observations in 2005 are plotted in color (blue for June 13, red for June 15), while the RXTE 2004 data are plotted as black triangles. In the VHE band, the observed (i.e. EBL-absorbed) HESS data are plotted, as black triangles and red dots for the years 2004 and 2005, respectively. The curves are the HESS modeling of the 2005 SEDs with a single-zone SSC model, as described in \citep{2356mwl}, with (solid lines) and without (dashed lines) the EBL effects included \citep{franceschini}. The SSC model for the 2004 data (RXTE and HESS) is identical to the fit to the June 15, 2005 data (red curve). 
} \label{f1} \end{figure} \begin{figure} \includegraphics[width=85mm]{costam2356_f2.eps} \caption{Zoom of the SED in the VHE band: the average 2004-2006 HESS data are plotted, this time corrected for absorption with the EBL \citep{franceschini}. The dashed line corresponds to the SSC model by HESS (the red dashed curve in Fig. \ref{f1}). The discrepancy is clearly visible: the model photon index is around 2.65-2.7 in the HESS energy band, while the data photon index is $\Gamma=1.97\pm0.16$ (statistical error only, shown by the grey butterfly). The model slope is excluded by the data at 99.99\% confidence level. } \label{f2} \end{figure} \subsection{Further arguments} The VHE data shown in Fig. \ref{f1} are not the overall average, but correspond to the two years 2004 and 2005 considered separately. It could be argued that the HESS SSC modeling aims to reproduce these two single epochs, and that the lower statistics of each single dataset makes the model viable, despite being excluded by the sum. However, this is not a valid argument: since the model gives identical slopes/characteristics in the two years, if it represented well the spectrum in both epochs it should fit well also the sum of the two, by definition. The situation is similar to having a constant source, properties-wise: the accumulation of statistics is revealing whether the model is compatible or not with the data, and in this case it is not. If this SSC modeling reproduced the actual source spectrum in just one of the two epochs, the spectrum in the other epoch should be much harder than the average, to compensate, and this should show in the data (as well as in the models, i.e. the SSC models for 2004 and 2005 should be very different from each other, to be consistent with the average). Again this seems not the case: the measured slopes of the 2004 and 2005 data sets are identical within the errors ($\Gamma_{\rm VHE}=2.97\pm0.19$ vs $2.99\pm0.39$ respectively, see Table 4 in \cite{2356mwl}). 
In other words, there is no way that the HESS modeling can represent well the HESS data. Simply stated, the proposed SSC model does not correspond to the SED of H\,2356-309, and thus the physical analysis based on its parameters should be discarded. \section{The problem of plotting VHE data} There is a general issue in the interpretation of the VHE data. When presenting the SED of TeV blazars, it is becoming customary to plot only the observed (i.e. not EBL-corrected) data points and to absorb the possible model/curve, plotting at most the curves before EBL absorption. {\it This is often misleading and a dangerous habit, and should be avoided.} Even if in principle it is the most proper method for a statistical analysis of the data and to derive fitting parameters, for plotting purposes it does not accurately display the source SED features. It misleads the viewers on the true properties of the object by mixing them with those of intervening systems. It also creates confusion when trying to find an adequate set of parameters with an emission model, because all curves seem to fit well the SED data when spectra are very steep. The alternative approach is to correct the data points for the EBL (e.g. with the optical depth calculated at the average photon energy in the bin, which provides the same fit parameters of absorbing the model; see e.g. \citep{nature}), and plotting the EBL de-absorbed data in the SED. Both methods, if done correctly, should and do give the same results. This second approach, however, provides visually the most accurate representation of the true properties of the source (which is the focus of a SED study), without confusion with those of intervening systems. Furthermore, this method achieves consistency with the conventions used in the other energy bands on the SED: in the Optical-UV bands, data are usually plotted after correction for at least Galactic extinction. 
In the X-ray band, data/slopes are always shown with correction for Galactic or line-of-sight intervening column densities. The reason is precisely to focus on the source physics, if that is the goal of the study. Otherwise any extragalactic object would always appear with fake peaks in the SED typically at ~1-2 keV (or more, depending on the column density) and in the red-infrared band. In conclusion, EBL-absorption should be treated and accounted for in the SED as any other absorption effect from intervening systems in the electromagnetic spectrum. \begin{figure} \includegraphics[width=85mm]{costam2356_f3.eps} \caption{The updated (correct) SED of H\,2356-309. Black markers represent historical data (including 1998 BeppoSAX and 2004 RXTE data). The XMM-Newton 2005 data (blue and red circles), re-analyzed with SAS v11 using the same procedures described in \citep{2356mwl}, are shown as in Fig. \ref{f1}. In gamma-rays, the HESS data (blue squares) are shown as in Fig. \ref{f2}. The LAT data (magenta point and upper limits) are from the 1-FGL catalog \citep{1fgl}. Data obtained using the web tools of the ASI Science Data Center.} \label{f3} \end{figure} \section{\label{sed}The correct SED of H\,2356-309} The real intrinsic properties of H\,2356-309, as given by the HESS data, are shown in Fig. \ref{f3}. These are: \begin{enumerate} \item a flat VHE spectrum of photon index $\Gamma_{int}\approx$1.9-2 over the whole HESS range; \item a Compton peak either around $\sim$600 GeV (as determined by a log-parabolic fit of the HESS data), or $\gtrsim$1 TeV (assuming the single power-law model). The statistics of the data do not allow yet to distinguish between the two cases (the curvature parameter can be zero within the errors). \end{enumerate} Quite interestingly, the extrapolation of the HESS 2004-2006 average spectrum to the Fermi-LAT band corresponds almost "spot on" to the fluxes in the 1FGL catalog (2008-2009, TS=50, detection mainly in the 1-3 GeV bin; \citep{1fgl}). 
A single power-law model over 3 decades in energy provides surprisingly a good fit ($\chi^2_r\sim0.3$) with $\Gamma_{int}=1.94\pm0.03$. The 2FGL values, instead, show a reduced overall flux (by roughly 2x), but retaining very similar spectral properties ($\Gamma=1.89\pm0.17$, \citep{2fgl}). With the big caveats of the non-simultaneity of the gamma-ray data and of the different integration times of the Optical-to-X-ray data vs the gamma-ray data, such a GeV-TeV gamma-ray spectrum is not easy to model within one-zone SSC scenarios, given the very different synchrotron spectra in the optical to X-ray bands, which trace directly the shape of the electron distribution. Regardless, the average SED seems synchrotron-dominated in luminosity, and thus the electron cooling as well. \begin{figure} \includegraphics[width=80mm]{1553rome.eps} \caption{Typical SED of a Fermi-bright ``100-GeV peaked" HBL: 1ES\,1553+113. They are characterized by soft intrinsic VHE spectra locating the gamma-ray peak close to 100 GeV (like PKS 2005-489 or PKS 2155-304). The redshift is constrained by HST/COS data \citep{danforth}. Fermi-LAT spectrum from \citep{lat1553}. Other data from \citep{reimer1553}. VHE data corrected for EBL according to \citep{franceschini}. } \label{f4} \end{figure} \begin{figure} \includegraphics[width=80mm]{1101rome_w3.eps} \caption{Typical SED of a Fermi-faint TeV-peaked HBL: 1ES\,1101-232. This was the first HBL object discovered with a gamma-ray peak above few TeV (i.e. with a hard VHE spectrum irrespective of the EBL model used; \citep{nature}). Fermi-LAT data from the 1-FGL catalog \citep{1fgl}. The LAT point corresponds to a flux estimate for the $\sim4$ $\sigma$ signal in the 1-FGL catalog. Other multiwavelength data/campaigns from \citep{reimer1553} and refs therein. 
VHE observed data (red) corrected for absorption (blue) according to \citep{franceschini}.} \label{f5} \end{figure} \section{Comparison with other HBL} HBL objects are typically characterized by hard ($\Gamma<2$) Fermi-LAT spectra \citep{2lac} but show a wide range of VHE slopes, depending on the location of the gamma-ray SED peak. In this respect, H\,2356-309 seems intermediate between two types of HBL objects: those with the peak around $\sim100$ GeV, i.e. in between the Fermi-LAT and VHE bands (see for example Fig. \ref{f4}), and those with a gamma-ray peak above 1 TeV, or ``TeV-peaked" HBL (Fig. \ref{f5}). The former are characterized by soft intrinsic VHE spectra, and are more easily detected in Fermi because the peak of the SED is close to the Fermi-LAT band. The TeV-peaked HBL, instead, are characterized by hard intrinsic VHE spectra ($\Gamma_{\rm VHE}<2$), irrespective of the EBL model, locating the peak all the way beyond the detected VHE range. They remain often very weak or undetected in Fermi, because the LAT band falls now deep into the valley between the synchrotron and gamma-ray humps. These objects however, despite being rarer among HBLs, are at present the most challenging AGN for SSC modeling and to gain new insights on particle acceleration and emission mechanisms (e.g. \citep{katar,lefa,tav0229,olga}). \bigskip \noindent {\bf Disclaimer:} {\small As co-author of the HESS paper on H\,2356-309, these findings were already provided to the HESS Collaboration, in the early stage and also after publication, to no positive effect. The co-authorship, therefore, is to be intended as data-related only, as proposer and PI of the multi-wavelength campaigns and for providing the XMM-Newton data analysis. It does not extend to the interpretation, which was finalized after I left the collaboration.} \bigskip
{ "redpajama_set_name": "RedPajamaArXiv" }
5,552
\section{Introduction} \let\thefootnote\relax\footnotetext{Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) – SFB 1283/2 2021 – 317210226} Let $M$ be a complete connected non-compact Riemannian manifold and $p_{t}(x,y) $ be the \emph{heat kernel} on $M$, that is, the minimal positive fundamental solution of the heat equation $\partial _{t}u=\Delta u$, where $ \Delta $ is the Laplace-Beltrami operator on $M$. In this paper, we investigate the long time behaviour of $p_{t}(x, x)$ for $t\to +\infty$, $x\in M$. Especially, we are interested in lower bounds for large enough $t$ of the form \begin{equation}\label{polydecayinintr}p_{t}(x, x)\geq c_{x}t^{-\alpha},\end{equation} where $\alpha$ and $c_{x}$ are positive reals and $c_{x}$ may depend on $x$. Let $V(x, r)=\mu(B(x, r))$ be the volume function of $M$ where $B(x, r)$ denotes the geodesic balls in $M$ and $\mu$ the Riemannian measure on $M$. It was proved by A. Grigor'yan and T. Coulhon in \cite{Coulhon1997}, that if for some $x_{0}\in M$ and all large enough $r$, \begin{equation}\label{polynomialVGinintro}V(x_{0}, r)\leq Cr^{N}\end{equation} where $C$ and $N$ are positive constants, then \begin{equation}\label{heatkerneldecayloginintro}p_{t}(x, x)\geq \frac{c_{x}}{(t\log t)^{N/2}},\end{equation} which obviously implies (\ref{polydecayinintr}). It is rather surprising that such a weak hypothesis as $(\ref{polynomialVGinintro})$ implies a pointwise lower bound (\ref{heatkerneldecayloginintro}) of the heat kernel. In this paper we obtain heat kernel bounds assuming even weaker hypotheses about $M$. We say that an open connected proper subset $\Omega$ of $M$ is an \emph{end} of $M$ if $\partial\Omega$ is compact but $\overline{\Omega}$ is non-compact (see also Section \ref{On-diagonal heat kernel lower bounds}). 
One of our aims here is to obtain lower bounds for the heat kernel assuming only hypotheses about the intrinsic geometry of $\Omega$, although a priori it was not obvious at all that such results can exist. One of motivations was the following question asked by A. Boulanger in \cite{Boulanger2020CountingPO} (although for a more restricted class of manifolds). Considering the volume function in $\Omega$ given by $$V_{\Omega}(x, r)=\mu(B(x, r)\cap\Omega),$$ Boulanger asked if the heat kernel satisfies (\ref{polydecayinintr}) provided it is known that \begin{equation}\label{polynomialVOmGinintro}V_{\Omega}(x_{0}, r)\leq Cr^{N},\end{equation} for some $x_{0}\in \Omega $ and all $r$ large enough. A first partial answer to this question was given by A. Grigor'yan, who showed in \cite{Grigoryan2021}, that if (\ref{polynomialVOmGinintro}) holds and $\overline{\Omega}$, considered as a manifold with boundary, is \textit{non-parabolic}, (and hence, $N>2$ in (\ref{polynomialVOmGinintro})) then (\ref{heatkerneldecayloginintro}) is satisfied. More precisely, denoting by $p_{t}^{\Omega}(x, y)$ the heat kernel in $\Omega$ with the Dirichlet boundary condition on $\partial \Omega$, it was proved in \cite{Grigoryan2021} that, for all $x\in \Omega$ and large enough $t$, \begin{equation}\label{dirichlethatkernelinintro}p_{t}^{\Omega}(x, x)\geq \frac{c_{x}}{(t\log t)^{N/2}},\end{equation} which implies (\ref{heatkerneldecayloginintro}) by the comparison principle. From a probabilistic point of view, the estimate (\ref{dirichlethatkernelinintro}) for non-parabolic $\overline{\Omega}$ is very natural if one compares it with (\ref{heatkerneldecayloginintro}), since the non-parabolicity of $\overline{\Omega}$ implies that the probability that Brownian motion started in $\Omega$ never hits the boundary $\partial \Omega$ is positive (see [\cite{Grigorextquotesingleyan1999}, Corollary 4.6]). 
Hence, one expects that the heat kernel in $\overline{\Omega}$ and the heat kernel in $\Omega$ with Dirichlet boundary condition are comparable. The main direction of research in this paper is the validity of the estimate (\ref{polydecayinintr}) in the case when $\overline{\Omega}$ is parabolic and the volume function of $\Omega $ satisfies (\ref{polynomialVOmGinintro}). We prove (\ref{polydecayinintr}) for a certain class of manifolds $M$ when $\overline{\Omega}$ is parabolic as well as construct a class of manifolds $M$ with parabolic ends where (\ref{polydecayinintr}) does not hold. In Section \ref{On-diagonal heat kernel lower bounds} we are concerned with positive results. One of our main results - \textbf{Theorem \ref{thmlocharsph}}, ensures the estimate (\ref{polydecayinintr}) when $\overline{\Omega}$ is a \emph{locally Harnack} manifold (see Subsection \ref{positiveresultslower} for the definition). In order to handle difficulties that come from the parabolicity of the end, we use the method of $h$-\emph{transform} (see Subsection \ref{subsechtrans}). For that we construct a positive harmonic function $h$ in $\Omega$ and define a new measure $\widetilde{\mu}$ by $d\widetilde{\mu}=h^{2}d\mu$. Thus, we obtain a \emph{weighted manifold} $\left(\overline{\Omega},\widetilde{\mu}\right)$. We prove that this manifold is non-parabolic, satisfies the polynomial volume growth and, hence, the heat kernel $\widetilde{p}_{t}^{\Omega}$ of $\left(\Omega,\widetilde{\mu}\right)$ satisfies the lower bound (\ref{dirichlethatkernelinintro}). Then a similar lower bound for $p_{t}^{\Omega}$ and, hence, for $p_{t}$, follows from the identity $$p_{t}^{\Omega}(x, x)=h^{2}(x)\widetilde{p}^{\Omega}_{t}(x, x)$$ (see Lemma \ref{relheattildeohne}). Note that the techniques of $h$-transform for obtaining heat kernel bounds was used in \cite{grigor2009heat} and \cite{Grigorextquotesingleyan2001} although in different settings. 
In Section \ref{On-diagonal heat kernel upper bounds} we construct examples of manifolds $M$ having a parabolic end $\Omega$ with finite volume (in particular, satisfying (\ref{polynomialVOmGinintro})) but such that the heat kernel $p_{t}\left( x,x\right)$ decays \emph{superpolynomially} as $t\rightarrow \infty.$ In fact, the end $\Omega$ is constructed as a \textit{model manifold} (see Section \ref{secisobdry} for the definition of this term) that topologically coincides with $\left( 0,+\infty \right) \times \mathbb{S}^{n-1}$, $n\geq 2$, while the Riemannian metric on $\Omega$ is given by \begin{equation}\label{metricinintrowarp}ds^{2}=dr^{2}+\psi^{2}(r)d\theta^{2},\end{equation} where $d\theta ^{2}$ is a standard Riemannian metric on $\mathbb{S}^{n-1}$ and \begin{equation}\label{defvonpisinintro}\psi(r)=e^{-\frac{1}{n-1}r^{\alpha}},\end{equation} with $0<\alpha \leq 1.$ Our second main result -\textbf{Theorem \ref{heatkernelforsmallendviah}}, says that for a certain manifold $M$ with this end $\Omega $ the following heat kernel estimate holds: \begin{equation}\label{introheatkernelupper}p_{t}(x, x)\leq C_{x}\exp\left(-Ct^{\frac{\alpha}{2-\alpha}}\right),\end{equation} for all $x\in M$ and large enough $t$. The estimate $(\ref{introheatkernelupper})$ follows from \textbf{Theorem \ref{heatonsumweightfaber}} where we obtain the upper bound of the heat kernel $\widetilde{p}_{t}$ of a weighted manifold $\left( M,\widetilde{\mu }\right) $ after an appropriate $h$-transform. In this theorem we prove that \begin{equation}\label{weightedheatupperinintro}\widetilde{p}_{t}(x, x)\leq C\exp\left(-C_{1}t^{\frac{\alpha}{2-\alpha}}\right).\end{equation} In fact, this decay is sharp, meaning that we have a matching lower bound $$\sup_{x\in M}\widetilde{p}_{t}(x, x)\geq c\exp\left(-C_{2}t^{\frac{\alpha}{2-\alpha}}\right)$$ (see the remark after Theorem \ref{heatonsumweightfaber}). 
The key ingredient in the proof of \textbf{Theorem \ref{heatonsumweightfaber}} is obtaining a \textit{lower isoperimetric} function $J$ on $(\overline{\Omega}, \widetilde{\mu})$, which yields then the heat kernel upper bound (\ref{weightedheatupperinintro}) by a well-known technique (see [\cite{Grigoryan1999}, Proposition 7.1] and Proposition \ref{thmfaberheatupper}). We say that a function $J$ on $[0, +\infty)$ is a lower isoperimetric function for $(\overline{\Omega}, \widetilde{\mu})$ if, for any precompact open set $U\subset \overline{\Omega}$ with smooth boundary, \begin{equation}\label{isoperimetricdefJintro}\widetilde{\mu}^{+}(U)\geq J(\widetilde{\mu}(U)),\end{equation} where $\widetilde{\mu}^{+}$ denotes the perimeter with respect to the measure $\widetilde{\mu}$ (see Section \ref{Isoperimetric inequalities for warped products} for more details). In Section \ref{Isoperimetric inequalities for warped products} we present a technique for obtaining isoperimetric inequalities on \emph{warped products} of weighted manifolds. The isoperimetric inequality on \emph{Riemannian products} was proved in \cite{grigor1985isoperimetric}. We develop further the method of \cite{grigor1985isoperimetric} to deal with warped products, in particular, with the metric (\ref{metricinintrowarp}). The main result here is stated in \textbf{Theorem \ref{thm1iso}}. Given two weighted manifolds $(M_{1}, \mu_{1})$ and $(M_{2}, \mu_{2})$ consider the weighted manifold $(M, \mu)$ such that $M=M_{1}\times M_{2}$ as topological spaces, the Riemannian metric $ds^{2}$ on $M$ is defined by $$ds^{2}=dx^{2}+\psi^{2}(x)dy^{2},$$ with $\psi$ being a smooth positive function on $M_{1}$ and $dx^{2}$ and $dy^{2}$ denoting the Riemannian metrics on $M_{1}$ and $M_{2}$, respectively and measure $\mu$ on $M$ is defined by $\mu=\mu_{1}\times \mu_{2}$. 
Assume that the function $\psi$ is bounded and $(M_{1}, \mu_{1})$ and $(M_{2}, \mu_{2})$ admit continuous lower isoperimetric functions $J_{1}$ and $J_{2}$, respectively. Then we prove in \textbf{Theorem \ref{thm1iso}} that $(M, \mu)$ admits a lower isoperimetric function $$J(v)=c\inf_{\varphi, \phi}\left(\int_{0}^{\infty} {J_{1}(\varphi(t))dt}+\int_{0}^{\infty}{J_{2}(\phi(s))ds}\right),$$ for some positive constant $c>0$ and where $\varphi$ and $\phi$ are \textit{generalized mutually inverse} functions such that $$v=\int_{0}^{\infty} {\varphi(t) dt}=\int_{0}^{\infty}{\phi(s)ds}.$$ As a consequence of \textbf{Theorem \ref{thm1iso}}, we obtain in Theorem \ref{propisoformod} that the aforementioned weighted model manifold $\left(\overline{\Omega}, \widetilde{\mu}\right)$ admits a lower isoperimetric function $J$ such that for large enough $v$, $$J(v)=\frac{cv}{(\log v)^{\frac{2-2\alpha}{\alpha}}},$$ for some positive constant $c>0$, which leads to the estimate (\ref{weightedheatupperinintro}). Even though we managed to give both positive and negative results for manifolds with parabolic end concerning the estimate (\ref{polydecayinintr}), a gap still remains. Closing this gap seems to be interesting for future work, for example, it might be desirable to construct a manifold with parabolic end of infinite volume for which (\ref{polydecayinintr}) does not hold. NOTATION. For any nonnegative functions $f, g$, we write $f\simeq g$ if there exists a constant $C>1$ such that $$C^{-1}f\leq g\leq Cf.$$ \section{On-diagonal heat kernel lower bounds}\label{On-diagonal heat kernel lower bounds} Let $M$ be a non-compact Riemannian manifold with boundary $\delta M$ (which may be empty). Given a smooth positive function $\omega$ on $M$, let $\mu$ be the measure defined by \begin{equation*}\label{defweightmeasureome}d\mu=\omega^{2} d\textnormal{vol},\end{equation*} where $d\textnormal{vol}$ denotes the Riemannian measure on $M$. Similarly,
we define $\mu'$ as the measure with density $\omega^{2}$ with respect to the Riemannian measure of codimension 1 on any smooth hypersurface. The pair ($M, \mu$) is called \textit{weighted manifold}. The Riemannian metric induces the Riemannian distance $d(x, y),~ x, y\in M$. Let $B(x, r)$ denote the geodesic ball of radius $r$ centered at $x$, that is $$B(x, r)=\{y\in M: d(x, y)<r\}$$ and $V(x, r)$ its volume on ($M, \mu$) given by $$V(x, r)=\mu(B(x, r)).$$ We say that $M$ is complete if the metric space ($M, d$) is complete. It is known that $M$ is complete if and only if all balls $B(x, r)$ are precompact sets. In this case, $V(x, r)$ is finite. The Laplace operator $\Delta_{\mu}$ is the second order differential operator defined by $$\Delta_{\mu}f=div_{\mu}(\nabla f)=\omega^{-2}div(\omega^{2}\nabla f).$$ If $\omega\equiv 1$, then $\Delta_{\mu}$ coincides with the Laplace-Beltrami operator $\Delta=div \circ \nabla$. Consider the Dirichlet form $$\mathcal{E}(u, v)=\int_{M}{(\nabla u, \nabla v)d\mu},$$ defined on the space $C_{0}^{\infty}(M)$ of smooth functions with compact support. The form $\mathcal{E}$ is closable in $L^{2}(M, \mu)$ and positive definite. Let us from now on denote by $\Delta_{\mu}$ its infinitesimal generator. By integration by parts, we obtain for all $u, v \in C_{0}^{\infty}(M)$, \begin{equation}\label{intbypartsdelta}\mathcal{E}(u, v)=\int_{M}{(\nabla u, \nabla v)d\mu}=-\int_{M}{v\Delta ud\mu}-\int_{\delta M}{v\frac{\partial u}{\partial \nu}d\mu'},\end{equation} where $\nu$ denotes the inward unit normal vector field on $\delta M$. A function $u$ is called \textit{harmonic} if $u\in C^{2}(M)$, $\Delta u=0$ in $M\setminus \delta M$ and $\frac{\partial u}{\partial \nu}=0$ on $\delta M$. The operator $\Delta_{\mu}$ generates the heat semi-group $P_{t}:=e^{t\Delta_{\mu}}$ which possesses a positive smooth, symmetric kernel $p_{t}(x, y)$. Let $\Omega$ be an open subset of $M$ and denote $\delta \Omega:=\delta M\cap \Omega$. 
Then we can consider $\Omega$ as a manifold with boundary $\delta \Omega$. Hence, using the same constructions as above for $\Omega$ instead of $M$, we obtain the heat semigroup $P_{t}^{\Omega}$ with the heat kernel $p_{t}^{\Omega}(x, y)$, which satisfies the Dirichlet boundary condition on $\partial \Omega$ and the Neumann boundary condition on $\delta \Omega$. \begin{defin}\normalfont Let $M$ be a complete non-compact manifold. Then we call $\Omega$ an \textit{end} of $M$, if $\Omega$ is an open connected proper subset of $M$ such that $\overline{\Omega}$ is non-compact but $\partial \Omega$ is compact (in particular, when $\partial\Omega$ is a smooth closed hypersurface). \end{defin} In many cases, the end $\Omega$ can be considered as an exterior of a compact set of another manifold $M_{0}$, that means, $\Omega$ is isometric to $M_{0}\setminus K_{0}$ for some compact set $K_{0}\subset M_{0}$. If $(M, \mu)$ and $(M_{0}, \mu_{0})$ are weighted manifolds, with $\omega^{2}$ being the smooth density of measure $\mu$ and the measure $\mu_{0}$ having smooth density $\omega_{0}^{2}$, the isometry is meant in the sense of weighted manifolds, that is, this isometry maps measure $\mu$ to $\mu_{0}$ so that $\omega_{0}=\omega$ on $\Omega$. A function $u\in C^{2}(M)$ is called \textit{superharmonic} if $\Delta_{\mu} u\leq 0$ in $M\setminus \delta M$ and $\frac{\partial u}{\partial \nu}\geq 0$ on $\delta M$, where $\nu$ is the outward normal unit vector field on $\delta M$. A \textit{subharmonic} function $u\in C^{2}(M)$ satisfies the opposite inequalities. \begin{defin}\normalfont We say that a weighted manifold $(M, \mu)$ is \textit{parabolic} if any positive superharmonic function on $M$ is constant, and \textit{non-parabolic} otherwise. \end{defin} \begin{defin}\normalfont Let $(M, \mu)$ be a weighted manifold and $\Omega$ be a subset of $M$. 
Then we define \textit{the volume function} of $\Omega$, for all $x\in M$ and $r>0$, by $$V_{\Omega}(x, r)=\mu(B_{\Omega}(x, r)),$$ where $B_{\Omega}(x, r)=B(x, r)\cap \Omega$. \end{defin} \begin{defin}\normalfont Let $(M, \mu)$ be a weighted manifold. We say that $\Omega\subset M$ satisfies the \textit{polynomial volume growth condition}, if there exist $x_{0}\in \Omega$ and $r_{0}>0$ such that for all $r\geq r_{0}$, \begin{equation}\label{PolygrowthOM}V_{\Omega}(x_{0}, r)\leq Cr^{N},\end{equation} where $N$ and $C$ are positive constants. \end{defin} \begin{thm}[\cite{Grigoryan2021}, Theorem 8.3]\label{thmlowerptbd} Let $M$ be a complete non-compact manifold with end $\Omega$. Assume that $\left(\overline{\Omega}, \mu\right)$ is a weighted manifold such that \begin{itemize} \item $\left(\overline{\Omega}, \mu\right)$ is non-parabolic as a manifold with boundary $\partial \Omega\cup \delta \Omega$. \item $\Omega$ satisfies the polynomial volume growth condition (\ref{PolygrowthOM}) with $N>2$. \end{itemize} Then for any $x\in \Omega$ there exist $c_{x}>0$ and $t_{x}>0$ such that for all $t\geq t_{x}$, \begin{equation}\label{lowerbndend}p_{t}^{\Omega}(x, x)\geq \frac{c_{x}}{(t\log t)^{N/2}},\end{equation} where $c_{x}$ and $t_{x}$ depend on $x$. Consequently, if $(M, \mu)$ is a complete non-compact weighted manifold with end $\Omega$ such that the above conditions are satisfied, we have for any $x\in M$ and all $t\geq t_{x}$, \begin{equation}\label{nonparaendpoldecglob}p_{t}(x, x)\geq \frac{c_{x}}{(t\log t)^{N/2}}.\end{equation} \end{thm} \subsection{$h$-transform}\label{subsechtrans} Recall that any smooth positive function $h$ induces a new weighted manifold $(M, \widetilde{\mu})$, where the measure $\widetilde{\mu}$ is defined by \begin{equation}\label{defvonmeasuremith}d\widetilde{\mu}=h^{2}d\mu\end{equation} and we denote, for all $r>0$ and $x\in M$, by $\widetilde{V}(x, r)$ the volume function of measure $\widetilde{\mu}$. 
The Laplace operator $\Delta_{\widetilde{\mu}}$ on $(M, \widetilde{\mu})$ is then given by $$\Delta_{\widetilde{\mu}}f=h^{-2}div_{\mu}(h^{2}\nabla f)=(h\omega)^{-2}div((h\omega)^{2}\nabla f).$$ \begin{lemma}[\cite{Grigorextquotesingleyan2001}, Lemma 4.1]\label{neuesmasdelta} Assume that $\Omega\subset M$ is open and $\Delta_{\mu} h=0$ in $\Omega$. Then for any smooth function $f$ in $\Omega$, we have \begin{equation}\label{doobtrnsfmit}\Delta_{\widetilde{\mu}}f=h^{-1}\Delta_{\mu}(hf).\end{equation} \end{lemma} \begin{lemma}[\cite{Grigorextquotesingleyan2001}, Lemma 4.2]\label{relheattildeohne} Assume that $h$ is a harmonic function in an open set $\Omega\subset M$. Then the Dirichlet heat kernels $p^{\Omega}_{t}$ and $\widetilde{p}^{\Omega}_{t}$ in $\Omega$, associated with the corresponding Laplace operators $\Delta_{\mu}$ and $\Delta_{\widetilde{\mu}}$, are related by \begin{equation}\label{relhtransformheat} p^{\Omega}_{t}(x, y)=h(x)h(y)\widetilde{p}^{\Omega}_{t}(x, y),\end{equation} for all $t>0$ and $x, y\in \Omega$. \end{lemma} \begin{rem}\normalfont In particular, if we assume that $h$ is harmonic in $M$, we get that the heat kernels are related by \begin{equation}\label{relationofheatkernelsends}\widetilde{p}_{t}(x, y)=\frac{p_{t}(x, y)}{h(x)h(y)}\end{equation} for all $t>0$ and $x, y\in M$. \end{rem} \begin{defin}\normalfont Let $\Omega$ be an open set in $M$ and $K$ be a compact set in $\Omega$. Then we call the pair $(K, \Omega)$ a \textit{capacitor} and define the capacity $\textnormal{cap}(K, \Omega)$ by \begin{equation}\label{Defcap}\textnormal{cap}(K, \Omega)=\inf_{\phi\in \mathcal{T}(K, \Omega)}\int_{\Omega}{|\nabla \phi|^{2}d\mu},\end{equation} where $\mathcal{T}(K, \Omega)$ is the set of test functions defined by \begin{equation}\label{testfunkcap}\mathcal{T}(K, \Omega)=\{\phi\in C_{0}^{\infty}(\Omega): \phi|_{K}=1\}.\end{equation} \end{defin} Let $\Omega$ be precompact. 
Then it is known that the Dirichlet integral in (\ref{Defcap}) is minimized by a harmonic function $\varphi$, so that the infimum is attained by the weak solution to the Dirichlet problem in $\Omega\setminus \overline{K}$: $$\left\{ \begin{array}{l} \Delta \varphi=0 \\ \varphi|_{\partial K}=1 \\ \varphi|_{\partial \Omega}=0 \\ \frac{\partial \varphi}{\partial \nu}|_{\delta (\Omega\setminus \overline{K})}=0. \end{array}\right. $$ The function $\varphi$ is called the \textit{equilibrium potential} of the capacitor $(K, \Omega)$. We always have the following identity: \begin{equation}\label{fluxvscap}\textnormal{cap}(K, \Omega)=\int_{\Omega}{|\nabla \varphi|^{2}d\mu}=\int_{\Omega\setminus \overline{K}}{|\nabla \varphi|^{2}d\mu}=-\textnormal{flux}(\varphi), \end{equation} where $\textnormal{flux}(\varphi)$ is defined by $$\textnormal{flux}(\varphi):=\int_{\partial W}{\frac{\partial \varphi}{\partial \nu}d\mu'},$$ where $W$ is any open region in the domain of $\varphi$ with smooth precompact boundary such that $\overline{K}\subset W$ and $\nu$ is the outward normal unit vector field on $\partial W$. By the Green formula (\ref{intbypartsdelta}) and the harmonicity of $\varphi$, $\textnormal{flux}(\varphi)$ does not depend on the choice of $W$. \begin{defin}\normalfont We say that a precompact open set $U\subset M$ has locally positive capacity, if there exists a precompact open set $\Omega$ such that $\overline{U}\subset \Omega$ and $\textnormal{cap}(U, \Omega)>0$. \end{defin} It is a consequence of the local Poincaré inequality, that if $\textnormal{cap}(U, \Omega)>0$ for some precompact open $\Omega$, then this is true for all precompact open $\Omega$ containing $\overline{U}$. \begin{lemma}\label{Lemmaconstructionh} Let $(M, \mu)$ be a complete, non-compact weighted manifold and $K$ be a compact set in $M$ with locally positive capacity and smooth boundary $\partial K$. 
Fix some $x_{0}\in M$ and set $B_{r}:=B(x_{0}, r)$ for all $r>0$ and assume that $K$ is contained in a ball $B_{r_{0}}$ for some $r_{0}>0$. Let us also set $\Omega=M\setminus K$, so that $\left(\overline{\Omega}, \mu\right)$ becomes a weighted manifold with boundary. Then there exists a positive smooth function $h$ in $\overline{\Omega}$ that is harmonic in $\Omega$ and satisfies for all $r\geq r_{0}$, \begin{equation}\label{vergleichminicapah}\min_{\partial B_{r}}h\leq C~\textnormal{cap}(K, B_{r})^{-1},\end{equation} for some constant $C>0$. Moreover, the weighted manifold $\left(\overline{\Omega}, \widetilde{\mu}\right)$ is non-parabolic, where measure $\widetilde{\mu}$ on $\overline{\Omega}$ is defined by (\ref{defvonmeasuremith}).\end{lemma} \begin{proof} For any $R>r_{0}$, let $\varphi_{R}$ be the equilibrium potential of the capacitor $(K, B_{R})$. It follows from (\ref{fluxvscap}), that \begin{equation}\label{capgleichminsfluxK}\textnormal{cap}(K, B_{R})=-\textnormal{flux}(\varphi_{R}).\end{equation} By our assumption on $K$, we have for all $R> r_{0}$, \begin{equation*}\label{assumptionboundary}\textnormal{cap}(K, B_{R})>0,\end{equation*} whence we can consider the sequence $$v_{R}=\frac{1-\varphi_{R}}{\textnormal{cap}(K, B_{R})}.$$ By (\ref{capgleichminsfluxK}) this sequence satisfies \begin{equation}\label{fluxvReins}\textnormal{flux}(v_{R})=1.\end{equation} Let us extend all $v_{R}$ to $K$ by setting $v_{R}\equiv 0$ on $K$. We claim that for all $R>r>r_{0}$, \begin{equation}\label{vergleichminicapav}\min_{\partial B_{r}}v_{R}\leq \textnormal{cap}(K, B_{r})^{-1}.\end{equation} For $R>r>r_{0}$, denote $m_{r}=\min_{\partial B_{r}}v_{R}$. It follows from the minimum principle and the fact that $v_{R}\equiv 0$ on $K$, that the set $$U_{r}:=\{x\in M:v_{R}(x)<m_{r}\}$$ is inside $B_{r}$ and contains $K$. 
Then observe that the function $1-\frac{v_{R}}{m_{r}}$ is the equilibrium potential for the capacitor $(K, U_{r})$, whence $$\textnormal{cap}(K, B_{r})\leq \textnormal{cap}(K, U_{r})=\textnormal{flux}\left(\frac{v_{R}}{m_{r}}\right)=\frac{1}{m_{r}},$$ which proves (\ref{vergleichminicapav}). Note that, since $v_{R}$ vanishes on $\partial \Omega$, the maximum principle implies that for all $R>r>r_{0}$, \begin{equation}\label{infinoderrand}\sup_{B_{r}\setminus K}v_{R}=\max_{\partial B_{r}}v_{R}.\end{equation} Hence, we obtain from (\ref{infinoderrand}), the local elliptic Harnack inequality and (\ref{vergleichminicapav}), that for every $R>r>r_{0}$, \begin{equation}\label{uniformboundvR}\sup_{B_{r}\setminus K}v_{R}\leq C(r)\min_{\partial B_{r}}v_{R}\leq C(r)\textnormal{cap}(K, B_{r})^{-1},\end{equation} where the constant $C(r)$ depends only on $r$. Thus, the bound in (\ref{uniformboundvR}) is uniform in $R$, when $R\gg r$, so that in this case, the sequence $v_{R}$ is uniformly bounded in $B_{r}\setminus K$. Now define $V_{k}:=B_{r_{k}}\setminus K$, where $B_{r_{k}}$ are open balls of radius $r_{k}\geq r_{0}$ with $\lim_{k\to \infty}r_{k}=+\infty$ so that $\{V_{k}\}_{k}$ gives a sequence of precompact open sets that covers $\Omega$. By using a diagonal process, we obtain a subsequence $v_{R_{k}}$ of $v_{R}$ that converges in all $V_{k}$, and hence, in $\Omega$. In addition, the limit $v:=\lim_{k\to \infty}v_{R_{k}}$ is a harmonic function in $\Omega$. Since \begin{equation}\label{vRundvgleichinBR}v=v_{R} \quad\textnormal{in}~B_{R}\setminus K,\end{equation} we have $$\int_{\partial B_{r}}{\frac{\partial v}{\partial \nu}d\mu'}=\int_{\partial B_{r}}{\frac{\partial v_{R}}{\partial \nu}d\mu'},$$ which together with (\ref{fluxvReins}) implies \begin{equation}\label{fluxvonhimbeweis} \textnormal{flux}(v)=1,\end{equation} whence $v$ is non-constant. Furthermore, $v$ is non-zero and by (\ref{vRundvgleichinBR}), non-negative in $\overline{\Omega}$. 
Let us define the function $h=1+v$ in $\overline{\Omega}$ so that $h$ is positive and smooth in $\overline{\Omega}$. Also, it follows from (\ref{vergleichminicapav}), that for all $r>r_{0}$, $$\min_{\partial B_{r}}h\leq 1+\textnormal{cap}(K, B_{r})^{-1}\leq (1+\textnormal{cap}(K, B_{r_{0}}))\textnormal{cap}(K, B_{r})^{-1},$$ which proves (\ref{vergleichminicapah}) with $C=1+\textnormal{cap}(K, B_{r_{0}})$. Let us now show that the weighted manifold $(\overline{\Omega}, \widetilde{\mu})$ is non-parabolic. For that purpose, consider in $\overline{\Omega}$ the positive smooth function $w=\frac{1}{h}$. Then we have by Lemma \ref{neuesmasdelta}, that function $w$ satisfies in $\Omega$, $$\Delta_{\widetilde{\mu}}(w)=\Delta_{\widetilde{\mu}}\left(\frac{1}{h}\right)=\frac{1}{h}\Delta_{\mu}1=0,$$ so that the function $w$ is $\Delta_{\widetilde{\mu}}$-harmonic in $\Omega$. Observe that \begin{equation}\label{ableitungvonw}\frac{\partial w}{\partial \nu}=-\frac{\partial h}{\partial \nu}{\frac{1}{h^{2}}},\end{equation} where $\nu$ denotes the outward normal unit vector field on $\partial \Omega$. Since $v$ is non-negative in $\Omega$ and $v=0$ on $\partial \Omega$, we have $\frac{\partial h}{\partial \nu}\leq0$ on $\partial \Omega$, whence we get by (\ref{ableitungvonw}), $$\frac{\partial w}{\partial \nu}\geq0\quad \textnormal{on}~ \partial \Omega.$$ Hence, we conclude that $w$ is $\Delta_{\widetilde{\mu}}$-superharmonic in $\overline{\Omega}$, positive and non-constant, which implies that $(\overline{\Omega}, \widetilde{\mu})$ is non-parabolic. \end{proof} \begin{rem}\normalfont Note that the function $h$ constructed in Lemma \ref{Lemmaconstructionh} is $\Delta_{\mu}$-subharmonic in $\overline{\Omega}$. If we assume that the weighted manifold $(\overline{\Omega}, \mu)$ is parabolic, we obtain that $h$ is unbounded since a non-constant bounded subharmonic function can only exist on non-parabolic manifolds. 
\end{rem} \subsection{Locally Harnack case}\label{positiveresultslower} \begin{defin}\label{deflocallyharnack}\normalfont The weighted manifold $(M, \mu)$ is said to be a \textit{locally Harnack manifold} if there is $\rho>0$, called the \textit{Harnack radius}, such that for any point $x\in M$ the following is true: \begin{enumerate} \item for any positive numbers $r<R<\rho$ \begin{equation}\label{locHarvD} \frac{V(x, R)}{V(x, r)}\leq a \left(\frac{R}{r}\right)^{n}.\end{equation} \item Poincaré inequality: for any Lipschitz function $f$ in the ball $B(x, R)$ of radius $R<\rho$ we have \begin{equation}\label{zwopoincare}\int_{B(x, R)}{|\nabla f|^{2}d\mu}\geq\frac{b}{R^{2}}\int_{B(x, R/2)}{(f-\overline{f})^{2}d\mu},\end{equation} where we denote $$\overline{f}:=\dashint_{B(x, R/2)}{f}d\mu:=\frac{1}{V(x, R/2)}\int_{B(x, R/2)}{f}d\mu$$ \end{enumerate} and $a, b$ and $n$ are positive constants and $V(x, r)$ denotes the volume function of $(M, \mu)$. \end{defin} For example, the conditions 1. and 2. are true in the case when the manifold $M$ has Ricci curvature bounded below by a (negative) constant $-K$ (see \cite{Buser1982}). Hence, for example, any manifold $M$ of \textit{bounded geometry} is a locally Harnack manifold. \begin{lemma}[\cite{grigor1994heat}, Theorem 2.1]\label{thmlocallyharnack} Let $(M, \mu)$ be a locally Harnack manifold. Then we have, for any precompact open set $U\subset M$, \begin{equation}\label{lowerbdlambdaone}\lambda_{1}(U)\geq \frac{c}{\rho^{2}}\min\left(\left(\frac{V_{0}}{\mu(U)}\right)^{2}, \left(\frac{V_{0}}{\mu(U)}\right)^{2/n}\right),\end{equation} where $$V_{0}=\inf_{x\in M}\{V(x, \rho):B(x, \rho)\cap U\ne \emptyset\}$$ and the constant $c$ depends on $a, b, n$ from (\ref{locHarvD}) and (\ref{zwopoincare}). 
\end{lemma} \begin{defin}\normalfont We say that a manifold $M$ satisfies the \textit{spherical Harnack inequality} if there exist $x_{0}\in M$ and constants $r_{0}>0$, $C_{H}>0$, $N_{H}>0$ and $A>1$, so that for any positive harmonic function $u$ in $M\setminus \overline{B(x_{0}, A^{-1}r)}$ with $r\geq r_{0}$, \begin{equation}\label{annuliharnack2}\sup_{\partial B(x_{0}, r)}u\leq C_{H}r^{N_{H}} \inf_{\partial B(x_{0}, r)}u.\end{equation} \end{defin} \textbf{Assumption:} In this section, when considering an end $\Omega$ of a complete non-compact weighted manifold $(M, \mu)$, we always assume that there exists a complete weighted manifold $(M_{0}, \mu_{0})$ and a compact set $K_{0}\subset M_{0}$ that is the closure of a non-empty open set, such that $\Omega$ is isometric to $M_{0}\setminus K_{0}$ in the sense of weighted manifolds. For simplicity and since we only use the intrinsic geometry of $M_{0}$, we denote by $B(x, r)$ the geodesic balls in $M_{0}$ and by $V(x, r)$ the volume function of $M_{0}$. \begin{thm}\label{thmlocharsph} Let $\Omega$ be an end of a complete non-compact weighted manifold $(M, \mu)$. Assume that $M_{0}$ is a locally Harnack manifold with Harnack radius $\rho>0$, where $M_{0}$ is defined as above, and that there exists $x_{0}\in M_{0}$ so that \begin{itemize} \item $M_{0}$ satisfies the spherical Harnack inequality (\ref{annuliharnack2}). \item $M_{0}$ satisfies the polynomial volume growth condition (\ref{PolygrowthOM}). 
\item There are constants $v_{0}>0$ and $\theta\geq 0$ so that for any $x\in M_{0}$, if $d(x, x_{0})\leq R$ for some $R>\rho$, it holds that \begin{equation}\label{upperboundVOm}V(x, \rho)\geq v_{0}R^{-\theta}.\end{equation} \end{itemize} Then, for any $x\in M$, there exist $\alpha>0$, $t_{x}>0$ and $c_{x}>0$ such that for all $t\geq t_{x}$, \begin{equation}\label{lowerbndendparamitharnaanuohneomloc}p_{t}(x, x)\geq \frac{c_{x}}{t^{\alpha}},\end{equation} where $\alpha=\alpha(N, \theta, n, N_{H})$ and $n$ is as in (\ref{locHarvD}). \end{thm} \begin{proof} Let us set $B_{r}=B(x_{0}, r)$ and $V(r)=V(x_{0}, r)$ and $K_{0}$ be contained in a ball $B_{\delta}$ for some $\delta>0$. It follows from ([\cite{heinonen2018nonlinear}, Theorem 2.25]) that $K_{0}$ has locally positive capacity. Then by Lemma \ref{Lemmaconstructionh} there exists a positive smooth function $h$ in $\overline{\Omega}$ that is harmonic in $\Omega$ and such that the weighted manifold $\left(\overline{\Omega}, \widetilde{\mu}\right)$ is non-parabolic, where measure $\widetilde{\mu}$ is defined by (\ref{defvonmeasuremith}). Now, our aim is to apply the estimate (\ref{lowerbndend}) in Theorem \ref{thmlowerptbd} to the weighted manifold $(\overline{\Omega}, \widetilde{\mu})$. 
For that purpose, it is sufficient to show that there are positive constants $\widetilde{r_{0}}, \widetilde{C}$ and $\widetilde{N}>2$ such that for all $r\geq \widetilde{r_{0}}$, \begin{equation}\label{Volumegrowththeotilde2}\widetilde{V}_{\Omega}(r)=\int_{B_{r}\cap \Omega}{h^{2}d\mu}\leq \widetilde{C}r^{\widetilde{N}}.\end{equation} Firstly, by (\ref{vergleichminicapah}), there is a constant $C_{\delta}>0$ such that for all $r\geq \delta$, \begin{equation}\label{kettemitcapunmax2l}\min_{\partial B_{r}}h\leq C_{\delta}\textnormal{cap}(K_{0}, B_{r})^{-1}.\end{equation} As $h$ is harmonic in $M_{0}\setminus \overline{B_{\delta}}$, the hypothesis (\ref{annuliharnack2}) implies that there exists a constant $C_{H}>0$, so that for every $r\geq \max(r_{0}, A\delta)$, $$\max_{\partial B_{r}}h\leq C_{H}r^{N_{H}}\min_{\partial B_{r}}h.$$ Combining this with (\ref{kettemitcapunmax2l}), we obtain for all $r\geq \max(r_{0}, A\delta)$ with $C_{0}=C_{H}C_{\delta}$, \begin{equation}\label{Folgerungfuerh2l}\max_{\partial B_{r}}h\leq C_{0}r^{N_{H}}\textnormal{cap}(K_{0}, B_{r})^{-1}.\end{equation} For any $r\geq\delta$, let $\varphi_{r}$ be the equilibrium potential of the capacitor $(K_{0}, B_{r})$. 
Since $$\int_{B_{r}}{|\nabla \varphi_{r}|^{2}d\mu_{0}}=\textnormal{cap}(K_{0}, B_{r})$$ and $$\int_{B_{r}}{\varphi_{r}^{2}d\mu_{0}}\geq \mu_{0}(K_{0}),$$ we obtain $$\lambda_{1}(B_{r})\leq \frac{\int_{B_{r}}{|\nabla \varphi_{r}|^{2}d\mu_{0}}}{\int_{B_{r}}{\varphi_{r}^{2}d\mu_{0}}}\leq\frac{\textnormal{cap}(K_{0}, B_{r})}{\mu_{0}(K_{0})},$$ whence, together with (\ref{Folgerungfuerh2l}), we deduce \begin{equation}\label{vergleichcapla1}\max_{\partial B_{r}}h\leq C_{0}\mu_{0}(K_{0})^{-1}r^{N_{H}}\lambda_{1}(B_{r})^{-1}.\end{equation} Since $M_{0}$ is a locally Harnack manifold, we can apply Lemma \ref{thmlocallyharnack} and obtain from (\ref{lowerbdlambdaone}), that for all $r\geq \delta$, \begin{equation}\label{anwendunglowebdlambdae}\lambda_{1}(B_{r})\geq \frac{c}{\rho^{2}}\min\left(\left(\frac{V_{0}}{V(r)}\right)^{2}, \left(\frac{V_{0}}{V(r)}\right)^{2/n}\right),\end{equation} where $$V_{0}=\inf_{x\in M_{0}}\{V(x, \rho):B(x, \rho)\cap B_{r}\ne \emptyset\}.$$ Note that the condition $B(x, \rho)\cap B_{r}\ne \emptyset$ implies that $d(x_{0}, x)\leq r+\rho$. Thus, we obtain from the hypothesis (\ref{upperboundVOm}), assuming $r\geq\rho$, $$V(x, \rho)\geq v_{0}(r+\rho)^{-\theta}\geq v_{0}2^{-\theta}r^{-\theta}.$$ Therefore, we have for all $r\geq \rho$, $$V_{0}\geq C_{\theta}r^{-\theta},$$ with $C_{\theta}=v_{0}2^{-\theta}$. 
Hence, using the polynomial volume growth condition (\ref{PolygrowthOM}), we obtain from (\ref{anwendunglowebdlambdae}), that for all $r\geq\max(r_{0}, \rho, A\delta)$, $$\lambda_{1}(B_{r})\geq C_{1}\min \left(r^{-2(N+\theta)}, r^{-2(N+\theta)/n}\right),$$ where $$C_{1}=\frac{c}{\rho^{2}}\min\left(\left(\frac{C_{\theta}}{C}\right)^{2}, \left(\frac{C_{\theta}}{C}\right)^{2/n}\right),$$ so that by setting \begin{equation}\label{defvonbetfornh}\beta=2\max\left(N+\theta, \frac{N+\theta}{n}\right),\end{equation} we deduce for $r\geq \max(r_{0}, \rho, A\delta, 1)$, $$\lambda_{1}(B_{r})\geq C_{1}r^{-\beta}.$$ Combining this with (\ref{vergleichcapla1}), we obtain for every $r\geq \max(r_{0}, \rho, A\delta, 1)$, \begin{equation}\label{fuervolgrowth2l}\max_{\partial B_{r}}h\leq C_{2}r^{\beta+N_{H}},\end{equation} where $$C_{2}=C_{0}C_{1}^{-1}\mu_{0}(K_{0})^{-1}.$$ Hence, (\ref{fuervolgrowth2l}), the polynomial volume growth condition (\ref{PolygrowthOM}) and the maximum principle imply that for all $r\geq \max(r_{0}, \rho, A\delta, 1)$, $$\widetilde{V}_{\Omega}(r)=\int_{B_{r}\cap \Omega}{h^{2}d\mu}\leq V(r)\max_{\partial B_{r}}h^{2}\leq C_{2}^{2}Cr^{N+2(\beta+N_{H})},$$ which proves (\ref{Volumegrowththeotilde2}) with $\widetilde{r}_{0}=\max(r_{0}, \rho, A\delta, 1)$, $\widetilde{N}=2(\beta+N_{H})+N$ and $\widetilde{C}=C_{2}^{2}C,$ and implies that the weighted manifold $(\Omega, \widetilde{\mu})$ has a polynomial volume growth. Thus, the hypotheses of Theorem \ref{thmlowerptbd} are fulfilled and we obtain by (\ref{lowerbndend}), that for any $x\in \Omega$, there exist $\widetilde{t}_{x}>0$ and $\widetilde{c}_{x}>0$, such that for all $t\geq \widetilde{t}_{x}$, $$\widetilde{p}_{t}^{\Omega}(x, x)\geq \frac{\widetilde{c}_{x}}{(t\log t)^{\beta+N_{H}+N/2}},$$ where $\beta$ is defined by (\ref{defvonbetfornh}). 
Since $h$ is harmonic in $\Omega$, we therefore conclude by (\ref{relhtransformheat}) that for any $x\in \Omega$ and all $t\geq \widetilde{t}_{x}$, $$p^{\Omega}_{t}(x, x)=h^{2}(x)\widetilde{p}^{\Omega}_{t}(x, x)\geq \frac{\widetilde{c}_{x}h^{2}(x)}{(t\log t)^{\beta+N_{H}+N/2}},$$ which yields (\ref{lowerbndendparamitharnaanuohneomloc}) for all $x\in M$ by using $p_{t}^{\Omega}\leq p_{t}$ and by means of the local parabolic Harnack inequality. \end{proof} \begin{rem}\normalfont Note that it follows from the non-parabolicity of $\left(\overline{\Omega}, \widetilde{\mu}\right)$, that $4\max\left(N+\theta, \frac{N+\theta}{n}\right)+2N_{H}+N>2$. \end{rem} \subsection{End with relatively connected annuli} \begin{defin}\normalfont We say that a manifold $M$ with fixed point $x_{0}\in M$ satisfies \textit{the relatively connected annuli condition (RCA)} if there exists $A>1$ such that, for any $r>A^{2}$ and all $x, y$ with $d(x_{0}, x)=d(x_{0}, y)=r$, there exists a continuous path $\gamma:[0, 1]\to M$ with $\gamma(0)=x$ and $\gamma(1)=y$, whose image is contained in $B(x_{0}, Ar)\setminus B(x_{0}, A^{-1}r)$. \end{defin} \begin{rem}\normalfont Note that, even though the condition (RCA) is formulated for the specific point $x_{0}$, it is equivalent to the (RCA) condition with respect to any other point $x_{1}$ with possibly a different constant $A$. \end{rem} \begin{example}\normalfont Any \textit{Riemannian model} (see Subsection \ref{exindimtwo} and Section \ref{secisobdry}) with dimension $n\geq 2$ has relatively connected annuli. \end{example} \begin{cor}\label{thmlowerptbdgeo} Let $\Omega$ be an end of a complete non-compact weighted manifold $(M, \mu)$ and assume that $M_{0}$ is a locally Harnack manifold with Harnack radius $\rho>0$, where $M_{0}$ is defined as above. Also assume that there exists $x_{0}\in M_{0}$ so that \begin{itemize} \item $M_{0}$ satisfies (RCA) with some constant $A>1$. 
\item There exist constants $L>0$ and $C>0$ so that for all $r\geq L$, \begin{equation}\label{volumeannuli}V(Ar)-V(A^{-1}r)\leq C\log r,\end{equation} where we denote $V(r)=V(x_{0}, r)$. \item There exists a constant $v_{0}>0$ such that for any $y\in M_{0}$, \begin{equation}\label{lowerboundball}V(y, \rho/3)\geq v_{0}.\end{equation} \end{itemize}Then, for any $x\in M$, there exist $\alpha>0$, $t_{x}>0$ and $c_{x}>0$ such that for all $t\geq t_{x}$, \begin{equation}\label{lowerbndendpara}p_{t}(x, x)\geq \frac{c_{x}}{t^{\alpha}},\end{equation} where $\alpha=\alpha(n, v_{0}, \rho, C)$. \end{cor} \begin{proof} As before, we denote $B_{r}=B(x_{0}, r)$. Obviously, the hypothesis (\ref{lowerboundball}) implies the condition (\ref{upperboundVOm}) with $\theta=0$. Hence, to apply Theorem \ref{thmlocharsph}, it remains to show that $M_{0}$ has a polynomial volume growth as in (\ref{PolygrowthOM}) and $M_{0}$ satisfies the spherical Harnack inequality (\ref{annuliharnack2}). The polynomial volume growth condition (\ref{PolygrowthOM}) follows from (\ref{volumeannuli}). Let us now prove that the spherical Harnack inequality (\ref{annuliharnack2}) holds in $M_{0}$. Assume that $r\geq L$ and cover the set $B_{Ar}\setminus B_{A^{-1}r}$, with balls $B(x_{i}, \rho/3)$ where $x_{i}\in M_{0}$ and $A>1$ is as in (RCA). By applying the Banach process, there exists a number $\tau(r)$ and a subsequence of disjoint balls $\{B(x_{i_{k}}, \rho/3)\}_{k=1}^{\tau(r)}$ such that the union of the balls $\{B(x_{i_{k}}, \rho)\}_{k=1}^{\tau(r)}$ cover the set $B_{Ar}\setminus B_{A^{-1}r}$. 
Hence, it follows from (\ref{volumeannuli}), that \begin{equation}\label{summederballsabsch}\sum_{k=1}^{\tau(r)}{V(x_{i_{k}}, \rho/3)}\leq V(Ar)-V(A^{-1}r)\leq C\log r.\end{equation} Then the hypothesis (\ref{lowerboundball}), combined with (\ref{summederballsabsch}), implies that \begin{equation}\label{anzahlderballschain}\tau(r)\leq \frac{C\log r}{v_{0}}.\end{equation} Let $y_{1}, y_{2}$ be two points on $\partial B_{r}$ such that $\min_{\partial B_{r}}u=u(y_{1})$ and $\max_{\partial B_{r}}u=u(y_{2})$ and $\gamma$ be a continuous path connecting them in $B_{Ar}\setminus B_{A^{-1}r}$ as it is ensured by (RCA) for all $r>A^{2}$. Now select out of the sequence $\{B(x_{i_{k}}, \rho)\}_{k=1}^{\tau(r)}$ those balls that intersect $\gamma$. In this way, we obtain a chain of at most $\tau(r)$ balls, which connect $y_{1}$ and $y_{2}$. Now let $u$ be a positive harmonic function in $M_{0}\setminus\overline{B_{A_{0}^{-1}r}}$, where $A_{0}\geq A$ is such that any ball of this chain lies in $M_{0}\setminus \overline{B_{A_{0}^{-1}r}}$ for all $1\leq i\leq \tau(r)$ and $r>A_{0}^{2}$. Applying the local elliptic Harnack inequality to $u$ repeatedly in the balls of this chain, we obtain $$\max_{\partial B_{r}}u=u(y_{2})\leq (C_{\rho})^{\tau}u(y_{1})=(C_{\rho})^{\tau}\min_{\partial B_{r}}u,$$ where $C_{\rho}$ is the Harnack constant in all $B(x_{i_{k}}, \rho)$. Together with (\ref{anzahlderballschain}), this yields $$\max_{\partial B_{r}}u\leq r^{\frac{C}{v_{0}}\log C_{\rho}}\min_{\partial B_{r}}u,$$ which proves the spherical Harnack inequality (\ref{annuliharnack2}) with $N_{H}=\frac{C}{v_{0}}\log C_{\rho}$. Thus the hypotheses of Theorem \ref{thmlocharsph} are fulfilled and we obtain from (\ref{lowerbndendparamitharnaanuohneomloc}), that for any $x\in M$, there exist $t_{x}>0$, $c_{x}>0$ and $\alpha>0$ such that for all $t\geq t_{x}$, $$p_{t}(x, x)\geq \frac{c_{x}}{t^{\alpha}},$$ where $\alpha=\alpha(n, N_{H})$, which finishes the proof. 
\end{proof} \begin{defin}\normalfont As usual, for any piecewise $C^{1}$ path $\gamma:I\to M$, where $I$ is an interval in $\mathbb{R}$, denote by $l(\gamma)$ the length of $\gamma$ defined by $$l(\gamma)=\int_{I}{|\dot{\gamma}(t)|dt},$$ where $\dot{\gamma}$ is the velocity of $\gamma$, given by $\dot{\gamma}(t)(f)=\frac{d}{dt}f(\gamma(t))$ for any $f\in C^{\infty}(M)$. \end{defin} \begin{cor}\label{thmlowerptbdgeo2} Let $\Omega$ be an end of a complete non-compact weighted manifold $(M, \mu)$ and assume that for some $\kappa\geq0$, we have \begin{equation}\label{ricciincorlow}Ric(M_{0})\geq -\kappa,\end{equation} where $M_{0}$ is defined as above. Suppose that there exists $x_{0}\in M_{0}$ so that \begin{itemize} \item $M_{0}$ satisfies (RCA) with $A>1$ and piecewise $C^{1}$ path $\gamma$ so that there is some constant $c>0$ such that for all $r>A^{2}$, \begin{equation}\label{laengegeodesicbound}l(\gamma)\leq c\log r.\end{equation} \item There are constants $v_{0}>0$ and $\theta\geq 0$ so that for any $y\in M_{0}$, if $d(y, x_{0})\leq R$ for some $R>1$, it holds that $$V(y, \rho)\geq v_{0}R^{-\theta}.$$ \end{itemize}Then, for any $x\in M$, there exist $\alpha>0$, $t_{x}>0$ and $c_{x}>0$ such that for all $t\geq t_{x}$, \begin{equation}\label{lowerbndendparavol}p_{t}(x, x)\geq \frac{c_{x}}{t^{\alpha}},\end{equation} where $\alpha=\alpha(c, \theta, \kappa)$. \end{cor} \begin{proof} The assumption (\ref{ricciincorlow}) implies that $M_{0}$ is a locally Harnack manifold. Hence we are left to show that $M_{0}$ has a polynomial volume growth as in (\ref{PolygrowthOM}) and satisfies the spherical Harnack inequality (\ref{annuliharnack2}) to apply Theorem \ref{thmlocharsph}. Again we denote $B_{r}=B(x_{0}, r)$ and $V(r)=V(x_{0}, r)$. 
By the Bishop-Gromov theorem, the hypothesis (\ref{ricciincorlow}) implies that there exists a constant $C_{\kappa}>0$, so that for any $y\in M_{0}$ and $R>1$, \begin{equation}\label{Volumeboundexp}V(y, R)\leq e^{C_{\kappa}R}.\end{equation} Together with the assumption (\ref{laengegeodesicbound}), this yields that the polynomial volume growth condition (\ref{PolygrowthOM}) holds in $M_{0}$. Let us now show that $M_{0}$ satisfies the spherical Harnack inequality (\ref{annuliharnack2}). Let $A>1$ be as above and assume that $r>A^{2}$. Denote by $y_{1}, y_{2}$ the points on $\partial B_{r}$ such that $\min_{\partial B_{r}}u=u(y_{1})$ and $\max_{\partial B_{r}}u=u(y_{2})$ and let $\gamma$ be a continuous path connecting them in $B_{Ar}\setminus B_{A^{-1}r}$ as it is ensured by (RCA). Then cover the path $\gamma$ with balls $\{B(x_{i}, \rho)\}_{i=1}^{\tau(r)}$, where $x_{i}\in M_{0}$ and $\rho>0$. Now let $u$ be a positive harmonic function in $M_{0}\setminus\overline{B_{A_{0}^{-1}r}}$, where $A_{0}\geq A$ is such that $B(x_{i}, \rho)\subset M_{0}\setminus \overline{B_{A_{0}^{-1}r}}$ for all $1\leq i\leq \tau(r)$ and $r>A_{0}^{2}$. In this way, we obtain a chain of at most $\tau(r)$ balls $B(x_{i}, \rho)$, which connect $y_{1}$ and $y_{2}$. By (\ref{laengegeodesicbound}), we deduce that \begin{equation}\label{anzahlballsinsquence} \tau(r)\leq \frac{c}{\rho}\log(r).\end{equation} Applying the local elliptic Harnack inequality to $u$ repeatedly in the balls of this chain, we obtain $$\max_{\partial B_{r}}u=u(y_{2})\leq (C_{\rho})^{\tau}u(y_{1})=(C_{\rho})^{\tau}\min_{\partial B_{r}}u,$$ where $C_{\rho}$ is the Harnack constant in all $B(x_{i}, \rho)$. Together with (\ref{anzahlballsinsquence}), this yields $$\max_{\partial B_{r}}u\leq r^{\frac{c}{\rho}\log C_{\rho}}\min_{\partial B_{r}}u,$$ which proves (\ref{annuliharnack2}) with $N_{H}=\frac{c}{\rho}\log C_{\rho}$. 
Thus the hypotheses of Theorem \ref{thmlocharsph} are fulfilled and we obtain by (\ref{lowerbndendparamitharnaanuohneomloc}), that for any $x\in M$, there exist $t_{x}>0$, $c_{x}>0$ and $\alpha>0$ such that for all $t\geq t_{x}$, $$p_{t}(x, x)\geq \frac{c_{x}}{t^{\alpha}},$$ which finishes the proof. \end{proof} \subsection{An example in dimension two}\label{exindimtwo} Consider the topological space $M=(0, +\infty)\times \mathbb{S}^{1}$, that is, any point $x\in M$ can be represented in the polar coordinates $x=(r, \theta)$ with $r>0$ and $\theta\in \mathbb{S}^{1}$. Equip $M$ with the Riemannian metric $ds^{2}$ given by \begin{equation}\label{metricmodelman}ds^{2}=dr^{2}+\psi^{2}(r)d\theta^{2},\end{equation} where $\psi(r)$ is a smooth positive function on $(0, +\infty)$ and $d\theta^{2}$ is the standard Riemannian metric on $\mathbb{S}^{1}$. In this case, $M$ is called a \textit{two-dimensional Riemannian model with a pole}. \begin{rem}\normalfont A necessary and sufficient condition for the existence of this manifold is that $\psi$ satisfies the conditions $\psi(0)=0$ and $\psi'(0)=1$. This ensures that the metric $ds^{2}$ can be smoothly extended to the origin $r=0$ (see \cite{greene2006function}). \end{rem} We define the area function $S$ on $(0, +\infty)$ by \begin{equation}\label{defofarea}S(r)=\psi(r).\end{equation} \begin{prop}\label{propforspherintwo} Let $M$ be a two-dimensional Riemannian model with a pole. Suppose that for any $A>1$, there exists a constant $c>0$, so that for all large enough $r$, \begin{equation}\label{langsamevariquo}\sup_{t\in (A^{-1}r, Ar)}\frac{S''_{+}(t)}{S(t)}\leq c\frac{S''_{+}(r)}{S(r)}.\end{equation} Also assume that there exists a constant $N>0$ such that, for every large enough $r$, \begin{equation}\label{spherhartwodimmod}\frac{S(r)}{r}+\sqrt{S''_{+}(r)S(r)}\leq N\log(r).\end{equation} Then the spherical Harnack inequality (\ref{annuliharnack2}) holds in $M$. 
\end{prop} \begin{proof} Fix some $x_{0}\in M$ and denote $B_{r}=B(x_{0}, r)$. Since any model manifold of dimension $n\geq 2$ satisfies the (RCA) condition, there exists $A_{0}>1$ such that for all $r>A_{0}^{2}$ and any $x_{1}, x_{2}\in \partial B_{r}$, there exists $T>0$ and a continuous path $\gamma:[0, T]\to M$ such that $\gamma(0)=x_{1}$ and $\gamma(T)=x_{2}$, whose image is contained in $B_{A_{0}r}\setminus B_{A_{0}^{-1}r}$. Let us choose $A>A_{0}$ so that there exists a constant $\epsilon>0$, such that $B(x, R)\subset B_{Ar}\setminus \overline{B_{A^{-1}r}}$, for any $x\in \gamma([0, T])$, where $R=\epsilon r$. Let $u$ be a positive harmonic function in $M\setminus\overline{B_{A^{-1}r}}$ and $x_{1},x_{2}\in \partial B_{r}$ such that $\max_{\partial B_{r}}u=u(x_{1})$ and $\min_{\partial B_{r}}u=u(x_{2})$. Thus, we have to show that there are constants $N_{H}>0$ and $C_{H}>0$, so that if $r$ is large enough, then \begin{equation}\label{sphericalharnackmodtwo}u(x_{1})\leq C_{H}r^{N_{H}}u(x_{2}).\end{equation} Let $x\in \gamma([0, T])$. Recall from [\cite{Grigoryan2012}, Exercise 3.31], that the Ricci curvature $Ric$ on $M$ is given by \begin{equation}\label{gaussin2S}Ric=-\frac{S''}{S}.\end{equation} Hence, we obtain from (\ref{gaussin2S}), $$Ric(x)\geq\inf_{t\in (A^{-1}r, Ar)}\left(-\frac{S''(t)}{S(t)}\right)\geq-\sup_{t\in (A^{-1}r, Ar)}\left(\frac{S''_{+}(t)}{S(t)}\right).$$ By (\ref{langsamevariquo}), we get, assuming that $r$ is large enough, \begin{equation}\label{riccigaussgamma}Ric(x)\geq-c\frac{S''_{+}(r)}{S(r)}=:-\kappa(r).\end{equation} Clearly, we can assume that $|\gamma'(t)|=1$. 
We have $$\int_{0}^{T}{\frac{|\nabla u(\gamma(t))|}{u(\gamma(t))}dt}\leq \sup_{0\leq t\leq T}\frac{|\nabla u(\gamma(t))|}{u(\gamma(t))}\int_{0}^{T}{dt}\leq \sup_{0\leq t\leq T}\frac{|\nabla u(\gamma(t))|}{u(\gamma(t))}d(x_{1}, x_{2}).$$ Again, since $M$ has dimension $n=2$, and as $x_{1},x_{2}\in \partial B_{r}$, we see that $$d(x_{1}, x_{2})\leq S(r),$$ whence $$\int_{0}^{T}{\frac{|\nabla u(\gamma(t))|}{u(\gamma(t))}dt}\leq \sup_{0\leq t\leq T}\frac{|\nabla u(\gamma(t))|}{u(\gamma(t))}S(r).$$ Applying the well-known gradient estimate (cf. \cite{cheng1975differential}) to the harmonic function $u$ in all balls $B(x, R)$, we obtain, $$\sup_{0\leq t\leq T}\frac{|\nabla u(\gamma(t))|}{u(\gamma(t))}\leq C_{n}\left(\frac{1+R\sqrt{\kappa(r)}}{R}\right),$$ where $\kappa(r)$ is given by (\ref{riccigaussgamma}) and $C_{n}>0$ is a constant depending only on $n$. Therefore, we deduce \begin{align*}\log u(x_{1})-\log u(x_{2})=\left|\int_{0}^{T}{\frac{d\log u(\gamma(t))}{dt}dt} \right|&\leq\int_{0}^{T}{\frac{\left|\frac{d}{dt}u(\gamma(t))\right|}{u(\gamma(t))}dt}\\&=\int_{0}^{T}{\frac{|\langle \nabla u, \gamma'(t)\rangle|}{u(\gamma(t))}dt}\\&\leq\int_{0}^{T}{\frac{|\nabla u(\gamma(t))|}{u(\gamma(t))}dt}\\&\leq C_{n}\left(\frac{1}{\epsilon r}+\sqrt{\kappa(r)}\right)S(r),\end{align*} which is equivalent to $$u(x_{1})\leq \exp\left(C_{n}\left(\frac{S(r)}{\epsilon r}+S(r)\sqrt{\kappa(r)}\right)\right)u(x_{2}).$$ Hence, we get by (\ref{riccigaussgamma}), $$u(x_{1})\leq \exp\left(C_{n}\left(\frac{S(r)}{\epsilon r}+\sqrt{cS''_{+}(r)S(r)}\right)\right)u(x_{2}).$$ Finally, by (\ref{spherhartwodimmod}), we deduce for large enough $r$, $$u(x_{1})\leq r^{C_{n}\max\left\{\sqrt{c}, \frac{1}{\epsilon}\right\}N}u(x_{2}),$$ which proves (\ref{sphericalharnackmodtwo}) with $C_{H}=1$ and $N_{H}=C_{n}\max\left\{\sqrt{c}, \frac{1}{\epsilon}\right\}N$ and finishes the proof. 
\end{proof} \begin{example}\normalfont Let $(M, \mu)$ be a two-dimensional weighted manifold with end $\Omega$ and, following the notation in Theorem \ref{thmlocharsph}, suppose that $M_{0}$ is a Riemannian model with a pole such that $$S_{0}(r)=\left\{ \begin{array}{lc} r\log r,&r\geq 2 \\ r,&r\leq 1.\end{array}\right.$$ Let us show that $M_{0}$ satisfies the hypotheses of Theorem \ref{thmlocharsph} so that for any $x\in M$, there exist $t_{x}>0$, $c_{x}>0$ and $\alpha>0$ such that for all $t\geq t_{x}$, \begin{equation}\label{lowermodelheatfulinex}p_{t}(x, x)\geq \frac{c_{x}}{t^{\alpha}}.\end{equation} Since $S_{0}''(r)=\frac{1}{r}$ for $r\geq 2$, the inequality (\ref{langsamevariquo}) is satisfied and also $$\frac{S_{0}(r)}{r}+\sqrt{(S_{0}'')_{+}(r)S_{0}(r)}=\log r+\sqrt{\log r}\leq 2 \log r,$$ whence (\ref{spherhartwodimmod}) holds and we get that $M_{0}$ satisfies the spherical Harnack inequality (\ref{annuliharnack2}). On the other hand, we have for $r\geq 2$, $-\frac{S_{0}''(r)}{S_{0}(r)}=-\frac{1}{r^{2}\log r}$ so that it follows from (\ref{gaussin2S}) that $M_{0}$ has non-positive sectional curvature which is bounded below. Hence, $M_{0}$ is a locally Harnack manifold and, as it is simply connected, is a Cartan-Hadamard manifold which yields that the balls in $M_{0}$ have at least euclidean volume. Therefore, condition (\ref{upperboundVOm}) holds as well and we conclude from Theorem \ref{thmlocharsph} that $(M, \mu)$ admits the estimate (\ref{lowermodelheatfulinex}). \end{example} \section{Isoperimetric inequalities for warped products}\label{Isoperimetric inequalities for warped products} Let $(M, \mu)$ be a weighted manifold. \begin{defin}\normalfont For any Borel set $A\subset M$, define its perimeter $\mu^{+}(A)$ by $$\mu^{+}(A)=\liminf_{r\to 0^{+}}\frac{\mu(A^{r})-\mu(A)}{r},$$ where $A^{r}$ is the $r$-neighborhood of $A$ with respect to the Riemannian metric of $M$. 
\end{defin} \begin{defin}\normalfont We say that $(M, \mu)$ admits the \textit{lower isoperimetric function} $J$ if, for any precompact open set $U\subset M$ with smooth boundary, \begin{equation}\label{isoperimetricdefJ}\mu^{+}(U)\geq J(\mu(U)).\end{equation} \end{defin} For example, the euclidean space $\mathbb{R}^{n}$ with the Lebesgue measure satisfies the inequality in (\ref{isoperimetricdefJ}) with the function $J(v)=c_{n}v^{\frac{n-1}{n}}$. \subsection{Setting and main theorem}\label{settingandmainiso} Let $(M_{1}, \mu_{1})$ and $(M_{2}, \mu_{2})$ be weighted manifolds and let $M=M_{1}\times M_{2}$ be the direct product of $M_{1}$ and $M_{2}$ as topological spaces. This means that any point $z\in M$ can be written as $z=(x, y)$ with $x\in M_{1}$ and $y\in M_{2}$. Then we define the Riemannian metric $ds^{2}$ on $M$ by \begin{equation}\label{riemmetriconM}ds^{2}=dx^{2}+\psi^{2}(x)dy^{2},\end{equation} where $\psi$ is a smooth positive function on $M_{1}$ and $dx^{2}$ and $dy^{2}$ denote the Riemannian metrics on $M_{1}$ and $M_{2}$, respectively. Let us define the measure $\mu$ on $M$ by \begin{equation}\label{defofmeasonproduct}\mu=\mu_{1}\times \mu_{2}\end{equation} and note that $(M, \mu)$ is then a weighted manifold with respect to the metric in (\ref{riemmetriconM}). Denote by $\nabla$ the gradient on $M$ and with $\nabla_{x}$ and $\nabla_{y}$ the gradients on $M_{1}$ and $M_{2}$, respectively. It follows from (\ref{riemmetriconM}), that we have the identity \begin{equation}\label{identitygrad}|\nabla u|^{2}=|\nabla_{x}u|^{2}+\frac{1}{\psi^{2}(x)}|\nabla_{y}u|^{2},\end{equation} for any smooth function $u$ on $M$. \begin{defin}\normalfont Let $\varphi:(0, +\infty)\to (0, +\infty)$ be a monotone decreasing function. 
Then we define the generalized inverse function $\phi$ of $\varphi$ on $(0, +\infty)$ by \begin{equation}\label{defgeneinv}\phi(s)=\sup\{t>0:\varphi(t)>s\}.\end{equation} \end{defin} \begin{lemma}[\cite{de2015study}, Proposition 4.2] The generalized inverse $\phi$ of $\varphi$ has the following properties: \begin{enumerate} \item $\phi$ is monotone decreasing, right continuous and $\lim_{s\to \infty}\phi(s)=0$; \item $\varphi$ is right continuous if and only if $\varphi$ itself is the generalized inverse function of $\phi$, that is \begin{equation}\label{itselfgeneralized}\varphi(t)=\sup\{s>0:\phi(s)>t\};\end{equation} \item we have the identity \begin{equation}\label{genehavesameint}\int_{0}^{\infty} {\varphi(t) dt} = \int_{0}^{\infty}{\phi(s)ds}.\end{equation} \end{enumerate} \end{lemma} \begin{lemma}[\cite{federer2014geometric}]\label{approxofboundary} Let $U$ be a precompact open subset of a weighted manifold $(M, \mu)$ with smooth boundary. Then $$\mu^{+}(U)=\inf_{\{u_{n}\}}\limsup_{n\to \infty}\int_{M}{|\nabla u_{n}|d\mu}=\inf_{\{u_{n}\}}\liminf_{n\to \infty}\int_{M}{|\nabla u_{n}|d\mu},$$ where $\{u_{n}\}_{n\in \mathbb{N}}$ is a monotone increasing sequence of smooth non-negative functions with compact support, converging pointwise to the characteristic function of the set $U$. \end{lemma} The proof of the following theorem follows the ideas of Theorem 1 in \cite{grigor1985isoperimetric}, where an isoperimetric inequality is obtained for Riemannian products $M=M_{1}\times M_{2}$ of two Riemannian manifolds $M_{1}$ and $M_{2}$. \begin{thm}\label{thm1iso} Let $(M_{1}, \mu_{1})$ and $(M_{2}, \mu_{2})$ be weighted manifolds and let the weighted manifold $(M, \mu)$ be defined as above, that is, the Riemannian metric on $M$ is defined by (\ref{riemmetriconM}) and the measure $\mu$ is defined by (\ref{defofmeasonproduct}). 
Assume that there exists a constant $C_{0}>0$, such that for all $x\in M_{1}$, \begin{equation}\label{assumponpsi}\psi(x)\leq C_{0}.\end{equation} Suppose that $(M_{1}, \mu_{1})$ and $(M_{2}, \mu_{2})$ have the lower isoperimetric functions $J_{1}$ and $J_{2}$, which are continuous on the intervals $(0, \mu_{1}(M_{1}))$ and $(0, \mu_{2}(M_{2}))$, respectively. Then $(M, \mu)$ admits the lower isoperimetric function $J$, defined by $$J(v)=c\inf_{\varphi, \phi}\left(\int_{0}^{\infty} {J_{1}(\varphi(t))dt}+\int_{0}^{\infty}{J_{2}(\phi(s))ds}.\right),$$ where $c=\frac{1}{2}\min\left\{1,\frac{1}{C_{0}}\right\}$ and $\varphi$ and $\phi$ are generalized mutually inverse functions such that \begin{equation}\label{varphipsileq}\varphi\leq \mu_{1}(M_{1}), \quad\phi\leq \mu_{2}(M_{2}),\end{equation} and \begin{equation}\label{intvarphipsieq}v=\int_{0}^{\infty} {\varphi(t) dt} = \int_{0}^{\infty}{\phi(s)ds}.\end{equation} \end{thm} \begin{proof} Let $U$ be an open precompact set in $M$ with smooth boundary such that $\mu(U)=v$. Let us define the function \begin{equation}\label{defvonisoonM}I(v)=\inf_{\varphi, \phi}\left(\int_{0}^{\infty} {J_{1}(\varphi(t))dt}+\int_{0}^{\infty}{J_{2}(\phi(s))ds}.\right),\end{equation} where $\varphi$ and $\phi$ are generalized mutually inverse functions satisfying (\ref{varphipsileq}) and (\ref{intvarphipsieq}). We need to prove that \begin{equation}\label{isomitJimproof}\mu^{+}(U)\geq cI(v),\end{equation} where $I$ is defined by (\ref{defvonisoonM}) and $c$ is defined as above. Let $\{f_{n}\}_{n\in \mathbb{N}}$ be a monotone increasing sequence of smooth non-negative functions on $M$ with compact support such that $f_{n}\to 1_{U}$ as $n\to \infty$. 
Note that by Lemma \ref{approxofboundary}, it suffices to show that \begin{equation}\label{limitwithperimeter}\limsup_{n\to \infty}\int_{M}{|\nabla f_{n}|d\mu}\geq cI(v).\end{equation} By the identity (\ref{identitygrad}) and using (\ref{assumponpsi}), we have $$|\nabla f_{n}|^{2}=|\nabla_{x}f_{n}|^{2}+\frac{1}{\psi(x)^{2}}|\nabla_{y}f_{n}|^{2}\geq \frac{1}{2}\min\left\{1,\frac{1}{C_{0}}\right\}^{2}\left(|\nabla_{x}f_{n}|+|\nabla_{y}f_{n}|\right)^{2}.$$ Together with (\ref{limitwithperimeter}), it therefore suffices to prove that \begin{equation}\label{whattoshow}\limsup_{n\to \infty}\int_{M}{|\nabla_{x} f_{n}|d\mu}+\limsup_{n\to \infty}\int_{M}{|\nabla_{y} f_{n}|d\mu}\geq I(v).\end{equation} Let us first estimate the second summand on the left-hand side of (\ref{whattoshow}). For that purpose, consider for every $x\in M_{1}$, the section $$U_{x}=\{y\in M_{2}: (x, y)\in U\}.$$ By Sard's theorem, the set $U_{x}$ has smooth boundary for almost all $x$. Considering the function $f_{n}(x, y)$ as a function on $M_{2}$ with fixed $x\in M_{1}$, we obtain by Lemma \ref{approxofboundary} for almost all $x$, \begin{equation}\label{approxwithsecone} \liminf_{n\to \infty}\int_{M_{2}}{|\nabla_{y} f_{n}(x, y)|d\mu_{2}(y)}\geq\mu_{2}^{+}(U_{x}).\end{equation} Integrating this over $M_{1}$ and using Fatou's lemma, we deduce \begin{equation}\label{firstsumminproof}\liminf_{n\to \infty}\int_{M}{|\nabla_{y} f_{n}|d\mu}\geq \int_{M_{1}}{\mu_{2}^{+}(U_{x})d\mu_{1}(x)}.\end{equation} The first summand on the left-hand side of (\ref{whattoshow}) could be estimated analogously, but instead, we will estimate it using the assumption that $(M_{1}, \mu_{1})$ and $(M_{2}, \mu_{2})$ admit lower isoperimetric functions $J_{1}$ and $J_{2}$, respectively. 
First, by Fubini's formula, we have \begin{equation}\label{startsecsuminproof} \int_{M}{|\nabla_{x}f_{n}|d\mu}=\int_{M_{1}}{\int_{M_{2}}{|\nabla_{x}f_{n}|d\mu_{2}}d\mu_{1}}\geq \int_{M_{1}}{\left|\nabla_{x}\int_{M_{2}}{f_{n}(x, y)d\mu_{2}(y)}\right|d\mu_{1}(x)}.\end{equation} Now let us consider on $M_{1}$ the function $$F_{n}(x)=\int_{M_{2}}{f_{n}(x, y)d\mu_{2}(y)}.$$ Note that $F_{n}(x)$ is a monotone increasing sequence of non-negative smooth functions on $M_{1}$, such that \begin{equation}\label{limitofFn}F(x):=\lim_{n\to \infty}F_{n}(x)=\mu_{2}(U_{x}).\end{equation} Since $F_{n}$ is smooth for all $n$, we deduce that the sets $\{F_{n}>t\}$ have smooth boundary, so that we can apply the isoperimetric inequality on $M_{1}$, that is, $$\mu_{1}^{+}\{F_{n}>t\}\geq J_{1}(\mu_{1}\{F_{n}>t\}).$$ Hence, we obtain, using (\ref{startsecsuminproof}) and the co-area formula, \begin{align*}\int_{M}{|\nabla_{x}f_{n}|d\mu}\geq \int_{M_{1}}{|\nabla_{x}F_{n}|d\mu_{1}}&=\int_{0}^{\infty}{\mu_{1}'\{F_{n}=t\}dt}\\&=\int_{0}^{\infty}{\mu_{1}^{+}\{F_{n}>t\}dt}\\&\geq \int_{0}^{\infty}{J_{1}(\mu_{1}\{F_{n}>t\})dt}.\end{align*} Passing to the limit as $n\to \infty$, we get by Fatou's lemma, using the continuity of $J_{1}$, \begin{equation}\label{secondsuminproof}\limsup_{n\to \infty}\int_{M}{|\nabla_{x}f_{n}|d\mu}\geq \int_{0}^{\infty}{J_{1}(\mu_{1}\{F>t\})dt}.\end{equation} By the isoperimetric inequality on $M_{2}$ with function $J_{2}$ and by (\ref{limitofFn}), $$\mu_{2}^{+}(U_{x})\geq J_{2}(\mu_{2}(U_{x}))=J_{2}(F(x)),$$ whence combining this with (\ref{firstsumminproof}) and (\ref{secondsuminproof}), we get \begin{equation}\label{combiningbothsum}\limsup_{n\to \infty}\int_{M}{|\nabla_{x} f_{n}|d\mu}+\limsup_{n\to \infty}\int_{M}{|\nabla_{y} f_{n}|d\mu}\geq \int_{M_{1}}{J_{2}(F(x))d\mu_{1}(x)}+\int_{0}^{\infty}{J_{1}(\mu_{1}\{F>t\})dt}.\end{equation} Let us set $$\varphi(t)=\mu_{1}\{F>t\}$$ and note that $\varphi$ is monotone decreasing and right-continuous. 
Let $\phi$ be the generalized inverse function to $\varphi$ defined by (\ref{defgeneinv}). Then we obtain by (\ref{itselfgeneralized}), \begin{equation}\label{equimeaspsiF}\sup\{s>0:\phi(s)>t\}=\mu_{1}\{F>t\},\end{equation} which means that $\phi$ and $F$ are equimeasurable. Therefore, $$\varphi\leq \mu_{1}(M_{1}),\quad \phi\leq \mu_{2}(M_{2}),$$ and by (\ref{genehavesameint}), the definition of $\varphi$ and Fubini's formula, $$\int_{0}^{\infty} {\phi(t) dt} = \int_{0}^{\infty}{\varphi(t)dt}=\int_{M_{1}}{Fd\mu_{1}}=\mu(U)=v.$$ Hence, the pair $\varphi$, $\phi$ satisfies the condition in (\ref{intvarphipsieq}). Note that by (\ref{equimeaspsiF}), $$\int_{M_{1}}{J_{2}(F(x))d\mu_{1}(x)}=\int_{0}^{\infty}{J_{2}(\phi(t))dt},$$ whence we obtain for the right-hand side of (\ref{combiningbothsum}), $$\int_{M_{1}}{J_{2}(F(x))d\mu_{1}(x)}+\int_{0}^{\infty}{J_{1}(\mu_{1}\{F>t\})dt}=\int_{0}^{\infty}{J_{2}(\phi(t))dt}+\int_{0}^{\infty} {J_{1}(\varphi(t))dt}\geq I(v),$$ which proves (\ref{whattoshow}) and thus, finishes the proof. \end{proof} Let $P>0$. Given two non-negative functions $f$ on $(0, +\infty)$ and $g$ on $(0, P)$ define the function $h$ on $(0, +\infty)$ by \begin{equation}\label{defvonhinineq}h(v)=\inf_{\varphi, \phi}\left(\int_{0}^{\infty} {f(\varphi(t))dt}+\int_{0}^{\infty}{g(\phi(s))ds}.\right),\end{equation} where $\varphi$ and $\phi$ are generalized mutually inverse functions on $(0, +\infty)$ such that \begin{equation}\label{gleichheitderint}\int_{0}^{\infty} {\varphi(t) dt} = \int_{0}^{\infty}{\phi(s)ds}=v.\end{equation} and with the condition that $\phi< P$. \begin{lemma}\label{afunctiineq} Let $f$ and $g$ be continuous functions on the intervals $(0, +\infty)$ and $(0, P)$, respectively and suppose that $g$ is symmetric with respect to $\frac{1}{2}P$. 
Also, assume that the functions $\frac{f(x)}{x}$ and $\frac{g(y)}{y}$ are monotone decreasing while the functions $f$ and $g$ are monotone increasing on the intervals $(0, +\infty)$ and $\left(0, \frac{P}{2}\right)$, respectively. Then, for any $v>0$, \begin{equation}\label{statementoffuncineq}h(v)\geq \min\left(\frac{1}{6}h_{0}(v),\frac{1}{8} f\left(\frac{v}{P}\right)P \right),\end{equation} where the function $h_{0}$ is defined for all $v>0$, by \begin{equation}\label{defvonhnull}h_{0}(v)=\inf_{\overset{xy=v}{x>0,~0<y\leq\frac{1}{2}P}}(f(x)y+g(y)x).\end{equation} \end{lemma} \begin{rem}\normalfont A similar functional inequality was stated in [\cite{grigor1985isoperimetric}, Theorem 2a] without proof. \end{rem} In the following we denote by $|A|$ the Lebesgue measure of a domain $A\subset \mathbb{R}^{2}$. \begin{proof} By an approximation argument, we can assume that $\varphi$ is strictly monotone decreasing and continuous on $(0, P)$ so that $\phi$, defined as above, is the inverse function of $\varphi$ on $(0, +\infty)$. Note that $\phi$ is then also strictly monotone decreasing and continuous and still satisfies $\phi<P$. For such fixed $\varphi, \phi$, let us denote \begin{equation}\label{defvonI}S=\int_{0}^{\infty} {f(\varphi(t))dt}+\int_{0}^{\infty}{g(\phi(s))ds},\end{equation} so that it suffices to prove that \begin{equation}\label{whattoshowwithI}S\geq \min\left(\frac{1}{6}h_{0}(v), \frac{1}{8}f\left(\frac{v}{P}\right)P \right),\end{equation} where $h_{0}$ is defined by (\ref{defvonhnull}). 
For any $p\in (0, P)$, consider the domain $$\Phi_{p}=\{(t, s)\in \mathbb{R}^{2}:p\leq t< P, ~0\leq s \leq \varphi(t)\}$$ and for any $q>0$ the domain $$\Psi_{q}=\{(t, s)\in \mathbb{R}^{2}:s\geq q, ~0\leq t \leq \phi(s)\},$$ so that by construction, \begin{equation}\label{partitionofset}v=\int_{0}^{\infty} {\phi(s) ds}=|\Phi_{p}|+|\Psi_{q}|+pq.\end{equation} Since $\phi$ is strictly monotone decreasing and continuous, there exists $q>0$ such that $|\Psi_{q}|= \frac{1}{3}v$. Let us set $p=\phi(q)$. The proof will be split into two main cases. \textbf{Case 1}. Let us assume that $$|\Phi_{p}|\geq \frac{1}{3}v.$$ Then we obtain by (\ref{partitionofset}), that $p\leq \frac{1}{3q}v$. By the monotonicity of $\frac{g(y)}{y}$, we therefore get \begin{align*}\int_{0}^{\infty} {g(\phi(s))ds}\geq \frac{1}{3}xg(y),\end{align*} where $x=3q$ and $y=\frac{1}{3q}v$ and similarly, \begin{align}\nonumber\int_{0}^{\infty}{f(\varphi(t))dt}\geq \frac{1}{3}f(x)y.\end{align} Hence, we obtain that $$S\geq \frac{1}{3}h_{0}(v).$$ \textbf{Case 2}. Let us now assume that $$|\Phi_{p}|< \frac{1}{3}v.$$ Then we can decrease $p$ such that $|\Phi_{p}|=\frac{1}{3}v$. Set $q=\varphi(p)$ and note that this $q$ is larger than the $q$ from Case 1, whence $$|\Psi_{q}|\leq\frac{1}{3}v,$$ so that (\ref{partitionofset}) implies \begin{equation}\label{largepq}\frac{1}{3}v\leq pq\leq\frac{2}{3}v.\end{equation} \textbf{Case 2a}. Assume further that $p\geq \frac{1}{4}P.$ It follows that $$\int_{0}^{\infty}{f(\varphi(t))dt}\geq\frac{1}{3}\frac{f(q)}{q}v$$ and since $f$ is monotone increasing, we conclude $$S\geq \frac{P}{8}f\left(\frac{v}{P}\right),$$ which proves (\ref{whattoshowwithI}). \textbf{Case 2b}. Assume now that $p<\frac{1}{4}P$ and set $q_{0}=\varphi\left(\frac{1}{2}P\right)$. \textbf{Case 2b(i)}. Let us first consider the case when $q_{0}\leq \frac{1}{2}q$. 
Using that $g(y)$ is monotone increasing on $\left(0, \frac{P}{2}\right)$, we obtain, $$\int_{0}^{\infty}{g(\phi(s))ds}\geq \frac{1}{2}g(p)q.$$ Together with $$\int_{0}^{\infty}{f(\varphi(t))dt}\geq f(q)p,$$ we deduce $$S\geq\frac{1}{2}g(p)q+f(q)p,$$ so that setting $x=\frac{v}{p}$ and $y=p$, yields \begin{align*}S\geq\frac{1}{6}\left(f(x)y+g(y)x\right)\geq \frac{1}{6}h_{0}(v).\end{align*} \textbf{Case 2b(ii)}. Finally, let us consider the case when $q_{0}>\frac{1}{2}q$. Note that the condition that $\frac{f(x)}{x}$ is monotone decreasing, implies that for any $\lambda\in (0, 1)$, $$f(\lambda x)\geq \lambda f(x).$$ Together with the monotonicity of $f$, we therefore obtain $$\int_{0}^{P/2}{f(\varphi(t))dt}\geq f(q)\frac{P}{4},$$ which yields $$S\geq f\left(\frac{v}{P}\right)\frac{P}{4},$$ and thus, proves (\ref{whattoshowwithI}) also in this case. \end{proof} \begin{cor}\label{thmforapp} In the situation of Theorem \ref{thm1iso} suppose that $$\mu_{1}(M_{1})=\infty\quad\text{and}\quad\mu_{2}(M_{2})<\infty$$ and assume that $\frac{J_{1}(x)}{x}$ and $\frac{J_{2}(y)}{y}$ are monotone decreasing while the functions $J_{1}$ and $J_{2}$ are monotone increasing on the intervals $(0, +\infty)$ and $\left(0, \frac{1}{2}\mu_{2}(M_{2})\right)$, respectively. Then the manifold $(M, \mu)$ admits the lower isoperimetric function \begin{equation}\label{isoperiineqinthm2a}J(v)= c \min\left(\frac{1}{6}J_{0}(v), \frac{1}{8}J_{1}\left(\frac{v}{\mu_{2}(M_{2})}\right)\mu_{2}(M_{2}) \right),\end{equation} where the function $J_{0}$ is defined for all $v>0$, by \begin{equation}\label{defvonJo}J_{0}(v)=\inf_{\overset{xy=v}{x>0,~0<y\leq\frac{1}{2}\mu_{2}(M_{2})}}(J_{1}(x)y+J_{2}(y)x),\end{equation} and the constant $c$ is defined as in Theorem \ref{thm1iso}. 
\end{cor} \begin{proof} From Theorem \ref{thm1iso}, we know that $(M, \mu)$ has the lower isoperimetric function $cI$, where $I$ is defined by $$I(v)=\inf_{\varphi, \phi}\left(\int_{0}^{\infty} {J_{1}(\varphi(t))dt}+\int_{0}^{\infty}{J_{2}(\phi(s))ds}.\right),$$ where $\varphi$ and $\phi$ are generalized mutually inverse functions satisfying $\phi\leq \mu_{2}(M_{2})$ and the condition in (\ref{gleichheitderint}). Since $\mu_{2}(M_{2})$ is finite, we can assume that the isoperimetric function $J_{2}$ is symmetric with respect to $\frac{1}{2}\mu_{2}(M_{2})$, because the boundaries of an open set and its complement coincide in this case. Applying Lemma \ref{afunctiineq} to $I$ with $f=J_{1}$, $g=J_{2}$ and $P=\mu_{2}(M_{2})$, we obtain $$I(v)\geq \min\left(\frac{1}{6}J_{0}(v),\frac{1}{8} J_{1}\left(\frac{v}{\mu_{2}(M_{2})}\right)\mu_{2}(M_{2}) \right),$$ where function $J_{0}$ is defined by (\ref{defvonJo}), which implies that function $J$ given by (\ref{isoperiineqinthm2a}) is a lower isoperimetric function for $(M, \mu)$. \end{proof} \subsection{Weighted models with boundary}\label{secisobdry} Let us also consider the topological space $M=\mathbb{R}_{+}\times \mathbb{S}^{n-1}$, $n\geq 2$, where $\mathbb{R}_{+}=[0, +\infty)$, so that any point $x\in M$ can be written in the polar form $x=(r, \theta)$ with $r\in\mathbb{R}_{+}$ and $\theta \in \mathbb{S}^{n-1}$. We equip $M$ with the Riemannian metric $ds^{2}$ that is defined in polar coordinates $(r, \theta)$ by $$ds^{2}=dr^{2}+\psi^{2}(r)d\theta^{2}$$ with $\psi(r)$ being a smooth positive function on $\mathbb{R}_{+}$ and $d\theta^{2}$ being the Riemannian metric on $\mathbb{S}^{n-1}$. Note that $M$ with this metric becomes a manifold with boundary $$\delta M=\{(r, \theta)\in M: r=0\}$$ and we call $M$ in this case a \textit{Riemannian model with boundary}. 
The Riemannian measure $\mu$ on $M$ with respect to this metric is given by \begin{equation}\label{measonmodpsi}d\mu=\psi^{n-1}(r)drd\sigma(\theta),\end{equation} where $dr$ denotes the Lebesgue measure on $\mathbb{R}_{+}$ and $d\sigma$ denotes the Riemannian measure on $\mathbb{S}^{n-1}$. Let us normalize the metric $d\theta^{2}$ on $\mathbb{S}^{n-1}$ so that $\sigma(\mathbb{S}^{n-1})=1$ and define the area function $S$ on $\mathbb{R}_{+}$ by \begin{equation}\label{defofareabd}S(r)=\psi^{n-1}(r).\end{equation} Given a smooth positive function $h$ on $M$, that only depends on the polar radius $r$, and a measure $\widetilde{\mu}$ on $M$ defined by $d\widetilde{\mu}=h^{2}d\mu$, we obtain that the weighted manifold $(M, \widetilde{\mu})$ has the area function \begin{equation}\label{areafuncontild}\widetilde{S}(r)=h^{2}(r)S(r).\end{equation} Then the weighted manifold $(M, \widetilde{\mu})$ is called \textit{weighted model} and we get that \begin{equation}\label{fubinionmode}d\widetilde{\mu}=\widetilde{S}(r)drd\sigma(\theta).\end{equation} \begin{thm}\label{propisoformod} Let $(M_{0}, \mu_{0})$ be a model manifold with boundary. Assume that there exists a constant $C_{0}>0$ such that for all $r\geq 0$, \begin{equation}\label{smallpsiass}\psi_{0}(r)\leq C_{0}.\end{equation} Assume also, that \begin{equation}\label{assonareatilde}\widetilde{S_{0}}(r)\simeq \left\{ \begin{array}{lc} r^{\delta}e^{r^{\alpha}},& r\geq 1, \\1,& r<1,\end{array}\right.\end{equation} where $\delta\in \mathbb{R}$ and $\alpha\in (0, 1]$. Then the weighted model $(M_{0}, \widetilde{\mu_{0}})$ admits the lower isoperimetric function $J$ defined by \begin{equation}\label{lowerisoonM0}J(w)=\widetilde{c} \left\{ \begin{array}{lc} \frac{w}{(\log w)^{\frac{1-\alpha}{\alpha}}},& w\geq 2, \\c'w^{\frac{n-1}{n}},& w<2,\end{array}\right.\end{equation} where $\widetilde{c}$ is a small enough constant and $c'$ is a positive constant chosen such that $J$ is continuous. 
\end{thm} \begin{proof} Let $\nu$ be the measure on $\mathbb{R}_{+}$ defined by $d\nu(r)=\widetilde{S_{0}}(r)dr$. Then (\ref{fubinionmode}) implies that measure $\widetilde{\mu_{0}}$ has the representation $\widetilde{\mu_{0}}=\nu\times \sigma$, where $\sigma$ is the normalized Riemannian measure on the sphere $\mathbb{S}^{n-1}$. Obviously, we have by (\ref{assonareatilde}), that $$\nu(\mathbb{R}_{+})=\int_{0}^{\infty}{\widetilde{S_{0}}(r)dr}=+\infty.$$ Since $\widetilde{S_{0}}$ is a positive, continuous and non-decreasing function on $\mathbb{R}_{+}$, we obtain from [\cite{brock2012weighted}, Proposition 3.1], that $(\mathbb{R}_{+}, \nu)$ has a lower isoperimetric function $J_{\nu}(v)$ given by $$J_{\nu}(v)=\widetilde{S_{0}}(r),$$ where $v=\nu([0, r))$. Clearly, for small $R$, we have $J_{\nu}(v)\simeq 1$. For large enough $R$, we obtain $$v=\int_{0}^{R}{\widetilde{S_{0}}(r)dr}\simeq R^{\delta+1-\alpha}e^{R^{\alpha}}.$$ This implies that for large $v$, $$\log v\simeq R^{\alpha}+(\delta+1-\alpha)\log R\simeq R^{\alpha},$$ and thus, $$J_{\nu}(v)=\widetilde{S_{0}}(R)\simeq R^{\delta}e^{R^{\alpha}}=R^{\alpha-1}R^{\delta+1-\alpha}e^{R^{\alpha}}\simeq\frac{v}{(\log v)^{\frac{1-\alpha}{\alpha}}},$$ which proves that \begin{equation}\label{isoforweightedhalfex}J_{\nu}(v)=c_{0} \left\{ \begin{array}{lc} \frac{v}{(\log v)^{\frac{1-\alpha}{\alpha}}},& v\geq 2, \\1,& v<2,\end{array}\right.\end{equation} is a lower isoperimetric function of $(\mathbb{R}_{+}, \nu)$ if $c_{0}>0$ is a small enough constant. Note that $J_{\nu}$ is continuous and monotone increasing on $\mathbb{R}_{+}$ and, since $\alpha\in(0, 1]$, the function $\frac{J_{\nu}(v)}{v}$ is monotone decreasing. 
Let $J_{\sigma}$ be the function defined by \begin{equation}\label{lowerisoforsphere2}J_{\sigma}(v)=c_{n}\left\{ \begin{array}{lcl} v^{\frac{n-2}{n-1}},&\textnormal{if}& 0\leq v\leq \frac{1}{2}, \\(1-v)^{\frac{n-2}{n-1}},&\textnormal{if}& \frac{1}{2}<v\leq 1 ,\end{array}\right.\end{equation} and recall that $J_{\sigma}$ is a lower isoperimetric function for $(\mathbb{S}^{n-1}, \sigma)$ assuming that the constant $c_{n}>0$ is sufficiently small. Since we assume that $\psi_{0}$ satisfies the condition in (\ref{smallpsiass}), we can apply Corollary \ref{thmforapp} and deduce that a lower isoperimetric function $J$ of $(M_{0}, \widetilde{\mu_{0}})$ is given by \begin{equation}\label{isoperimodwithbond}J(w)=c\min\left(\frac{1}{6}J_{0}(w), \frac{1}{8}J_{\nu}\left(w\right)\right),\end{equation} where $J_{0}$ is defined by $$J_{0}(w)=\inf_{\overset{uv=w}{u>0,~0<v\leq\frac{1}{2}}}\left(J_{\nu}(u)v+J_{\sigma}(v)u\right)$$ and the constant $c>0$ is defined as in Theorem \ref{thm1iso}. In order to estimate $J$ in this case, let us consider the function $K$, defined for all $w>0$, by \begin{equation}\label{defvonkinapp}K(w)=\frac{J(w)}{w}=\min\left(\frac{1}{6}K_{0}(w), \frac{1}{8}K_{\nu}\left(w\right)\right),\end{equation} where $K_{0}$ is given by \begin{equation}\label{Knullforsmall}K_{0}(w)=\inf_{\overset{uv=w}{u>0,~0<v\leq\frac{1}{2}}}(K_{1}(u)+K_{\sigma}(v)),\end{equation} where $K_{\nu}(u)=\frac{J_{\nu}(u)}{u}$ and $K_{\sigma}(v)=\frac{J_{\sigma}(v)}{v}$. Observe that, since $K_{\sigma}$ is monotone decreasing, $$K_{0}(w)\geq \inf_{0<v\leq \frac{1}{2}}K_{\sigma}(v)\geq K_{\sigma}\left(\frac{1}{2}\right).$$ Note that if $w\geq 2$ and $v\leq\frac{1}{2}$, then $u=\frac{w}{v}\geq 4$. 
Hence, we obtain that for $w\geq 2$, $$K_{0}(w)\simeq \text{const}.$$ Substituting this into (\ref{defvonkinapp}), we get, using that $K_{\nu}$ is monotone decreasing, $K(w)\simeq K_{\nu}(w)$ for $w\geq 2$, whence \begin{equation}\label{isomixedlarge}J(w)\simeq J_{\nu}(w)\simeq \frac{w}{(\log w)^{\frac{1-\alpha}{\alpha}}},\quad w\geq 2.\end{equation} Note that if $w\leq 2$, the infimum is attained when $u\leq 2$ and the summands in (\ref{Knullforsmall}) are comparable. Observe that this holds true when $$v\simeq w^{\frac{1}{2-\frac{n-2}{n-1}}},$$ so that substituting this into (\ref{Knullforsmall}), we deduce for $w\leq 2$, $$K_{0}(w)\simeq w^{-\frac{1}{n}}.$$ Hence, we obtain that for all $w\leq 2$, $$J_{0}(w)\simeq w^{\frac{n-1}{n}},$$ and therefore by (\ref{isoperimodwithbond}), $$J(w)\simeq w^{\frac{n-1}{n}}, \quad w\leq 2.$$ Combining this with (\ref{isomixedlarge}), we conclude that the function $J(w)$ defined by (\ref{lowerisoonM0}) is a lower isoperimetric function for the weighted model $(M_{0}, \widetilde{\mu_{0}})$. \end{proof} \section{On-diagonal heat kernel upper bounds}\label{On-diagonal heat kernel upper bounds} Let $(M, \mu)$ be a weighted manifold. For any open set $\Omega\subset M$, define \begin{equation}\label{selfadjbottspec}\lambda_{1}(\Omega)=\inf_{u}\frac{\int_{\Omega}{|\nabla u|^{2}d\mu}}{\int_{\Omega}{u^{2}d\mu}},\end{equation} where the infimum is taken over all nonzero Lipschitz functions $u$ compactly supported in $\Omega$. \begin{defin}\normalfont We say that $(M, \mu)$ satisfies a \textit{Faber-Krahn inequality} with a function $\Lambda:(0, +\infty)\to (0, +\infty)$ if, for any non-empty precompact open set $\Omega\subset M$, \begin{equation}\label{deffaberkrahn}\lambda_{1}(\Omega)\geq \Lambda(\mu(\Omega)).\end{equation} \end{defin} It is well-known that a Faber-Krahn inequality (\ref{deffaberkrahn}) implies certain upper bounds of the heat kernel (see \cite{carron1996inegalites} and \cite{grigor2006heat}). 
\begin{prop}[\cite{grigor2006heat}, Theorem 5.1]\label{thmfaberheatupper} Suppose that a weighted manifold $(M, \mu)$ satisfies a Faber-Krahn inequality (\ref{deffaberkrahn}) with $\Lambda$ being a continuous and decreasing function such that \begin{equation}\label{condforfaberkr}\int_{0}^{1}{\frac{dv}{v\Lambda(v)}}<\infty.\end{equation} Then for all $t>0$, \begin{equation}\label{upperbdheatfaber}\sup_{x\in M}p_{t}(x, x)\leq \frac{4}{\gamma(t/2)},\end{equation} where the function $\gamma$ is defined by \begin{equation}\label{upperbdheatfabert}t=\int_{0}^{\gamma(t)}{\frac{dv}{v\Lambda(v)}}.\end{equation} \end{prop} \begin{defin}\normalfont Let $\{M_{i}\}_{i=0}^{k}$ be a finite family of non-compact Riemannian manifolds. We say that a manifold $M$ is a \textit{connected sum} of the manifolds $M_{i}$ and write \begin{equation}\label{connectedsumdef}M=\bigsqcup_{i=0}^{k}{M_{i}}\end{equation} if, for some non-empty compact set $K\subset M$ the exterior $M\setminus K$ is a disjoint union of open sets $E_{0}, \ldots, E_{k}$ such that each $E_{i}$ is isometric to $M_{i}\setminus K_{i}$ for some compact set $K_{i}\subset M_{i}$. \end{defin} Conversely, we have the following definition. \begin{defin}\normalfont Let $M$ be a non-compact manifold and $K\subset M$ be a compact set with smooth boundary such that $M\setminus K$ is a disjoint union of finitely many ends $E_{0}, \ldots, E_{k}$. Then $M$ is called a \textit{manifold with ends}. \end{defin} \begin{rem}\normalfont Let $M$ be a manifold with ends $E_{0}, \ldots, E_{k}$. Considering each end $E_{i}$ as an exterior of another manifold $M_{i}$, then $M$ can be written as in (\ref{connectedsumdef}). \end{rem} Let $(M=\bigsqcup_{i=0}^{k}{M_{i}}, \mu)$ be a connected sum of complete non-compact weighted manifolds $(M_{i}, \mu_{i})$ and $h$ be a positive smooth function on $M$. As before, let us consider the weighted manifold $(M, \widetilde{\mu})$, where $\widetilde{\mu}$ is defined by $d\widetilde{\mu}=h^{2}d\mu$. 
By restricting $h$ to the end $E_{i}=M_{i}\setminus K_{i}$ and then extending this restriction smoothly to a function $h_{i}$ on $M_{i}$, we obtain weighted manifolds $(M_{i}, \widetilde{\mu}_{i})$, where $\widetilde{\mu}_{i}$ is given by $d\widetilde{\mu}_{i}=h_{i}^{2}d\mu$. \begin{thm}\label{heatonsumweightfaber} Let $(M, \widetilde{\mu})=\left(\bigsqcup_{i=0}^{k}{M_{i}}, \widetilde{\mu}\right)$ be a weighted manifold with ends where $M_{0}$ is a model manifold with boundary so that for all $r\geq0$, $$\psi_{0}(r)\leq C_{0}$$ and $$\widetilde{S_{0}}(r)\simeq \left\{ \begin{array}{ll} r^{\delta}e^{r^{\alpha}},& r\geq1, \\1,& r<1,\end{array}\right.$$ where $0<\alpha\leq 1$, $\delta \in \mathbb{R}$ and $\widetilde{S_{0}}$ denotes the area function of a weighted model $(M_{0}, \widetilde{\mu_{0}})$. Assume also that all $(M_{i}, \widetilde{\mu_{i}})$, $i=1, \ldots k$, have Faber-Krahn functions $\widetilde{\Lambda_{i}}$ such that $$\widetilde{\Lambda_{i}}(v)\geq c_{i}\left\{ \begin{array}{ll} \frac{1}{(\log v)^{\frac{2-2\alpha}{\alpha}}},& v\geq2, \\v^{-\frac{2}{n}},& v<2,\end{array}\right.$$ for constants $c_{i}>0$. Then there exist constants $C>0$ and $C_{1}>0$ depending on $\alpha$ and $n$ so that the heat kernel $\widetilde{p_{t}}$ of $(M, \widetilde{\mu})$ satisfies \begin{equation}\label{upperheatweightest}\sup_{x\in M}\widetilde{p}_{t}(x, x)\leq C\left\{ \begin{array}{ll} \exp\left(-C_{1}t^{\frac{\alpha}{2-\alpha}}\right),& t\geq1, \\t^{-\frac{n}{2}},& 0<t<1.\end{array}\right.\end{equation} \end{thm} \begin{proof} It follows from Theorem \ref{propisoformod}, that $(M_{0}, \widetilde{\mu_{0}})$ has the lower isoperimetric function $J$ given by (\ref{lowerisoonM0}), that is $$J(v)=\widetilde{c} \left\{ \begin{array}{lc} \frac{v}{(\log v)^{\frac{1-\alpha}{\alpha}}},& v\geq 2, \\c'v^{\frac{n-1}{n}},& v<2,\end{array}\right.$$ where $\widetilde{c}>0$ is a small enough constant and $c'$ is a positive constant chosen such that $J$ is continuous. 
Since $J$ is continuous and the function $\frac{J(v)}{v}$ is non-increasing, we obtain from [\cite{Grigoryan1999}, Proposition 7.1], that $(M_{0}, \widetilde{\mu_{0}})$ admits a Faber-Krahn function $\widetilde{\Lambda}_{0}$ given by \begin{equation}\label{faberkrahnfromisoneu}\widetilde{\Lambda}_{0}(v)=\frac{1}{4}\left(\frac{J(v)}{v}\right)^{2}\simeq \left\{ \begin{array}{lc} \frac{1}{(\log v)^{\frac{2-2\alpha}{\alpha}}},& v\geq 2, \\v^{-\frac{2}{n}},& v<2.\end{array}\right.\end{equation} We obtain from [\cite{grigor2016surgery}, Theorem 3.4] that there exist constants $c>0$ and $Q>1$ such that $(M, \widetilde{\mu})$ admits the Faber-Krahn function \begin{equation}\label{Fabrkrahnonsum}\widetilde{\Lambda}(v)=c\min_{0\leq i\leq k}\widetilde{\Lambda}_{i}(Qv).\end{equation} Hence $(M, \widetilde{\mu})$ has a Faber-Krahn function $\widetilde{\Lambda}$, satisfying \begin{equation}\label{faberkrahnfromisonew}\widetilde{\Lambda}(v)\simeq\left\{ \begin{array}{lc} \frac{1}{(\log v)^{\frac{2-2\alpha}{\alpha}}},& v\geq 2, \\v^{-\frac{2}{n}},& v<2.\end{array}\right.\end{equation} Observe that the Faber-Krahn function $\widetilde{\Lambda}$ satisfies condition (\ref{condforfaberkr}). Thus, we can apply Proposition \ref{thmfaberheatupper}, which yields the heat kernel upper bound in (\ref{upperbdheatfaber}). Hence, it remains to estimate the function $\gamma$ from the right hand side of (\ref{upperbdheatfaber}) by using (\ref{upperbdheatfabert}). 
In the case when $t>0$ is small enough, we get by (\ref{upperbdheatfabert}) and (\ref{faberkrahnfromisonew}), $$t=\int_{0}^{\gamma(t)}{\frac{dv}{v\widetilde{\Lambda}(v)}}=C'\int_{0}^{\gamma(t)}{\frac{dv}{v^{1-\frac{2}{n}}}}=C'\gamma(t)^{\frac{2}{n}} ,$$ which implies for some constant $C''>0$, $$\gamma(t)=C'' t^{\frac{n}{2}}.$$ For large enough $t$ on the other hand, we deduce $$t=\int_{0}^{\gamma(t)}{\frac{dv}{v\widetilde{\Lambda}(v)}}\simeq \int_{2}^{\log(\gamma(t))}{u^{\frac{2-2\alpha}{\alpha}}du}\simeq \log(\gamma(t))^{\frac{2-\alpha}{\alpha}}.$$ Therefore, $$\gamma(t)\simeq \exp\left(\textnormal{const}~t^{\frac{\alpha}{2-\alpha}}\right),$$ where $\textnormal{const}$ is a positive constant depending on $\alpha$ and $n$. Substituting these estimates for $\gamma(t)$ into (\ref{upperbdheatfaber}), we obtain the upper bound (\ref{upperheatweightest}) for the heat kernel $\widetilde{p}_{t}$ of $(M, \widetilde{\mu})$ for small and large values of $t$. For the intermediate values of $t$, we deduce the upper bound (\ref{upperheatweightest}) from the fact that the function $t\mapsto \sup_{x\in M}\widetilde{p}_{t}(x, x)$ is continuous. 
\end{proof} \begin{example}\normalfont In Theorem \ref{heatonsumweightfaber} one can take $(M_{i}, \widetilde{\mu_{i}})=(\mathbb{H}^{n}, \mu_{i})$, $i=1, \ldots k$, where $\mu_{i}$ is the Riemannian measure on the hyperbolic space $\mathbb{H}^{n}$ since for all $0<\alpha\leq 1$, we have $$\Lambda_{\mathbb{H}^{n}}(v)\simeq\left\{ \begin{array}{lc} 1,& v\geq 2, \\v^{-\frac{2}{n}},& v<2\end{array}\right.\geq c\left\{ \begin{array}{ll} \frac{1}{(\log v)^{\frac{2-2\alpha}{\alpha}}},& v\geq2, \\v^{-\frac{2}{n}},& v<2.\end{array}\right.$$ \end{example} \begin{rem}\normalfont Let $(M, \widetilde{\mu})$ be the weighted manifold with ends, defined as in Theorem \ref{heatonsumweightfaber}, so that $\widetilde{S_{0}}(r)\simeq e^{r^{\alpha}}r^{\delta}$ for $r>1$ and hence, for $R>1$, $$\widetilde{V_{0}}(R)=\int_{0}^{R}{\widetilde{S_{0}}(r)dr}\simeq\int_{0}^{R}{e^{r^{\alpha}}r^{\delta}dr}\simeq e^{R^{\alpha}}R^{\delta+1-\alpha}.$$ Then, we obtain from [\cite{Coulhon1997}, Proposition 3.4] for large enough $R$, $$\widetilde{\lambda_{1}}(\Omega_{R})\leq 4\left(\frac{\widetilde{S_{0}}(R)}{\widetilde{V_{0}}(R)}\right)^{2}\leq\frac{C}{R^{2-2\alpha}},$$ where $\Omega_{R}=\{(r, \theta)\in M_{0}: 0<r<R\}$. Hence, setting $R(t)=t^{\frac{1}{2-\alpha}}$, [\cite{Coulhon1997}, Proposition 2.3] yields the following lower bound for the heat kernel $\widetilde{p_{t}}$ in $(M, \widetilde{\mu})$ for large enough $t$: \begin{align*}\sup_{x}\widetilde{p_{t}}(x, x)\geq\frac{1}{\widetilde{\mu}(\Omega_{R})}\exp\left(-\widetilde{\lambda_{1}}(\Omega_{R})t\right)\geq\frac{C_{1}}{e^{R^{\alpha}(t)}R^{1-\alpha}(t)}\exp\left(-\frac{Ct}{R^{2-2\alpha}(t)}\right)\geq \frac{C_{1}}{e^{C_{2}t^{\frac{\alpha}{2-\alpha}}}},\end{align*} which shows that the exponential decay in the upper bound given in (\ref{upperheatweightest}) is sharp. 
\end{rem} \subsection{Weighted models with two ends} Let $M$ be the topological space $M=\mathbb{R}\times \mathbb{S}^{n-1}$, $n\geq 2$, that is, any point $x\in M$ can be written in the polar form $x=(r, \theta)$ with $r\in\mathbb{R}$ and $\theta \in \mathbb{S}^{n-1}$. For a fixed smooth positive function $\psi$ on $\mathbb{R}$ consider on $M$ the Riemannian metric $ds^{2}$ given by $$ds^{2}=dr^{2}+\psi^{2}(r)d\theta^{2},$$ where $d\theta^{2}$ is the standard Riemannian metric on $\mathbb{S}^{n-1}$. The Riemannian measure $\mu$ on $M$ with respect to this metric is given by \begin{equation}\label{muontwoendwithS}d\mu=\psi^{n-1}(r)drd\sigma(\theta),\end{equation} where $dr$ denotes the Lebesgue measure on $\mathbb{R}$ and $d\sigma$ the Riemannian measure on $\mathbb{S}^{n-1}$. As before, we normalize the metric $d\theta^{2}$ on $\mathbb{S}^{n-1}$ so that $\sigma(\mathbb{S}^{n-1})=1$. Then we define the area function $S$ on $\mathbb{R}$ by \begin{equation}\label{defSonttwo}S(r)=\psi^{n-1}(r).\end{equation} Given a smooth positive function $h$ on $M$, that only depends on the polar radius $r\in \mathbb{R}$, and considering the measure $\widetilde{\mu}$ on $M$ defined by $d\widetilde{\mu}=h^{2}d\mu$, we get that the weighted model $(M, \widetilde{\mu})$, has the area function \begin{equation}\label{areaoftwoweight}\widetilde{S}(r)=h^{2}(r)S(r).\end{equation} The Laplace-Beltrami operator $\Delta_{\mu}$ on $M$ can be represented in the polar coordinates $(r, \theta)$ as follows: \begin{equation}\label{laplaceonmodel}\Delta_{\mu}=\frac{\partial^{2}}{\partial r^{2}}+\frac{S'(r)}{S(r)}\frac{\partial}{\partial r}+\frac{1}{\psi^{2}(r)}\Delta_{\theta},\end{equation} where $\Delta_{\theta}$ is the Laplace-Beltrami operator on $\mathbb{S}^{n-1}$. 
If we assume that $u$ is a radial function, that is, $u$ depends only on the polar radius $r$, we obtain from (\ref{laplaceonmodel}), that $u$ is harmonic in $M$ if and only if \begin{equation}\label{harmonicontwoendsu}u(r)=c_{1}+c_{2}\int_{r_{1}}^{r}{\frac{dt}{S(t)}},\end{equation}where $r_{1}\in [-\infty, +\infty]$ so that the integral converges and $c_{1}, c_{2}$ are arbitrary reals. \begin{thm}\label{heatkernelforsmallendviah} Let $(M, \mu)=(M_{0}\sqcup M_{1}, \mu)$ be a Riemannian model with two ends, where $M_{0}=\{(r, \theta)\in M: r\geq 0\}$ is a model manifold with boundary such that for all $r\geq0$, $$\psi_{0}(r)=e^{-\frac{1}{n-1}r^{\alpha}}.$$ Also assume that $(M_{1}, \mu_{1})$ is a Riemannian model with \begin{equation}\label{assumptiononM1on2end}\int_{1}^{\infty}{\frac{dt}{S_{1}(t)}}<\infty,\end{equation} and Faber-Krahn function $\Lambda_{1}$, so that \begin{equation}\label{faverkrahnohnoneside}\Lambda_{1}(v)\geq c_{1}\left\{ \begin{array}{ll} \frac{1}{(\log v)^{\frac{2-2\alpha}{\alpha}}},& v\geq2, \\v^{-\frac{2}{n}},& v<2,\end{array}\right.\end{equation} for some constant $c_{1}>0$. Then there exist positive constants $C_{x}=C_{x}(x, \alpha, n)$ and $C_{1}=C_{1}(\alpha, n)$ such that the heat kernel of $(M, \mu)$ satisfies, for all $x\in M$, the inequality \begin{equation}\label{heatkerneluppernew}p_{t}(x, x)\leq C_{x}\left\{ \begin{array}{ll} \exp\left(-C_{1}t^{\frac{\alpha}{2-\alpha}}\right),& t\geq1, \\t^{-\frac{n}{2}},& 0<t<1.\end{array}\right. \end{equation} \end{thm} \begin{proof} Observe that the assumption (\ref{assumptiononM1on2end}) yields that we can choose positive constants $\kappa_{1}$ and $\kappa_{2}$ so that the smooth function $h$ on $M$ defined by $$h(r)=\kappa_{1}+\kappa_{2}\int_{1}^{r}{\frac{dt}{S(t)}},$$ is positive in $M$ and satisfies $h\simeq 1$ in $\{r\leq 0\}$. Consider the weighted model with two ends $(M, \widetilde{\mu})$, where $\widetilde{\mu}$ is defined by $d\widetilde{\mu}=h^{2}d\mu$. 
It follows from (\ref{faverkrahnohnoneside}) that the weighted model $(M_{1}, \widetilde{\mu_{1}})$ has the Faber-Krahn function $\widetilde{\Lambda}_{1}$ satisfying $$\widetilde{\Lambda}_{1}(v)\geq \widetilde{c}_{1}\left\{ \begin{array}{ll} \frac{1}{(\log v)^{\frac{2-2\alpha}{\alpha}}},& v\geq2, \\v^{-\frac{2}{n}},& v<2,\end{array}\right.$$ for some constant $\widetilde{c}_{1}>0$. Further, note that $$h|_{M_{0}}(r)\simeq \left\{ \begin{array}{ll} r^{1-\alpha}e^{r^{\alpha}},& r\geq 1, \\1,& 0\leq r<1,\end{array}\right.$$ whence the area function $\widetilde{S_{0}}$ of the weighted model with boundary $(M_{0}, \widetilde{\mu_{0}})$ admits the estimate \begin{equation}\label{estimatestilononeend}\widetilde{S_{0}}(r)\simeq \left\{ \begin{array}{ll} r^{2-2\alpha}e^{r^{\alpha}},& r\geq 1, \\1,& 0\leq r<1.\end{array}\right.\end{equation} Since also $\psi_{0}\leq 1$, we can apply Theorem \ref{heatonsumweightfaber} and obtain that there exist constants $C>0$ and $C_{1}>0$ depending on $\alpha$ and $n$ so that the heat kernel $\widetilde{p_{t}}$ of $(M, \widetilde{\mu})$ satisfies \begin{equation}\label{largeendheatinproof}\sup_{x\in M}\widetilde{p}_{t}(x, x)\leq C\left\{ \begin{array}{ll} \exp\left(-C_{1}t^{\frac{\alpha}{2-\alpha}}\right),& t\geq1, \\t^{-\frac{n}{2}},& 0<t<1.\end{array}\right.\end{equation} Using that $h$ is harmonic in $M$, we have by (\ref{relationofheatkernelsends}), for all $t>0$ and $x\in M$, the identity $$\widetilde{p}_{t}(x, x)=\frac{p_{t}(x, x)}{h^{2}(x)},$$ which together with (\ref{largeendheatinproof}) implies the upper bound (\ref{heatkerneluppernew}) and thus, finishes the proof. 
\end{proof} \begin{rem}\normalfont Consider the end $\Omega:=\{r>0\}$ of the Riemannian model $(M, \mu)$ from Theorem \ref{heatkernelforsmallendviah} and note that $\left(\overline{\Omega}=\{r\geq0\}, \mu|_{\{r\geq0\}}\right)$ is parabolic by [\cite{Grigor'yan1999}, Proposition 3.1], whence the estimate (\ref{heatkerneluppernew}) implies that we cannot get a polynomial decay of the heat kernel in $M$ as it follows from (\ref{nonparaendpoldecglob}) in Theorem \ref{thmlowerptbd}, just by assuming the polynomial volume growth condition (\ref{PolygrowthOM}). \end{rem} \begin{rem}\normalfont Consider again the end $\Omega:=\{r>0\}$ of the Riemannian model $(M, \mu)$ from Theorem \ref{heatkernelforsmallendviah} and assume for simplicity that $n=2$. Let $M_{0}$ be defined as in Theorem \ref{thmlocharsph}, that is, there exists a compact set $K_{0}\subset M_{0}$ that is the closure of a non-empty open set, such that $\Omega$ is isometric to $M_{0}\setminus K_{0}$. Let us check which conditions from Theorem \ref{thmlocharsph} are not satisfied in $M_{0}$. A simple computation shows that the area function $S_{0}$ of the manifold $M_{0}$ satisfies $S_{0}''(r)\sim\alpha^{2}e^{-r^{\alpha}}r^{2\alpha-2}~\text{as}~r\to +\infty$, so that $-\frac{S_{0}''(r)}{S_{0}(r)}\to 0~\text{as}~r\to +\infty$. Together with the fact that on a compact set, the curvature is non-negative, it then follows from (\ref{gaussin2S}) that the curvature on $M_{0}$ is bounded below, which implies that $M_{0}$ is a locally Harnack manifold. Obviously, $S_{0}$ also satisfies the conditions (\ref{langsamevariquo}) and (\ref{spherhartwodimmod}) from Proposition \ref{propforspherintwo}, whence we obtain that on $M_{0}$ the spherical Harnack inequality (\ref{annuliharnack2}) holds. On the other hand, condition (\ref{upperboundVOm}) in $M_{0}$ fails, since for fixed $\rho>0$, the volume $V(x, \rho)$ can be arbitrarily small when $r\to +\infty$ where $x=(r, \theta)\in \Omega$. 
Hence, we have that in general, we can not drop the condition (\ref{upperboundVOm}) in Theorem \ref{thmlocharsph} to get the polynomial decay (\ref{lowerbndendparamitharnaanuohneomloc}) of the heat kernel in $M$. \end{rem} \bibliographystyle{abbrv}
{ "redpajama_set_name": "RedPajamaArXiv" }
8,260
Q: import module from s3 in sagemaker I have a .py file in an s3 bucket which I am trying to load in as a python module within Sagemaker I've tried adding the file path to the sys path with: sys.path.append('foo') but get an error with : import bar.py I can read the py file with: pd.read_csv('foo/bar.py') but get an error with: open('foo/bar.py) Please can you guide me in how to import this .py file as a module A: You can first download the from S3: import os os.system("aws s3 cp s3://<S3location> .") Then you can import the file.
{ "redpajama_set_name": "RedPajamaStackExchange" }
6,213
Q: SELECT clause for 3 data fields and display as 1 field Table name : LOCATION //LOCATION CITY ROAD# STREET# ------------------------------- ANSON 41 16 Following query: SELECT (CITY,ROAD#,STREET#) AS "Location" FROM LOCATION; hope to get following output like: Location ---------------- ANSON,41,16 isn't possible to get something like this? A: SELECT CITY || ',' || ROAD# || ',' || STREET# AS "Location" FROM LOCATION;
{ "redpajama_set_name": "RedPajamaStackExchange" }
3,059
[sparkfizzle](http://github.com/tfe/sparkfizzle/) ======== A quick and dirty script for piping tweets into Campfire. Follow individual Twitter accounts and monitor the tweet stream for certain terms. The name is a play on [Sparkflare](https://sparkflare.com/), which stopped being free a couple months ago. So I made my own with a fraction of the features. Usage ----- The script has dependencies on the broach and tweetstream gems. If you try to run it without them, it'll let you know. Configuration is done by providing the path to a YAML config file as the last argument when calling this script. An example YAML config is provided. Just fill in your own Twitter and Campfire credentials, the Campfire room you'd like tweets to be posted to, and the Twitter IDs and track terms you'd like to follow. Then just: ruby sparkfizzle.rb /path/to/my_sweet_config.yml Logs will be output to `sparkfizzle_my_sweet_config.log` in the directory you run it from. Todo ---- * I'm not sure what happens if you only want to track some terms OR follow some IDs, not do both at the same time. Handle that case. * Allow usage of SSL for Campfire accounts that support it (broach's `use_ssl` config setting). Contact ------- Problems, comments, and pull requests all welcome. [Find me on GitHub.](http://github.com/tfe/) Copyright ------- Copyright © 2010 [Todd Eichel](http://toddeichel.com/) for [Fooala, Inc.](http://opensource.fooala.com/).
{ "redpajama_set_name": "RedPajamaGithub" }
1,123
Липня — деревня в Дедовичском районе Псковской области России. Входит в состав Шелонской волости. Расположена между реками Липня и Крутец (приток Липни) на северо-востоке района в 8 км к северо-востоку от районного центра, посёлка Дедовичи, и в 3 км к западу от деревни Крутец. Население Численность населения деревни по оценке на начало 2001 года составляла 148 человек. История До июня 2010 года деревня входила в состав Крутецкой волости Дедовичского района, упразднённой в пользу Дубишенской волости. С 2010 до 2015 гг. деревня была в составе Дубишенской волости, упразднённой в пользу Шелонской волости. Примечания Ссылки Сайт Дедовичского района. Дубишенская волость Населённые пункты Дедовичского района
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,459
export PATH=$PATH:$HOME/lib::$GIT_STUFF_HOME source csc-git-lib.sh ################################################################################################### ## ## Setze $GIT_STUFF_HOME auf das Verzeichnis der Git-Stuff-Libraries wie csc-git-lib.sh ## z.B. export GIT_STUFF_HOME=/c/Users/te_zf4iks2/git/misc-examples/git-stuff ## export PATH=$GIT_STUFF_HOME:$PATH ## ################################################################################################### if [ "$1" == "--help" ]; then echo $(basename $0) "branch_name" ; echo " branch_name - Branch, der ausgecheckt werden soll, default: aktueller Branch" echo " Bsp: $(basename $0) FB_K2019043" exit; fi; for directory in ./*; do if [[ -d $directory ]]; then echo " " echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"; echo $directory cd $directory directory_failed="0"; if [[ "$(is_git_repository)" -eq 0 ]]; then echo Directory ist kein GIT-Repository und wird uebersprungen else start_branch=$(parse_git_branch) if [ -z $1 ]; then echo "Branch ist nicht gesetzt; es wird der aktuelle Branch $(parse_git_branch) genutzt" else echo "$1 ist als Branch gesetzt; dieser Branch wird ausgecheckt und dagegen gearbeitet (am Ende wird wieder auf den aktuellen Branch zurueckgesetzt)" checkout $1; if test $? -gt 0; then echo "checkout $1 failed";directory_failed="1";fi; fi if [ $directory_failed -eq "0" ]; then current_branch=$(parse_git_branch) pull $current_branch; if test $? -gt 0; then echo "pull " $? " failed";fi; fi; ## Zuruecksetzen auf Start-Branch if [ ! -z $1 ]; then checkout $start_branch; if test $? -gt 0; then echo "checkout $start_branch failed";fi; fi; fi; cd .. fi; done
{ "redpajama_set_name": "RedPajamaGithub" }
921
package torrent

import (
	"time"
)

// Accumulator keeps a running total and an exponentially-weighted estimate of
// the rate at which that total grows. The rate window never looks further
// back than maxRatePeriod.
//
// Cleanup vs. the original: commented-out debug log statements and the unused
// commented-out "log" import were removed; exported interface is unchanged.
type Accumulator struct {
	maxRatePeriod time.Duration // maximum look-back window for the rate estimate
	rateSince     time.Time     // start of the interval the current rate covers
	last          time.Time     // timestamp of the most recent Add
	rate          float64       // amount per nanosecond over [rateSince, last]
	total         int64         // total amount accumulated so far
}

// NewAccumulator returns an Accumulator whose rate window starts one second
// before now, so the very first Add produces a finite, non-zero denominator.
func NewAccumulator(now time.Time, maxRatePeriod time.Duration) (acc *Accumulator) {
	acc = &Accumulator{}
	acc.maxRatePeriod = maxRatePeriod
	acc.rateSince = now.Add(time.Second * -1)
	acc.last = acc.rateSince
	acc.rate = 0.0
	acc.total = 0
	return acc
}

// Add records that amount was accumulated at time now, updating both the
// total and the windowed rate. The window start is advanced so it never
// trails now by more than maxRatePeriod.
func (a *Accumulator) Add(now time.Time, amount int64) {
	a.total += amount
	// Re-weight the previous rate over the old interval, fold in the new
	// amount, then normalize by the new interval length (in nanoseconds).
	a.rate = (a.rate*float64(a.last.Sub(a.rateSince)) + float64(amount)) /
		float64(now.Sub(a.rateSince))
	a.last = now
	newRateSince := now.Add(-a.maxRatePeriod)
	if a.rateSince.Before(newRateSince) {
		a.rateSince = newRateSince
	}
}

// GetRate updates the estimate to time now (adding zero amount, which decays
// the rate) and returns it in units of amount per second.
func (a *Accumulator) GetRate(now time.Time) float64 {
	a.Add(now, 0)
	return a.GetRateNoUpdate()
}

// GetRateNoUpdate returns the current rate in amount per second without
// advancing the window.
func (a *Accumulator) GetRateNoUpdate() float64 {
	return a.rate * float64(time.Second)
}

// DurationUntilRate returns how long (with no further additions) it would
// take for the decaying rate to fall to newRate. Returns 0 if the current
// rate is already at or below newRate.
func (a *Accumulator) DurationUntilRate(now time.Time, newRate float64) time.Duration {
	rate := a.rate
	if rate <= newRate {
		return time.Duration(0)
	}
	dt := float64(now.Sub(a.rateSince))
	// Solve rate*dt/(dt+x) == newRate for the extra elapsed time x.
	return time.Duration(((rate * dt) / newRate) - dt)
}

// getTotal returns the total amount accumulated since creation.
func (a *Accumulator) getTotal() int64 {
	return a.total
}
{ "redpajama_set_name": "RedPajamaGithub" }
7,535
OWN THE WATERFRONT Turkey 2020 | SHIPBUILDING | INTERVIEW Orhan Gülcek is the chairman of Cemre Shipyard. He founded Cemre Engineering in 1996, a company that initially specialized in painting ships. Since 2005, Cemre has been building ships, and has since become best known for its shipbuilding capabilities. Gülcek is a member of the Turkish Chamber of Shipping and the Istanbul Chamber of Commerce, serving as a board member for the Turkish Shipbuilders' Association (GISBIR) and the Yalova-Altınova Shipyard Entrepreneurs Industry and Trade Inc. Co. He is the vice chairman of Yalova Pilotage Inc. Co. He has an electrical engineering degree from Yıldız Technical University in Istanbul. Cemre recently delivered its 50th vessel to Havyard Group, a Norwegian shipping company. Why did Cemre begin working with northern European companies, and what future plans do you have with them? We started to work with Havyard in 2005 when northern European countries showed their interest in Turkey's shipbuilding sector. We rented a place at Istanbul Shipyard from the Ministry of Defense and started looking for clients. When we came into contact with Havyard, all the Turkish shipyards were working at full capacity. We won our initial contract with Havyard because we had capacity, but we won follow-up work because of our performance. Later, we continued to work with it because we provided high-quality ships and delivered on time due to our entire team's hard work. We have delivered Havyard 50 ships so far and are working on 10 more ships, hopefully with more to come. This partnership has a win-win model and is based on mutual trust. We think it will strengthen and grow in the future. Other than Norway, which European countries are you most interested in? We consider Europe as one bloc, so it is not wise to look at a single country from a business development standpoint. 
We prefer to construct project or niche ships, such as the ships we construct for the energy sector that service offshore windmills. These ships are often used by more than one country and are extremely versatile. We have constructed ships for Norway, Denmark, Iceland, the UK, and France. The most popular ships of late are fishing vessels, ferries, and energy-support ships. Why did the company choose to focus on niche sectors of the shipbuilding industry? Turkey's construction of classical ships has decreased substantially because of competition from East Asia. We have focused on project ships based on our client's needs, or on a tailor-made basis. One of our goals is to construct cruise ships and to grow our presence in that area. We see great growth potential for ships that use advanced technologies, such as hybrid power. There is growing demand for hybrid and battery-powered ships because the environmental factor is gaining prominence, especially in northern European countries. Hydrogen-powered ships are also being designed and people are beginning to realize the future of autonomous ships. We have to follow the technology. We have to target the ships that involve technology and demand more engineering contribution. What benefits do hybrid vessels offer, and do those benefits justify the higher cost? We have different technologies for our hybrid solutions. The key benefits are fuel efficiency, silent working ability, and environmental impact. So far, we have built hybrid ships primarily for ferry companies. Eventually, I believe all ferries will use electrical hybrid solutions, especially in Norway. Regarding cost, hybrid technologies will become cheaper in the future. For example, the batteries were expensive last year but now their prices are dropping. With time, all of the associated costs will be more balanced. When it comes to the market for the first time, the prices are higher because of the R&D costs. 
How has the lira's depreciation impacted your input costs and export demand? For us, the lira's sudden depreciation or appreciation is a disadvantage. Although we export and an increase in the value of foreign currency may seem to benefit us, that is not the case. Around 60-70% of our costs depends on foreign currency. Once our suppliers accept the foreign currency exchange rate and calculate costs accordingly, pulling the prices back is extremely hard. On the contrary, stable and small increases are better for us as that helps us plan ahead. With Cemre's pipeline full for the next two years, what investments are you planning to expand your capacity? We have always invested in expanding our shipyard capacity; each year we grow by 10,000sqm. We started ship construction in 2005 with a really small area and in 2007 we had our own 20,000-sqm shipyard. The first shipyard we had has grown to 70,000sqm. After expanding it yearly for a few years, we decided to purchase a larger plot of land. At present, we have two 70,000-sqm shipyards and a 10,000-sqm piece of land; however, according to our production plans, we need more space. There are 37 other shipyards in Yalova, and we plan to outsource some of our jobs to them. We want to invest more in technology, because the ships we construct are special ships and we want to construct these ships as quickly as possible with the best technology available. In the future, how do you envision Cemre further diversifying its shipbuilding capabilities? We recently acquired a facility security document, so we plan to work on military projects for foreign countries. Another alternative is that we want to construct small-sized passenger ships, especially because passenger ships are also sensitive to the environment and use hybrid technologies. With the advancement of technology, new routes are formed, which boosts demand for new ships. 
Setting a new course "ARES Shipyard invests most of its revenues into new facilities, investments, and infrastructure."​
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
5,640
Stalachtis trangeri är en fjärilsart som beskrevs av William Schaus 1928. Stalachtis trangeri ingår i släktet Stalachtis och familjen juvelvingar. Inga underarter finns listade i Catalogue of Life. Källor Juvelvingar trangeri
{ "redpajama_set_name": "RedPajamaWikipedia" }
1,488
The Hilton & Hyland Story Jeff Hyland Rick Hilton Secured Escrow Hilton & Hyland 257 North Cañon Drive What are you interested in?SellingBuyingMediaOther Sign up for Hilton & Hyland account. Have a account ? Log in Jack Friedkin Website: http://www.JackFriedkin.com Growing up in Beverly Hills and Malibu, Jack Friedkin developed an appreciation for distinctive residential properties at an early age. Already a top agent in his third year in the business, Jack has sold over $70 million in luxury real estate, earning a reputation for honesty, integrity, and unwavering commitment to the success of his clients. An alumnus of the New York Film Academy's School of Cinematography, Jack holds a Bachelor of Arts in Mass Communications. Upon graduating, he achieved widespread acclaim as Director of Photography for a top reality television production company, shooting back-to-back series throughout the country. Coming full circle, his show, Amazing America, highlighted unusual real estate across the nation. Inspired to make an industry leap, Jack returned to his hometown in 2014 to pursue his passion for residential architecture in the incomparable Southern California market. He immediately achieved success at a high level, selling his first multi-million dollar listing in Beverly Hills within days of obtaining his license. Employing his knowledge and well-honed creative instincts, Jack prides himself on helping clients find homes that match their specific needs and visions. In his free time, Jack enjoys furthering his real estate education by immersing himself in the social and historical impact of local architects. With a lifelong passion for giving back to the community, he devotes himself to numerous philanthropic endeavors, lending considerable time and resources to the Los Angeles Boys and Girls Club, the Malibu Public Library, and The Sherry Lansing Foundation. 
Together with his real estate partner, Leonard Rabinowitz, the duo has managed to raise over $48 million for charity Jack Friedkin's Properties 30966 Broad Beach Road 30966 Broad Beach Rd, Malibu, CA 90265 BD 7 / BA 9 / SQ 5,824 Bloom Ranch 11882 STATE HIGHWAY 75, HAILEY, ID 83333 BD 9 / BA 11 / SQ 17,000 10560 Wilshire Blvd Unit #PenthouseA 10560 WILSHIRE BLVD #PenthouseA, Los Angeles, CA 90024 16 Galleon St 16 GALLEON ST, Marina Del Rey, CA 90292 23438 Malibu Colony Rd 23438 Malibu Colony Rd, Malibu, CA 90265 BD 5 / BA 6 565 Perugia 565 Perugia, Los Angeles , CA 90077 3300 Palos Verdes Drive West 3300 Palos Verdes Drive West, Rancho Palos Verdes, CA 90275 BD 5 / BA 10 31048 Broad Beach Rd 10416 W Sunset Blvd 10416 W Sunset Blvd, Los Angeles, CA 90077 SOLD | 1139 Maybrook Dr 1139 Maybrook Dr, Beverly Hills, CA 90210 Copyright © 2020 Hilton & Hyland. All Rights Reserved Website developed by Luxury Presence.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
2,470
\section{Introduction} The Gini index (Gini score, accuracy ratio) is a popular tool for model selection in machine learning, and there are versions of the Gini index that are used to evaluate actuarial pricing models and financial credit risk models; see Frees et al.~\cite{Frees1, Frees2}, Denuit et al.~\cite{DenuitSznajderTrufin}, Engelmann et al.~\cite{Engelmann} and Tasche \cite{Tasche}. However, in general, the Gini index does not give a (strictly) consistent scoring rule; Example 3 of Byrne \cite{Byrne} gives a counterexample. (Strict) consistency is an important property in model selection because it ensures that maximizing the Gini index does not lead to a wrong model choice; see Gneiting \cite{Gneiting} and Gneiting--Raftery \cite{GneitingRaftery}. The Gini index can be obtained from Somers' $D$ \cite{Somers}, which essentially considers Kendall's $\tau$; see Newson \cite{Newson}. Intuitively, this tells us that the Gini index is a rank-based score that is not calibration-sensitive. The missing piece to make the Gini index a strictly consistent scoring rule is to restrict it to the class of auto-calibrated regression models, this is proved in Theorem \ref{proposition Gini}, below; for auto-calibration we refer to Kr\"uger--Ziegel \cite{Ziegel}, Denuit et al.~\cite{DenuitCharpentierTrufin} and Section 7.4.2 of W\"uthrich--Merz \cite{WM2022}. \medskip {\bf Organization.} In the next section, we introduce the notion of strictly consistent scoring rules. In Section \ref{The Gini index in machine learning}, we discuss the Gini index as it is usually used in the machine learning community. In Section \ref{Auto-calibration and consistency of the Gini index}, we introduce and discuss the property of having an auto-calibrated regression model (forecasts), and we prove that the Gini index gives a strictly consistent scoring rule if we restrict to the class of auto-calibrated regression models. 
This makes the maximization of the Gini index a sensible model selection tool on the class of auto-calibrated regression models. Finally, in Section \ref{Conclusions} we conclude. \section{Consistent scoring rules} Let $(Y,\boldsymbol{X})$ be a random tuple on a sufficiently rich probability space $(\Omega, {\cal A}, \p)$ with real-valued non-negative response $Y$ having finite mean and with covariates $\boldsymbol{X}$. Denote by ${\cal F}$ the family of potential distributions of $(Y,\boldsymbol{X})$ being supported on ${\cal Y} \times {\cal X}$. Let $F_{Y| \boldsymbol{X}}$ be the conditional distribution of $Y$, given $\boldsymbol{X}$. For any model $(Y,\boldsymbol{X})\sim F \in {\cal F}$, we consider the conditional mean functional $T$ \begin{equation*} F_{Y|\boldsymbol{X}} ~\mapsto~ T(F_{Y|\boldsymbol{X}}) = \mu^\dagger(\boldsymbol{X})= \E \left[\left.Y\right|\boldsymbol{X} \right], \end{equation*} where $\boldsymbol{X} \mapsto \mu^\dagger(\boldsymbol{X})=\E \left[\left.Y\right|\boldsymbol{X} \right]$ denotes the true regression function of the chosen model. The main task in regression modeling is to find this unknown true regression function $\mu^\dagger(\cdot)$ from i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$, having the same distribution as $(Y,\boldsymbol{X})$. \medskip Choose a scoring function $S:{\cal Y}\times \R \to \R$ giving us the score $\E\left[S\left(Y,\widehat{\mu}(\boldsymbol{X})\right)\right]$ for regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ and $(Y,\boldsymbol{X}) \sim F \in {\cal F}$. 
A {\it scoring rule} is obtained by selecting the argument(s) $\widehat{\mu}^\star(\cdot)$ that maximize the score over the regression functions $\widehat{\mu}(\cdot)$, subject to existence, \begin{equation}\label{scoring rule definition} \widehat{\mu}^\star(\cdot) ~\in~ \underset{\widehat{\mu}(\cdot)}{\arg\max}~ \E\left[S\left(Y,\widehat{\mu}(\boldsymbol{X})\right)\right], \end{equation} under the given model choice $(Y,\boldsymbol{X}) \sim F \in {\cal F}$. \medskip A scoring rule is called {\it consistent} on ${\cal F}$ for the conditional mean functional $T$, if for any model $(Y,\boldsymbol{X})\sim F \in {\cal F}$ with conditional distributions $F_{Y|\boldsymbol{X}}$ of $Y$, given $\boldsymbol{X}$, we have $S(Y,T(F_{Y|{\boldsymbol{X}}})) \in L^1(\p)$, and for any regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ with $S(Y,\widehat{\mu}(\boldsymbol{X})) \in L^1(\p)$ we have \begin{equation}\label{definition consistency} \E\left[S\left(Y,T(F_{Y|{\boldsymbol{X}}})\right)\right] \ge \E\left[S\left(Y,\widehat{\mu}(\boldsymbol{X})\right)\right]. \end{equation} A scoring rule is called {\it strictly consistent} on ${\cal F}$ for the conditional mean functional $T$, if it is consistent on ${\cal F}$, and if an identity in \eqref{definition consistency} holds if and only if $\widehat{\mu}(\boldsymbol{X})=T(F_{Y|\boldsymbol{X}})=\mu^\dagger(\boldsymbol{X})$, a.s. \begin{rems} \normalfont \begin{itemize} \item Strict consistency implies that the true regression function $\mu^\dagger(\cdot)$ is the unique maximizer in \eqref{scoring rule definition}, and it can be estimated by score maximization (assuming it is contained in the set over which we optimize, which we generally do). 
Empirically, we then consider for i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$, \begin{equation*} \underset{\widehat{\mu}(\cdot)}{\arg\max}~ \frac{1}{n}\sum_{i=1}^nS\left(Y_i,\widehat{\mu}(\boldsymbol{X}_i)\right), \end{equation*} where we still need to ensure that we can exchange the limit $n\to\infty$ and the $\arg\max$-operator to asymptotically select the true regression function $\mu^\dagger(\cdot)$ under strict consistency. \item Formula \eqref{definition consistency} states unconditional consistency as we average over the distribution of $\boldsymbol{X}$. For conditional consistency (in $\boldsymbol{X}$) and its relation to the unconditional version we refer to Section 2.2 in Dimitriadis et al.~\cite{DimitriadisFisslerZiegel2020}. A point prediction version of consistency is given in Definition 1 in Gneiting \cite{Gneiting}. \item For scoring rule \eqref{scoring rule definition} we consider a maximization. By a sign switch we can turn this into a minimization problem, and in that case we rather speak about expected loss minimization. \item Typically, we restrict \eqref{scoring rule definition}-\eqref{definition consistency} to smaller classes of regression functions $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. In the sequel, we will require continuity for these smaller classes, and, further below, we require the auto-calibration property. This requires that the true regression function $\mu^\dagger(\cdot)$ has this continuity, auto-calibration it will satisfy automatically, see Lemma \ref{lemma convex order}, below. \end{itemize} \end{rems} \section{The Gini index in machine learning} \label{The Gini index in machine learning} In the sequel we assume $\widehat{\mu}(\boldsymbol{X})$ to have a continuous distribution $F_{\widehat{\mu}(\boldsymbol{X})}$ for all $(Y,\boldsymbol{X})\sim F \in {\cal F}$ and for any considered regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. 
This implies $F_{\widehat{\mu}(\boldsymbol{X})}(F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha))=\alpha$ for all $\alpha \in (0,1)$, and with $F_{\widehat{\mu}(\boldsymbol{X})}^{-1}$ denoting the left-continuous generalized inverse of $F_{\widehat{\mu}(\boldsymbol{X})}$. \medskip In machine learning (ML) one considers the {\it cumulative accuracy profile} (CAP) defined by \begin{equation*} \alpha \in (0,1) \quad \mapsto \quad {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) ~=~ \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] ~\in~ [0,1]. \end{equation*} In actuarial science, the CAP is also called concentration curve (up to sign switches), see Denuit--Trufin \cite{DenuitTrufin}. The CAP measures a rank-based correlation between the prediction $\widehat{\mu}(\boldsymbol{X})$ and the response $Y$. The {\it Gini index (Gini score, Gini ratio, Gini coefficient, accuracy ratio) in ML} is defined by \begin{equation}\label{Gini ML} G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} = \frac{\int_0^1 {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha -1/2} {\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2}, \end{equation} where we additionally assume that $Y$ has an (unconditional) continuous distribution $F_Y$. For a geometric interpretation see Figure \ref{Gamma Lorenz} (lhs) and formula \eqref{Gini geometry}, below. \begin{rems}\normalfont \begin{itemize} \item The denominator in \eqref{Gini ML} does not use the regression function $\widehat{\mu}(\cdot)$, i.e., it has no impact on model selection by maximizing the Gini index $G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})}$ over $\widehat{\mu}(\cdot)$. 
Hence, for scoring we can focus on the term in the numerator \begin{eqnarray}\nonumber \int_0^1 {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha &=&\frac{1}{\E[Y]} \, \E \left[ Y \int_0^1 \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\, d\alpha \right] \\&=&\nonumber \frac{1}{\E[Y]} \, \E \left[ Y \,\p \left[\left. F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(U)< \widehat{\mu}(\boldsymbol{X}) \right| \widehat{\mu}(\boldsymbol{X}) \right] \right] \\&=&\frac{1}{\E[Y]} \, \E \left[ Y F_{\widehat{\mu}(\boldsymbol{X})}(\widehat{\mu}(\boldsymbol{X})) \right], \end{eqnarray} for an independent $(0,1)$-uniform random variable $U$ and where we use continuity of $F_{\widehat{\mu}(\boldsymbol{X})}$. This shows that the Gini index in ML is not calibration-sensitive because $F_{\widehat{\mu}(\boldsymbol{X})}(\widehat{\mu}(\boldsymbol{X}))$ has a $(0,1)$-uniform distribution, i.e., the specific distribution of $\widehat{\mu}(\boldsymbol{X})$ does not matter, but only its correlation with $Y$ matters. 
\item Since typically the true data model $(Y,\boldsymbol{X}) \sim F$ is not known, the Gini index in ML \eqref{Gini ML} is replaced by an empirical version \begin{equation}\label{Gini ML empirical} \widehat{G}^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} = \frac{\int_0^1 \widehat{\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha -1/2} {\int_0^1 \widehat{\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2}~\le~1, \end{equation} where we set \begin{equation}\label{CAP empirical} \widehat{\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) =\frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{\widehat{\mu}(\boldsymbol{X}_i)> \widehat{\mu}\left(\boldsymbol{X}_{\left(\lceil (1-\alpha) n \rceil\right)}\right)\right\}}, \end{equation} for i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$, having the same distribution as $(Y,\boldsymbol{X})$, and for order statistics $\widehat{\mu}(\boldsymbol{X}_{(1)})< \widehat{\mu}(\boldsymbol{X}_{(2)}) < \ldots < \widehat{\mu}(\boldsymbol{X}_{(n)})$; note that by assumption the distribution of $\widehat{\mu}(\boldsymbol{X})$ is continuous which implies that all observations $\widehat{\mu}(\boldsymbol{X}_i)$ are mutually different for $1\le i \le n$, and we have a strict ordering in the order statistics. \item Let us further comment on \eqref{CAP empirical}. First, if we mirror the CAP at the diagonal we have \begin{eqnarray}\label{mirrored CAP definition} {\rm CAP}^-_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) \le F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}}\right] \\&=&\nonumber 1- \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}}\right] ~=~1-{\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (1-\alpha). 
\end{eqnarray} For an empirical version of the mirrored CAP we replace the above expression by \begin{eqnarray*} \widehat{\rm CAP}^-_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i}\, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{\widehat{\mu}(\boldsymbol{X}_i) \le \widehat{F}_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}} \\&=& 1- \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i}\, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{\widehat{\mu}(\boldsymbol{X}_i) > \widehat{\mu}(\boldsymbol{X}_{(\lceil \alpha n\rceil)})\right\}}, \end{eqnarray*} where in the last indicator we use the empirical distribution, for $m \in \R$ and $\alpha \in (0,1)$, \begin{equation*} \widehat{F}_{\widehat{\mu}(\boldsymbol{X})}(m) = \frac{1}{n}\, \sum_{i=1}^n \mathds{1}_{\{ \widehat{\mu}(\boldsymbol{X}_i) \le m \}} \qquad \text{ and } \qquad \widehat{F}^{-1}_{\widehat{\mu}(\boldsymbol{X})}(\alpha) = \widehat{\mu}(\boldsymbol{X}_{(\lceil \alpha n\rceil)}). \end{equation*} This justifies the choice in \eqref{CAP empirical}. Similarly, we have for the denominator in \eqref{Gini ML empirical} \begin{equation}\label{discrete Lorenz curve continuous} \widehat{\rm CAP}^-_{Y,Y} (\alpha)= \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{Y_i\,\le \,Y_{(\lceil \alpha n \rceil)}\right\}} ~\stackrel{(*)}{=}~ \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{\lceil \alpha n \rceil} Y_{(i)}, \end{equation} for the identity $\stackrel{(*)}{=}$ to hold for any $\alpha \in (0,1)$, we need to assume that we have a strict ordering $Y_{(1)}< Y_{(2)} < \ldots <Y_{(n)}$, i.e., that there are no ties in the observations $(Y_i)_{1\le i \le n}$, which is the case because $Y$ was assumed to have a continuous distribution $F_Y$. 
This then motivates to set \begin{eqnarray}\nonumber \widehat{\rm CAP}_{Y,Y} (\alpha) &=& 1-\widehat{\rm CAP}^-_{Y,Y} (1-\alpha) = \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{Y_i\,> \,Y_{(\lceil (1-\alpha) n \rceil)}\right\}} \\&\stackrel{(*)}{=}& \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=\lceil (1-\alpha) n \rceil+1}^n Y_{(i)} . \label{Lorenz empirical} \end{eqnarray} If we have a perfect joint ordering between $(Y_i)_{1\le i \le n}$ and $(\widehat{\mu}(\boldsymbol{X}_i))_{1\le i \le n}$, the upper bound in \eqref{Gini ML empirical} is attained, see \eqref{CAP empirical} and \eqref{Lorenz empirical}. This is the motivation for the scaling in \eqref{Gini ML}. \end{itemize} \end{rems} In the definition of the Gini index in ML \eqref{Gini ML} we have assumed that $Y$ has a continuous distribution $F_Y$. This is not the case for discrete responses $Y$. Therefore, in the discrete case we need to replace the denominator in \eqref{Gini ML} by a different object. For illustrative purposes we show the binary classification case in the next example. \begin{example}[binary classification]\normalfont We consider a binary classification example with true regression function \begin{equation*} \boldsymbol{X}~\mapsto~ p^\dagger(\boldsymbol{X})= \E \left[\left. Y \right| \boldsymbol{X} \right]= \p \left[\left. Y=1 \right| \boldsymbol{X} \right] ~\in ~(0,1). \end{equation*} That is, $Y$ is conditionally Bernoulli distributed, given $\boldsymbol{X}$, with probability $p^\dagger(\boldsymbol{X}) \in (0,1)$ and range ${\cal Y}=\{0,1\}$. 
In this case the CAP for a regression function $\boldsymbol{X} \mapsto \widehat{p}(\boldsymbol{X})$ with continuous distribution $F_{\widehat{p}(\boldsymbol{X})}$ is for $\alpha \in (0,1)$ given by \begin{eqnarray*} {\rm CAP}_{Y,\widehat{p}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{p}(\boldsymbol{X}) > F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] \\&=& \frac{1}{\p[Y=1]}\, \E \left[ \mathds{1}_{\left\{Y=1, \, \widehat{p}(\boldsymbol{X}) > F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] \\&=&\p \left[\left. \widehat{p}(\boldsymbol{X}) > F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right| Y=1\right] \\&=&1-F_{\widehat{p}(\boldsymbol{X})|Y=1}\left(F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right). \end{eqnarray*} This corresponds to formula (5.2) in Tasche \cite{Tasche}. For the Gini index in ML we need to calculate the denominator of \eqref{Gini ML}. However, this formula only applies for a continuous distribution $F_Y$ of $Y$. In the case of a discrete distribution of $Y$ we need to modify \eqref{Gini ML}. Starting from the right-hand side of \eqref{discrete Lorenz curve continuous}, we define the empirical function in the discrete case by \begin{equation*} \alpha\in(0,1) ~ \mapsto ~ \widehat{\rm CAP}^-_{Y,Y} (\alpha) ~=~ \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{\lceil \alpha n \rceil} Y_{(i)}, \end{equation*} for i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$. In the Bernoulli case, this function is identically equal to zero up to $\alpha \le 1-\sum_{i=1}^n Y_i/n$, where $1-\sum_{i=1}^n Y_i/n$ corresponds to the proportion of zeros among the observations $(Y_i)_{1\le i \le n}$, and afterwards it increases to 1. Since this increase is only described on the discrete grid with span $1/n$, we linearly interpolate between these points. This provides a straight line between $1-\sum_{i=1}^n Y_i/n$ and 1 with slope $n/\sum_{i=1}^n Y_i$. 
Under this linear interpolation, we get the area (integral) \begin{equation*} \int_0^1 \widehat{\rm CAP}^-_{Y,Y} (\alpha) \, d\alpha = \frac{1}{2n}\,\sum_{i=1}^n Y_i. \end{equation*} By the law of large numbers, the latter converges to $p^\dagger/2=\E[p^\dagger(\boldsymbol{X})]/2=\E[Y]/2$, a.s., as $n\to \infty$. This motivates in the (discrete) binary classification case the following definition of the Gini index in ML \begin{equation}\label{Gini ML Bernoulli} G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} = \frac{1/2-\int_0^1 F_{\widehat{p}(\boldsymbol{X})|Y=1}\left(F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right) d\alpha} {(1-p^\dagger)/2}. \end{equation} In the binary classification case, the CAP can be related to the receiver operating characteristics (ROC) curve. The area under the curve (AUC) of the ROC curve has a one-to-one relationship to the Gini index in ML \eqref{Gini ML Bernoulli} in the Bernoulli case, we refer to Section 5 in Tasche \cite{Tasche}. We mention this because the ML community more frequently uses the AUC than the Gini index for model selection. In general, in the discrete case we replace the integral in the denominator in \eqref{Gini ML} by the term \begin{equation}\label{the general denominator} \frac{1}{4\E[Y]}\, \E\left[\left|Y-\widetilde{Y}\right|\right], \end{equation} where $\widetilde{Y}$ is an independent copy of $Y$. This latter quantity \eqref{the general denominator} can be calculated for any distribution $F_Y$ of $Y$, and in the continuous case we precisely receive the denominator in \eqref{Gini ML}. The binary classification case \eqref{the general denominator} provides us with $(1-p^\dagger)/2$ which gives \eqref{Gini ML Bernoulli}. \hfill {\scriptsize $\blacksquare$} \end{example} \section{Auto-calibration and consistency of the Gini index} \label{Auto-calibration and consistency of the Gini index} Let $(Y,\boldsymbol{X}) \sim F$. 
A regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ is auto-calibrated for $Y$ if, a.s., \begin{equation*} \widehat{\mu}(\boldsymbol{X}) = \E \left[ Y \left|\widehat{\mu}(\boldsymbol{X}) \right]\right. . \end{equation*} Auto-calibration is an important property in insurance pricing, as it implies that every cohort of insurance policies paying the same price $\widehat{\mu}(\boldsymbol{X})$ is in average self-financing, because the price $\widehat{\mu}(\boldsymbol{X})$ exactly covers the expected claim $Y$ of that cohort. I.e., we do not have any systematic cross-financing between the price cohorts. This is the core of risk classification in insurance. It also implies unbiasedness on the portfolio level \begin{equation}\label{unbiasedness under auto-calibration} \E\left[\widehat{\mu}(\boldsymbol{X})\right] = \E \left[ Y \right], \end{equation} which is a minimal requirement in insurance pricing. Typically, there are many auto-calibrated regression functions $\widehat{\mu}(\boldsymbol{X})$ for $Y$, i.e., there are many systems of self-financing pricing cohorts. \begin{lemma} \label{lemma convex order} The true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})=\E[Y|\boldsymbol{X} ]$ is auto-calibrated for $Y$, and it strictly dominates in convex order any other auto-calibrated regression function $\boldsymbol{X}\mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$. \end{lemma} {\footnotesize {\bf Proof.} To prove auto-calibration of $\mu^\dagger$ we apply the tower property to the $\sigma$-algebras $\sigma( \mu^\dagger(\boldsymbol{X})) \subset \sigma(\boldsymbol{X})$ which gives, a.s., \begin{equation*} \E\left[Y\left| \mu^\dagger(\boldsymbol{X})\right]\right. =\E\left[\E\left[Y\left| \boldsymbol{X}\right]\right.\left| \mu^\dagger(\boldsymbol{X})\right]\right. =\E\left[\mu^\dagger(\boldsymbol{X})\left| \mu^\dagger(\boldsymbol{X})\right]\right. =\mu^\dagger(\boldsymbol{X}). 
\end{equation*} For any convex function $\psi$, auto-calibration, the tower property for $\sigma(\widehat{\mu}(\boldsymbol{X})) \subset \sigma(\boldsymbol{X})$ and Jensen's inequality give \begin{eqnarray*} \E \left[ \psi \left(\widehat{\mu}(\boldsymbol{X}) \right)\right] &=&\E \left[ \psi \left(\E \left[ Y \left|\widehat{\mu}(\boldsymbol{X}) \right]\right.\right)\right] ~=~\E \left[ \psi \left(\E \left[\E \left[ Y \left|\boldsymbol{X} \right]\right.\left|\widehat{\mu}(\boldsymbol{X}) \right]\right.\right)\right] ~=~\E \left[ \psi \left(\E \left[\left.\mu^\dagger(\boldsymbol{X})\right|\widehat{\mu}(\boldsymbol{X}) \right]\right)\right] \\&\le &\E \left[ \E \left[\left.\psi\left(\mu^\dagger(\boldsymbol{X})\right) \right|\widehat{\mu}(\boldsymbol{X}) \right]\right] ~=~ \E \left[ \psi\left(\mu^\dagger(\boldsymbol{X})\right) \right], \end{eqnarray*} whenever these exist. This proves that $\mu^\dagger$ dominates in convex order any other auto-calibrated regression function $\widehat{\mu}$ for $Y$. Assume that there exists an auto-calibrated regression function $\boldsymbol{X}\mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$ such that for any convex function $\psi$ we have an equality in the previous calculation, whenever these exist. This implies that $\mu^\dagger(\boldsymbol{X})$ is $\sigma(\widehat{\mu}(\boldsymbol{X}))$-measurable. Auto-calibration and the tower property for $\sigma(\widehat{\mu}(\boldsymbol{X})) \subset \sigma(\boldsymbol{X})$ then provide, a.s., \begin{equation*} \widehat{\mu}(\boldsymbol{X}) = \E \left[ Y \left|\widehat{\mu}(\boldsymbol{X}) \right]\right. = \E \left[\E \left[ Y \left|\boldsymbol{X} \right]\right. \left|\widehat{\mu}(\boldsymbol{X}) \right]\right. =\E \left[\left. \mu^\dagger(\boldsymbol{X}) \right|\widehat{\mu}(\boldsymbol{X}) \right]=\mu^\dagger(\boldsymbol{X}). \end{equation*} This proves the statement of strict convex order. 
\hfill {\scriptsize $\Box$}} \medskip The next proposition is a consequence of Lemma \ref{lemma convex order} and of Theorem 3.1 in Kr\"uger--Ziegel \cite{Ziegel}. \begin{prop} \label{proposition Bregman} The true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})$ forecast-dominates any auto-calibrated regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$ meaning that \begin{equation*} \E \left[ - D_{\psi}(Y,\mu^\dagger(\boldsymbol{X}))\right] \ge \E \Big[ -D_{\psi}(Y,\widehat{\mu}(\boldsymbol{X}))\Big], \end{equation*} for any convex function $\psi$ where the above exists, and with Bregman divergence given by \begin{equation*} D_\psi(y,m) = \psi(y)-\psi(m)-\psi'(m)(y-m) ~\ge ~0, \end{equation*} for $y,m \in \R$ and $\psi'$ is a (sub-)gradient of the convex function $\psi$. \end{prop} Proposition \ref{proposition Bregman} says that every negative Bregman divergence provides a consistent scoring rule \eqref{definition consistency} for the conditional mean regression functional $T$ under auto-calibration for $Y$. This statement motivates the common practice in model selection of minimizing (out-of-sample) deviance losses, as deviance losses are special cases of Bregman divergences; see Chapters 2 and 4 in W\"uthrich--Merz \cite{WM2022}. For more information on this topic we refer to Kr\"uger--Ziegel \cite{Ziegel}, Theorem 7 in Gneiting \cite{Gneiting} and Savage \cite{Savage}, the latter two references state that Bregman divergences provide the only strictly consistent scoring functions for mean estimation. \medskip The definition of the Gini index \cite{Gini0} in economics slightly differs from the ML version \eqref{Gini ML}. Assume $F_{\widehat{\mu}(\boldsymbol{X})}$ is a continuous distribution. 
It is then based on the {\it Lorenz curve} \cite{Lorenz} given by \begin{equation*} \alpha \in (0,1) \quad \mapsto \quad L_{{\widehat{\mu}(\boldsymbol{X})}} \left( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha) \right) ~=~ \frac{1}{\E[\widehat{\mu}(\boldsymbol{X})]}\, \E \left[ \widehat{\mu}(\boldsymbol{X}) \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) \le F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}}\right] ~\in~ [0,1]. \end{equation*} Note that we have the property ${\rm CAP}^-_{\widehat{\mu}(\boldsymbol{X}), \widehat{\mu}(\boldsymbol{X})}(\alpha)= L_{{\widehat{\mu}(\boldsymbol{X})}} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha))$ for all $\alpha \in (0,1)$, see \eqref{mirrored CAP definition}. \medskip The {\it Gini index in economics} has many (equivalent)\footnote{For an equivalence in \eqref{Gini economics} we need that $F_{\widehat{\mu}(\boldsymbol{X})}$ is continuous, otherwise one should choose the term on the right-hand side as the definition of the Gini index in economics.} definitions; we use the following two \begin{equation}\label{Gini economics} G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})} = 1 - 2 \int_0^1 L_{{\widehat{\mu}(\boldsymbol{X})}} \left( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right) d\alpha = \frac{1}{2\E[\widehat{\mu}(\boldsymbol{X})]}\, \E\Big[\Big|\widehat{\mu}(\boldsymbol{X})-\widehat{\mu}(\boldsymbol{Z})\Big|\Big], \end{equation} where $\widehat{\mu}(\boldsymbol{Z})$ is an independent copy of $\widehat{\mu}(\boldsymbol{X})$. The first definition in \eqref{Gini economics} is based on a continuous distribution $F_{\widehat{\mu}(\boldsymbol{X})}$, whereas the second one can be used for any distribution $F_{\widehat{\mu}(\boldsymbol{X})}$; we also refer to \eqref{the general denominator}. \begin{figure}[htb!] 
\begin{center} \begin{minipage}[t]{0.44\textwidth} \begin{center} \includegraphics[width=\textwidth]{CAP.pdf} \end{center} \end{minipage} \begin{minipage}[t]{0.44\textwidth} \begin{center} \includegraphics[width=\textwidth]{GammaLorenz1.pdf} \end{center} \end{minipage} \end{center} \vspace{-.7cm} \caption{(lhs) cumulative accuracy profile (CAP) and (rhs) Lorenz curve.} \label{Gamma Lorenz} \end{figure} There are three differences between the Gini index in ML and the one in economics, see Figure \ref{Gamma Lorenz}: (i) $G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})}$ considers a mirrored version of the curves compared to $G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})}$; (ii) $G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})}$ depends on $Y$ and $\widehat{\mu}(\boldsymbol{X})$, $G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})}$ only depends on $\widehat{\mu}(\boldsymbol{X})$; (iii) scalings are different leading to areas B and C, respectively, in Figure \ref{Gamma Lorenz}. The two Gini indices are geometrically obtained by, see Figure \ref{Gamma Lorenz}, \begin{equation}\label{Gini geometry} G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})} = \frac{\text{area}({\rm A})}{\text{area}({\rm A}+{\rm B})} \quad \text{ and } \quad G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})} = \frac{\text{area}({\rm A})}{\text{area}({\rm A}+{\rm C})}= 2\, \text{area}({\rm A})=1-2\,\text{area}({\rm C}). \end{equation} Property 3.1 of Denuit--Trufin \cite{DenuitTrufin} gives the following nice result. \begin{prop}\label{Denuit Trufin result} Under auto-calibration of the regression function $\boldsymbol{X} \to \widehat{\mu}(\boldsymbol{X})$ for $Y$ we have the identity ${\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)=1-L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha))$ for all $\alpha \in (0,1)$. 
\end{prop} {\footnotesize {\bf Proof.} Using the tower property, auto-calibration of $\widehat{\mu}$ for $Y$ and unbiasedness \eqref{unbiasedness under auto-calibration} give us \begin{eqnarray*} {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] ~=~ \frac{1}{\E[Y]}\, \E \left[\E \left[\left. Y \right| \widehat{\mu}(\boldsymbol{X})\right]\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] \\ &=& \frac{1}{\E[\widehat{\mu}(\boldsymbol{X})]}\, \E \left[ \widehat{\mu}(\boldsymbol{X})\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] ~=~1-\frac{1}{\E[\widehat{\mu}(\boldsymbol{X})]}\, \E \left[ \widehat{\mu}(\boldsymbol{X}) \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) \le F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right]. \end{eqnarray*} This proves the claim. \hfill {\scriptsize $\Box$}} \medskip Thus, under auto-calibration for $Y$, the CAP and the Lorenz curve coincide (up to mirroring/sign switching). This gives us the following corollary. \begin{cor} \label{identical Gini} Under auto-calibration of the regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$ we have for the Gini indices \begin{equation}\label{GL identity} G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}= \frac{G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})}} {2\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1}. 
\end{equation} \end{cor} {\footnotesize {\bf Proof.} Proposition \ref{Denuit Trufin result} gives us for the Gini index in ML \begin{eqnarray*} G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} &=& \frac{\int_0^1 {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha -1/2} {\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2} ~=~ \frac{1/2-\int_0^1 L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha))\, d\alpha} {\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2} \\&=& \frac{1-2\int_0^1 L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha))\, d\alpha} {2\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1} ~=~ \frac{1-2\int_0^1 L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\beta))\, d\beta} {2\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1}, \end{eqnarray*} where the last step uses the change of variable $\alpha \mapsto \beta=1-\alpha$. This proves the claim. \hfill {\scriptsize $\Box$}} \medskip This says that under auto-calibration for the response both Gini indices (the ML score and the version in economics) provide the same scoring rule because the (positive) denominator\footnote{Note that the denominator in \eqref{GL identity} is positive for every non-deterministic $Y$. This follows from the fact that the denominator is equal to twice \eqref{the general denominator} which is positive unless $Y$ is deterministic.} in \eqref{GL identity} does not depend on the specific choice of the regression function $\widehat{\mu}(\cdot)$. Moreover, the same arguments apply to the Gini indices in non-continuous cases, e.g., in the binary classification (Bernoulli) case \eqref{Gini ML Bernoulli}. 
\begin{theo} \label{proposition Gini} The true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})$ maximizes the Gini index (in ML) among all auto-calibrated regression functions $\boldsymbol{X}\mapsto\widehat{\mu}(\boldsymbol{X})$ for $Y$, i.e., $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} > G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$ unless $\widehat{\mu}(\boldsymbol{X})=\mu^\dagger(\boldsymbol{X})$, a.s. \end{theo} {\footnotesize {\bf Proof.} Conditionally, given $\widehat{\mu}(\boldsymbol{Z})$, $m \mapsto |m-\widehat{\mu}(\boldsymbol{Z})|$ is a convex function in $m \in \R$. Using formula \eqref{Gini economics}, independence between $\widehat{\mu}(\boldsymbol{X})$ and $\widehat{\mu}(\boldsymbol{Z})$ in \eqref{Gini economics} and Lemma \ref{lemma convex order} we obtain the inequality, a.s., \begin{equation*} \E\left[\left.\left|\widehat{\mu}(\boldsymbol{X})-\widehat{\mu}(\boldsymbol{Z})\right|\, \right| \widehat{\mu}(\boldsymbol{Z}) \right] \le \E\left[\left.\left|\mu^\dagger(\boldsymbol{X})-\widehat{\mu}(\boldsymbol{Z})\right|\, \right| \widehat{\mu}(\boldsymbol{Z}) \right], \end{equation*} where $\mu^\dagger(\boldsymbol{X})$ is independent of $\widehat{\mu}(\boldsymbol{Z})$. Using the tower property, applying the same argument to the exchanged role of $\mu^\dagger(\boldsymbol{X})$ and $\widehat{\mu}(\boldsymbol{Z})$, using unbiasedness \eqref{unbiasedness under auto-calibration} and using Corollary \ref{identical Gini} provides $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} \ge G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$. Assume there exists an auto-calibrated regression function $\widehat{\mu}$ for $Y$ such that $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} = G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$. 
Using auto-calibration of $\widehat{\mu}$ for $Y$ and the tower property, we receive for $\p$-a.e.~$\omega \in \Omega$ \begin{equation}\label{only using auto-calibration} \widehat{\mu}(\boldsymbol{X})(\omega) =\E\left[Y \left|\widehat{\mu}(\boldsymbol{X})\right]\right.(\omega) =\E\left[\E\left.\left[Y\right|\boldsymbol{X}\right] \left|\widehat{\mu}(\boldsymbol{X})\right]\right.(\omega) =\E\left[\left.\mu^\dagger(\boldsymbol{X}) \right|\widehat{\mu}(\boldsymbol{X})\right](\omega). \end{equation} Denote by $\Omega_1 \subset \Omega$ a set of full measure 1 on which \eqref{only using auto-calibration} holds. On $\Omega_1$, the predictor $\widehat{\mu}(\boldsymbol{X})$ is between the conditional essential infimum and supremum of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$, because it corresponds to the conditional expectation of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$. Consider the case of sample points $\omega \in \Omega_1$ where the conditional essential infimum and supremum of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$, do not coincide, and denote the corresponding set of sample points by $\Omega_2 \subset \Omega_1$. On $\Omega_2$, the predictor $\widehat{\mu}(\boldsymbol{X})$ is strictly between the conditional essential infimum and supremum of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$, due to the conditional expectation property \eqref{only using auto-calibration}. 
We have using \eqref{only using auto-calibration} and independence between $\widehat{\mu}(\boldsymbol{X})$ and $\widehat{\mu}(\boldsymbol{Z})$ \begin{eqnarray}\nonumber \E\left[\left|\widehat{\mu}(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right] &=& \E\left[\left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) \right|\widehat{\mu}(\boldsymbol{X})\right]-\widehat{\mu}(\boldsymbol{Z})\right| \right] ~=~\E\left[ \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| \right] \\&=& \E\left[\left(\mathds{1}_{\Omega_2}+\mathds{1}_{\Omega_2^c}\right) \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| \right]. \label{this will give the proof} \end{eqnarray} We calculate the first term on the right-hand side of \eqref{this will give the proof} \begin{equation*} \E\left[\mathds{1}_{\Omega_2} \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right] \right|\,\right] = \int_{\Omega_2}\left( \int_\Omega \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| (\omega, \widetilde{\omega})\,d\p(\widetilde{\omega})\right) d\p(\omega). \end{equation*} We study the inner integral for fixed sample point $\omega \in \Omega_2$. 
Jensen's inequality gives us \begin{equation}\label{Omega1} \int_\Omega \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| (\omega, \widetilde{\omega})\,d\p(\widetilde{\omega}) ~<~ \int_\Omega \E\left[\left.\left|\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right|\, \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right] (\omega, \widetilde{\omega})\,d\p(\widetilde{\omega}), \end{equation} where we receive a strict inequality for $\omega \in \Omega_2$ because of the following items: (1) on $\Omega_2$, $\mu^\dagger(\boldsymbol{X})$ is non-deterministic, conditionally given $\widehat{\mu}(\boldsymbol{X})$, (2) $m \mapsto |m-\widehat{\mu}(\boldsymbol{Z})|$ is a convex function, (3) $\widehat{\mu}(\boldsymbol{Z})$ has the same distribution (and support) as $\widehat{\mu}(\boldsymbol{X})$, and (4) $\widehat{\mu}(\boldsymbol{Z})$ and $(\mu^\dagger(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{X}))$ are independent. Items (1)-(4) imply that on a set of positive $\p(\widetilde{\omega})$-measure we receive a strict Jensen's inequality, because on this set, $\widehat{\mu}(\boldsymbol{Z})$ is strictly within the conditional essential infimum and supremum of (the non-deterministic) $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$. Assume $\p[\Omega_2]>0$, i.e., strict inequality \eqref{Omega1} occurs on a set of positive measure. 
Applying Jensen's inequality also to the other term in \eqref{this will give the proof} we receive strict inequality \begin{equation*}\nonumber \E\left[\left|\widehat{\mu}(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right] ~< ~ \E\left[ \E\left[\left.\left|\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right] \right] = \E\left[ \left|\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right]. \end{equation*} This strict inequality contradicts our assumption $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} = G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$. Therefore, $\p[\Omega_2]=0$, which implies \begin{equation*} \p \left[\Omega_2^c \cap \Omega_1\right] = \p \left[ \Omega_2^c \right]=1. \end{equation*} On the set $\Omega_2^c \cap \Omega_1$, we have $\mu^\dagger(\boldsymbol{X}) =\widehat{\mu}(\boldsymbol{X})$, which proves the claim. \hfill {\scriptsize $\Box$}} \medskip Theorem \ref{proposition Gini} proves that the Gini index gives a strictly consistent scoring rule on the class of auto-calibrated regression functions that are $\boldsymbol{X}$-measurable, because the true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})$ maximizes this Gini index. A bigger Gini index can only be achieved by a larger information set than the $\sigma$-algebra generated by $\boldsymbol{X}$. \medskip The following proposition generalizes Property 5.1 of Denuit et al.~\cite{DenuitCharpentierTrufin}, which gives a method of restoring auto-calibration for a general regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. \begin{prop}Consider a regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. The following regression function is auto-calibrated for $Y$ \begin{equation*} \boldsymbol{X}~\mapsto~ \widehat{\mu}^{\rm (auto)}(\boldsymbol{X})= \E \left[Y \left| \widehat{\mu}(\boldsymbol{X}) \right]\right. . 
\end{equation*} \end{prop} {\footnotesize {\bf Proof.} Note that $\widehat{\mu}^{\rm (auto)}(\boldsymbol{X})$ is $\sigma(\widehat{\mu}(\boldsymbol{X}))$-measurable. This implies $\sigma(\widehat{\mu}^{\rm (auto)}(\boldsymbol{X})) \subset \sigma(\widehat{\mu}(\boldsymbol{X}))$. Henceforth, using the tower property, a.s., \begin{equation*} \E \left[Y \left| \widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \right]\right. = \E \left[\E \left[Y \left| \widehat{\mu}(\boldsymbol{X}) \right]\right. \left| \widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \right]\right. = \E \left[\widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \left| \widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \right]\right. =\widehat{\mu}^{\rm (auto)}(\boldsymbol{X}). \end{equation*} This completes the proof. \hfill {\scriptsize $\Box$}} \section{Conclusions} \label{Conclusions} In general, one should not use the Gini index for model selection because it does not give a strictly consistent scoring rule and, thus, may lead to wrong decisions. We have shown in Theorem \ref{proposition Gini} that if we restrict Gini index scoring to the class of auto-calibrated regression functions for the given response, the Gini index allows for strictly consistent scoring. This also translates to the binary classification case where the (machine learning version of the) Gini index has an equivalent formulation in terms of the area under the curve (AUC) of the receiver operating characteristics (ROC) curve, we refer to Tasche \cite{Tasche}. We only need to ensure that the binary classification model is auto-calibrated for the Bernoulli response to receive a strictly consistent scoring rule from the AUC. 
\bigskip {\small \renewcommand{\baselinestretch}{.51} \section{Introduction} The Gini index (Gini score, accuracy ratio) is a popular tool for model selection in machine learning, and there are versions of the Gini index that are used to evaluate actuarial pricing models and financial credit risk models; see Frees et al.~\cite{Frees1, Frees2}, Denuit et al.~\cite{DenuitSznajderTrufin}, Engelmann et al.~\cite{Engelmann} and Tasche \cite{Tasche}. However, in general, the Gini index does not give a (strictly) consistent scoring rule; Example 3 of Byrne \cite{Byrne} gives a counterexample. (Strict) consistency is an important property in model selection because it ensures that maximizing the Gini index does not lead to a wrong model choice; see Gneiting \cite{Gneiting} and Gneiting--Raftery \cite{GneitingRaftery}. The Gini index can be obtained from Somers' $D$ \cite{Somers}, which essentially considers Kendall's $\tau$; see Newson \cite{Newson}. Intuitively, this tells us that the Gini index is a rank-based score that is not calibration-sensitive. The missing piece to make the Gini index a strictly consistent scoring rule is to restrict it to the class of auto-calibrated regression models, this is proved in Theorem \ref{proposition Gini}, below; for auto-calibration we refer to Kr\"uger--Ziegel \cite{Ziegel}, Denuit et al.~\cite{DenuitCharpentierTrufin} and Section 7.4.2 of W\"uthrich--Merz \cite{WM2022}. \medskip {\bf Organization.} In the next section, we introduce the notion of strictly consistent scoring rules. In Section \ref{The Gini index in machine learning}, we discuss the Gini index as it is usually used in the machine learning community. In Section \ref{Auto-calibration and consistency of the Gini index}, we introduce and discuss the property of having an auto-calibrated regression model (forecasts), and we prove that the Gini index gives a strictly consistent scoring rule if we restrict to the class of auto-calibrated regression models. 
This makes the maximization of the Gini index a sensible model selection tool on the class of auto-calibrated regression models. Finally, in Section \ref{Conclusions} we conclude. \section{Consistent scoring rules} Let $(Y,\boldsymbol{X})$ be a random tuple on a sufficiently rich probability space $(\Omega, {\cal A}, \p)$ with real-valued non-negative response $Y$ having finite mean and with covariates $\boldsymbol{X}$. Denote by ${\cal F}$ the family of potential distributions of $(Y,\boldsymbol{X})$ being supported on ${\cal Y} \times {\cal X}$. Let $F_{Y| \boldsymbol{X}}$ be the conditional distribution of $Y$, given $\boldsymbol{X}$. For any model $(Y,\boldsymbol{X})\sim F \in {\cal F}$, we consider the conditional mean functional $T$ \begin{equation*} F_{Y|\boldsymbol{X}} ~\mapsto~ T(F_{Y|\boldsymbol{X}}) = \mu^\dagger(\boldsymbol{X})= \E \left[\left.Y\right|\boldsymbol{X} \right], \end{equation*} where $\boldsymbol{X} \mapsto \mu^\dagger(\boldsymbol{X})=\E \left[\left.Y\right|\boldsymbol{X} \right]$ denotes the true regression function of the chosen model. The main task in regression modeling is to find this unknown true regression function $\mu^\dagger(\cdot)$ from i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$, having the same distribution as $(Y,\boldsymbol{X})$. \medskip Choose a scoring function $S:{\cal Y}\times \R \to \R$ giving us the score $\E\left[S\left(Y,\widehat{\mu}(\boldsymbol{X})\right)\right]$ for regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ and $(Y,\boldsymbol{X}) \sim F \in {\cal F}$. 
A {\it scoring rule} is obtained by selecting the argument(s) $\widehat{\mu}^\star(\cdot)$ that maximize the score over the regression functions $\widehat{\mu}(\cdot)$, subject to existence, \begin{equation}\label{scoring rule definition} \widehat{\mu}^\star(\cdot) ~\in~ \underset{\widehat{\mu}(\cdot)}{\arg\max}~ \E\left[S\left(Y,\widehat{\mu}(\boldsymbol{X})\right)\right], \end{equation} under the given model choice $(Y,\boldsymbol{X}) \sim F \in {\cal F}$. \medskip A scoring rule is called {\it consistent} on ${\cal F}$ for the conditional mean functional $T$, if for any model $(Y,\boldsymbol{X})\sim F \in {\cal F}$ with conditional distributions $F_{Y|\boldsymbol{X}}$ of $Y$, given $\boldsymbol{X}$, we have $S(Y,T(F_{Y|{\boldsymbol{X}}})) \in L^1(\p)$, and for any regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ with $S(Y,\widehat{\mu}(\boldsymbol{X})) \in L^1(\p)$ we have \begin{equation}\label{definition consistency} \E\left[S\left(Y,T(F_{Y|{\boldsymbol{X}}})\right)\right] \ge \E\left[S\left(Y,\widehat{\mu}(\boldsymbol{X})\right)\right]. \end{equation} A scoring rule is called {\it strictly consistent} on ${\cal F}$ for the conditional mean functional $T$, if it is consistent on ${\cal F}$, and if an identity in \eqref{definition consistency} holds if and only if $\widehat{\mu}(\boldsymbol{X})=T(F_{Y|\boldsymbol{X}})=\mu^\dagger(\boldsymbol{X})$, a.s. \begin{rems} \normalfont \begin{itemize} \item Strict consistency implies that the true regression function $\mu^\dagger(\cdot)$ is the unique maximizer in \eqref{scoring rule definition}, and it can be estimated by score maximization (assuming it is contained in the set over which we optimize, which we generally do). 
Empirically, we then consider for i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$, \begin{equation*} \underset{\widehat{\mu}(\cdot)}{\arg\max}~ \frac{1}{n}\sum_{i=1}^nS\left(Y_i,\widehat{\mu}(\boldsymbol{X}_i)\right), \end{equation*} where we still need to ensure that we can exchange the limit $n\to\infty$ and the $\arg\max$-operator to asymptotically select the true regression function $\mu^\dagger(\cdot)$ under strict consistency. \item Formula \eqref{definition consistency} states unconditional consistency as we average over the distribution of $\boldsymbol{X}$. For conditional consistency (in $\boldsymbol{X}$) and its relation to the unconditional version we refer to Section 2.2 in Dimitriadis et al.~\cite{DimitriadisFisslerZiegel2020}. A point prediction version of consistency is given in Definition 1 in Gneiting \cite{Gneiting}. \item For scoring rule \eqref{scoring rule definition} we consider a maximization. By a sign switch we can turn this into a minimization problem, and in that case we rather speak about expected loss minimization. \item Typically, we restrict \eqref{scoring rule definition}-\eqref{definition consistency} to smaller classes of regression functions $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. In the sequel, we will require continuity for these smaller classes, and, further below, we require the auto-calibration property. This requires that the true regression function $\mu^\dagger(\cdot)$ has this continuity, auto-calibration it will satisfy automatically, see Lemma \ref{lemma convex order}, below. \end{itemize} \end{rems} \section{The Gini index in machine learning} \label{The Gini index in machine learning} In the sequel we assume $\widehat{\mu}(\boldsymbol{X})$ to have a continuous distribution $F_{\widehat{\mu}(\boldsymbol{X})}$ for all $(Y,\boldsymbol{X})\sim F \in {\cal F}$ and for any considered regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. 
This implies $F_{\widehat{\mu}(\boldsymbol{X})}(F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha))=\alpha$ for all $\alpha \in (0,1)$, and with $F_{\widehat{\mu}(\boldsymbol{X})}^{-1}$ denoting the left-continuous generalized inverse of $F_{\widehat{\mu}(\boldsymbol{X})}$. \medskip In machine learning (ML) one considers the {\it cumulative accuracy profile} (CAP) defined by \begin{equation*} \alpha \in (0,1) \quad \mapsto \quad {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) ~=~ \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] ~\in~ [0,1]. \end{equation*} In actuarial science, the CAP is also called concentration curve (up to sign switches), see Denuit--Trufin \cite{DenuitTrufin}. The CAP measures a rank-based correlation between the prediction $\widehat{\mu}(\boldsymbol{X})$ and the response $Y$. The {\it Gini index (Gini score, Gini ratio, Gini coefficient, accuracy ratio) in ML} is defined by \begin{equation}\label{Gini ML} G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} = \frac{\int_0^1 {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha -1/2} {\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2}, \end{equation} where we additionally assume that $Y$ has an (unconditional) continuous distribution $F_Y$. For a geometric interpretation see Figure \ref{Gamma Lorenz} (lhs) and formula \eqref{Gini geometry}, below. \begin{rems}\normalfont \begin{itemize} \item The denominator in \eqref{Gini ML} does not use the regression function $\widehat{\mu}(\cdot)$, i.e., it has no impact on model selection by maximizing the Gini index $G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})}$ over $\widehat{\mu}(\cdot)$. 
Hence, for scoring we can focus on the term in the numerator \begin{eqnarray}\nonumber \int_0^1 {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha &=&\frac{1}{\E[Y]} \, \E \left[ Y \int_0^1 \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\, d\alpha \right] \\&=&\nonumber \frac{1}{\E[Y]} \, \E \left[ Y \,\p \left[\left. F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(U)< \widehat{\mu}(\boldsymbol{X}) \right| \widehat{\mu}(\boldsymbol{X}) \right] \right] \\&=&\frac{1}{\E[Y]} \, \E \left[ Y F_{\widehat{\mu}(\boldsymbol{X})}(\widehat{\mu}(\boldsymbol{X})) \right], \end{eqnarray} for an independent $(0,1)$-uniform random variable $U$ and where we use continuity of $F_{\widehat{\mu}(\boldsymbol{X})}$. This shows that the Gini index in ML is not calibration-sensitive because $F_{\widehat{\mu}(\boldsymbol{X})}(\widehat{\mu}(\boldsymbol{X}))$ has a $(0,1)$-uniform distribution, i.e., the specific distribution of $\widehat{\mu}(\boldsymbol{X})$ does not matter, but only its correlation with $Y$ matters. 
\item Since typically the true data model $(Y,\boldsymbol{X}) \sim F$ is not known, the Gini index in ML \eqref{Gini ML} is replaced by an empirical version \begin{equation}\label{Gini ML empirical} \widehat{G}^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} = \frac{\int_0^1 \widehat{\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha -1/2} {\int_0^1 \widehat{\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2}~\le~1, \end{equation} where we set \begin{equation}\label{CAP empirical} \widehat{\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) =\frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{\widehat{\mu}(\boldsymbol{X}_i)> \widehat{\mu}\left(\boldsymbol{X}_{\left(\lceil (1-\alpha) n \rceil\right)}\right)\right\}}, \end{equation} for i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$, having the same distribution as $(Y,\boldsymbol{X})$, and for order statistics $\widehat{\mu}(\boldsymbol{X}_{(1)})< \widehat{\mu}(\boldsymbol{X}_{(2)}) < \ldots < \widehat{\mu}(\boldsymbol{X}_{(n)})$; note that by assumption the distribution of $\widehat{\mu}(\boldsymbol{X})$ is continuous which implies that all observations $\widehat{\mu}(\boldsymbol{X}_i)$ are mutually different for $1\le i \le n$, and we have a strict ordering in the order statistics. \item Let us further comment on \eqref{CAP empirical}. First, if we mirror the CAP at the diagonal we have \begin{eqnarray}\label{mirrored CAP definition} {\rm CAP}^-_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) \le F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}}\right] \\&=&\nonumber 1- \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}}\right] ~=~1-{\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (1-\alpha). 
\end{eqnarray} For an empirical version of the mirrored CAP we replace the above expression by \begin{eqnarray*} \widehat{\rm CAP}^-_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i}\, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{\widehat{\mu}(\boldsymbol{X}_i) \le \widehat{F}_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}} \\&=& 1- \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i}\, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{\widehat{\mu}(\boldsymbol{X}_i) > \widehat{\mu}(\boldsymbol{X}_{(\lceil \alpha n\rceil)})\right\}}, \end{eqnarray*} where in the last indicator we use the empirical distribution, for $m \in \R$ and $\alpha \in (0,1)$, \begin{equation*} \widehat{F}_{\widehat{\mu}(\boldsymbol{X})}(m) = \frac{1}{n}\, \sum_{i=1}^n \mathds{1}_{\{ \widehat{\mu}(\boldsymbol{X}_i) \le m \}} \qquad \text{ and } \qquad \widehat{F}^{-1}_{\widehat{\mu}(\boldsymbol{X})}(\alpha) = \widehat{\mu}(\boldsymbol{X}_{(\lceil \alpha n\rceil)}). \end{equation*} This justifies the choice in \eqref{CAP empirical}. Similarly, we have for the denominator in \eqref{Gini ML empirical} \begin{equation}\label{discrete Lorenz curve continuous} \widehat{\rm CAP}^-_{Y,Y} (\alpha)= \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{Y_i\,\le \,Y_{(\lceil \alpha n \rceil)}\right\}} ~\stackrel{(*)}{=}~ \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{\lceil \alpha n \rceil} Y_{(i)}, \end{equation} for the identity $\stackrel{(*)}{=}$ to hold for any $\alpha \in (0,1)$, we need to assume that we have a strict ordering $Y_{(1)}< Y_{(2)} < \ldots <Y_{(n)}$, i.e., that there are no ties in the observations $(Y_i)_{1\le i \le n}$, which is the case because $Y$ was assumed to have a continuous distribution $F_Y$. 
This then motivates to set \begin{eqnarray}\nonumber \widehat{\rm CAP}_{Y,Y} (\alpha) &=& 1-\widehat{\rm CAP}^-_{Y,Y} (1-\alpha) = \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{n} Y_i\, \mathds{1}_{\left\{Y_i\,> \,Y_{(\lceil (1-\alpha) n \rceil)}\right\}} \\&\stackrel{(*)}{=}& \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=\lceil (1-\alpha) n \rceil+1}^n Y_{(i)} . \label{Lorenz empirical} \end{eqnarray} If we have a perfect joint ordering between $(Y_i)_{1\le i \le n}$ and $(\widehat{\mu}(\boldsymbol{X}_i))_{1\le i \le n}$, the upper bound in \eqref{Gini ML empirical} is attained, see \eqref{CAP empirical} and \eqref{Lorenz empirical}. This is the motivation for the scaling in \eqref{Gini ML}. \end{itemize} \end{rems} In the definition of the Gini index in ML \eqref{Gini ML} we have assumed that $Y$ has a continuous distribution $F_Y$. This is not the case for discrete responses $Y$. Therefore, in the discrete case we need to replace the denominator in \eqref{Gini ML} by a different object. For illustrative purposes we show the binary classification case in the next example. \begin{example}[binary classification]\normalfont We consider a binary classification example with true regression function \begin{equation*} \boldsymbol{X}~\mapsto~ p^\dagger(\boldsymbol{X})= \E \left[\left. Y \right| \boldsymbol{X} \right]= \p \left[\left. Y=1 \right| \boldsymbol{X} \right] ~\in ~(0,1). \end{equation*} That is, $Y$ is conditionally Bernoulli distributed, given $\boldsymbol{X}$, with probability $p^\dagger(\boldsymbol{X}) \in (0,1)$ and range ${\cal Y}=\{0,1\}$. 
In this case the CAP for a regression function $\boldsymbol{X} \mapsto \widehat{p}(\boldsymbol{X})$ with continuous distribution $F_{\widehat{p}(\boldsymbol{X})}$ is for $\alpha \in (0,1)$ given by \begin{eqnarray*} {\rm CAP}_{Y,\widehat{p}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{p}(\boldsymbol{X}) > F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] \\&=& \frac{1}{\p[Y=1]}\, \E \left[ \mathds{1}_{\left\{Y=1, \, \widehat{p}(\boldsymbol{X}) > F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] \\&=&\p \left[\left. \widehat{p}(\boldsymbol{X}) > F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right| Y=1\right] \\&=&1-F_{\widehat{p}(\boldsymbol{X})|Y=1}\left(F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right). \end{eqnarray*} This corresponds to formula (5.2) in Tasche \cite{Tasche}. For the Gini index in ML we need to calculate the denominator of \eqref{Gini ML}. However, this formula only applies for a continuous distribution $F_Y$ of $Y$. In the case of a discrete distribution of $Y$ we need to modify \eqref{Gini ML}. Starting from the right-hand side of \eqref{discrete Lorenz curve continuous}, we define the empirical function in the discrete case by \begin{equation*} \alpha\in(0,1) ~ \mapsto ~ \widehat{\rm CAP}^-_{Y,Y} (\alpha) ~=~ \frac{1}{\frac{1}{n}\sum_{i=1}^n Y_i} \, \frac{1}{n} \sum_{i=1}^{\lceil \alpha n \rceil} Y_{(i)}, \end{equation*} for i.i.d.~data $(Y_i,\boldsymbol{X}_i)$, $1\le i\le n$. In the Bernoulli case, this function is identically equal to zero up to $\alpha \le 1-\sum_{i=1}^n Y_i/n$, where $1-\sum_{i=1}^n Y_i/n$ describes the proportion of zeros among the observations $(Y_i)_{1\le i \le n}$, and afterwards it increases to 1. Since this increase is only described on the discrete grid with span $1/n$, we linearly interpolate between these points. This provides a straight line between $1-\sum_{i=1}^n Y_i/n$ and 1 with slope $n/\sum_{i=1}^n Y_i$. 
Under this linear interpolation, we get the area (integral) \begin{equation*} \int_0^1 \widehat{\rm CAP}^-_{Y,Y} (\alpha) \, d\alpha = \frac{1}{2n}\,\sum_{i=1}^n Y_i. \end{equation*} By the law of large numbers, the latter converges to $p^\dagger/2=\E[p^\dagger(\boldsymbol{X})]/2=\E[Y]/2$, a.s., as $n\to \infty$. This motivates in the (discrete) binary classification case the following definition of the Gini index in ML \begin{equation}\label{Gini ML Bernoulli} G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} = \frac{1/2-\int_0^1 F_{\widehat{p}(\boldsymbol{X})|Y=1}\left(F_{\widehat{p}(\boldsymbol{X})}^{-1}(1-\alpha)\right) d\alpha} {(1-p^\dagger)/2}. \end{equation} In the binary classification case, the CAP can be related to the receiver operating characteristics (ROC) curve. The area under the curve (AUC) of the ROC curve has a one-to-one relationship to the Gini index in ML \eqref{Gini ML Bernoulli} in the Bernoulli case, we refer to Section 5 in Tasche \cite{Tasche}. We mention this because the ML community more frequently uses the AUC than the Gini index for model selection. In general, in the discrete case we replace the integral in the denominator in \eqref{Gini ML} by the term \begin{equation}\label{the general denominator} \frac{1}{4\E[Y]}\, \E\left[\left|Y-\widetilde{Y}\right|\right], \end{equation} where $\widetilde{Y}$ is an independent copy of $Y$. This latter quantity \eqref{the general denominator} can be calculated for any distribution $F_Y$ of $Y$, and in the continuous case we precisely receive the denominator in \eqref{Gini ML}. The binary classification case \eqref{the general denominator} provides us with $(1-p^\dagger)/2$ which gives \eqref{Gini ML Bernoulli}. \hfill {\scriptsize $\blacksquare$} \end{example} \section{Auto-calibration and consistency of the Gini index} \label{Auto-calibration and consistency of the Gini index} Let $(Y,\boldsymbol{X}) \sim F$. 
A regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ is auto-calibrated for $Y$ if, a.s., \begin{equation*} \widehat{\mu}(\boldsymbol{X}) = \E \left[ Y \left|\widehat{\mu}(\boldsymbol{X}) \right]\right. . \end{equation*} Auto-calibration is an important property in insurance pricing, as it implies that every cohort of insurance policies paying the same price $\widehat{\mu}(\boldsymbol{X})$ is on average self-financing, because the price $\widehat{\mu}(\boldsymbol{X})$ exactly covers the expected claim $Y$ of that cohort. I.e., we do not have any systematic cross-financing between the price cohorts. This is the core of risk classification in insurance. It also implies unbiasedness on the portfolio level \begin{equation}\label{unbiasedness under auto-calibration} \E\left[\widehat{\mu}(\boldsymbol{X})\right] = \E \left[ Y \right], \end{equation} which is a minimal requirement in insurance pricing. Typically, there are many auto-calibrated regression functions $\widehat{\mu}(\boldsymbol{X})$ for $Y$, i.e., there are many systems of self-financing pricing cohorts. \begin{lemma} \label{lemma convex order} The true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})=\E[Y|\boldsymbol{X} ]$ is auto-calibrated for $Y$, and it strictly dominates in convex order any other auto-calibrated regression function $\boldsymbol{X}\mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$. \end{lemma} {\footnotesize {\bf Proof.} To prove auto-calibration of $\mu^\dagger$ we apply the tower property to the $\sigma$-algebras $\sigma( \mu^\dagger(\boldsymbol{X})) \subset \sigma(\boldsymbol{X})$ which gives, a.s., \begin{equation*} \E\left[Y\left| \mu^\dagger(\boldsymbol{X})\right]\right. =\E\left[\E\left[Y\left| \boldsymbol{X}\right]\right.\left| \mu^\dagger(\boldsymbol{X})\right]\right. =\E\left[\mu^\dagger(\boldsymbol{X})\left| \mu^\dagger(\boldsymbol{X})\right]\right. =\mu^\dagger(\boldsymbol{X}). 
\end{equation*} For any convex function $\psi$, auto-calibration, the tower property for $\sigma(\widehat{\mu}(\boldsymbol{X})) \subset \sigma(\boldsymbol{X})$ and Jensen's inequality give \begin{eqnarray*} \E \left[ \psi \left(\widehat{\mu}(\boldsymbol{X}) \right)\right] &=&\E \left[ \psi \left(\E \left[ Y \left|\widehat{\mu}(\boldsymbol{X}) \right]\right.\right)\right] ~=~\E \left[ \psi \left(\E \left[\E \left[ Y \left|\boldsymbol{X} \right]\right.\left|\widehat{\mu}(\boldsymbol{X}) \right]\right.\right)\right] ~=~\E \left[ \psi \left(\E \left[\left.\mu^\dagger(\boldsymbol{X})\right|\widehat{\mu}(\boldsymbol{X}) \right]\right)\right] \\&\le &\E \left[ \E \left[\left.\psi\left(\mu^\dagger(\boldsymbol{X})\right) \right|\widehat{\mu}(\boldsymbol{X}) \right]\right] ~=~ \E \left[ \psi\left(\mu^\dagger(\boldsymbol{X})\right) \right], \end{eqnarray*} whenever these exist. This proves that $\mu^\dagger$ dominates in convex order any other auto-calibrated regression function $\widehat{\mu}$ for $Y$. Assume that there exists an auto-calibrated regression function $\boldsymbol{X}\mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$ such that for any convex function $\psi$ we have an equality in the previous calculation, whenever these exist. This implies that $\mu^\dagger(\boldsymbol{X})$ is $\sigma(\widehat{\mu}(\boldsymbol{X}))$-measurable. Auto-calibration and the tower property for $\sigma(\widehat{\mu}(\boldsymbol{X})) \subset \sigma(\boldsymbol{X})$ then provide, a.s., \begin{equation*} \widehat{\mu}(\boldsymbol{X}) = \E \left[ Y \left|\widehat{\mu}(\boldsymbol{X}) \right]\right. = \E \left[\E \left[ Y \left|\boldsymbol{X} \right]\right. \left|\widehat{\mu}(\boldsymbol{X}) \right]\right. =\E \left[\left. \mu^\dagger(\boldsymbol{X}) \right|\widehat{\mu}(\boldsymbol{X}) \right]=\mu^\dagger(\boldsymbol{X}). \end{equation*} This proves the statement of strict convex order. 
\hfill {\scriptsize $\Box$}} \medskip The next proposition is a consequence of Lemma \ref{lemma convex order} and of Theorem 3.1 in Kr\"uger--Ziegel \cite{Ziegel}. \begin{prop} \label{proposition Bregman} The true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})$ forecast-dominates any auto-calibrated regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$ meaning that \begin{equation*} \E \left[ - D_{\psi}(Y,\mu^\dagger(\boldsymbol{X}))\right] \ge \E \Big[ -D_{\psi}(Y,\widehat{\mu}(\boldsymbol{X}))\Big], \end{equation*} for any convex function $\psi$ where the above exists, and with Bregman divergence given by \begin{equation*} D_\psi(y,m) = \psi(y)-\psi(m)-\psi'(m)(y-m) ~\ge ~0, \end{equation*} for $y,m \in \R$ and $\psi'$ is a (sub-)gradient of the convex function $\psi$. \end{prop} Proposition \ref{proposition Bregman} says that every negative Bregman divergence provides a consistent scoring rule \eqref{definition consistency} for the conditional mean regression functional $T$ under auto-calibration for $Y$. This statement motivates the common practice in model selection of minimizing (out-of-sample) deviance losses, as deviance losses are special cases of Bregman divergences; see Chapters 2 and 4 in W\"uthrich--Merz \cite{WM2022}. For more information on this topic we refer to Kr\"uger--Ziegel \cite{Ziegel}, Theorem 7 in Gneiting \cite{Gneiting} and Savage \cite{Savage}, the latter two references state that Bregman divergences provide the only strictly consistent scoring functions for mean estimation. \medskip The definition of the Gini index \cite{Gini0} in economics slightly differs from the ML version \eqref{Gini ML}. Assume $F_{\widehat{\mu}(\boldsymbol{X})}$ is a continuous distribution. 
It is then based on the {\it Lorenz curve} \cite{Lorenz} given by \begin{equation*} \alpha \in (0,1) \quad \mapsto \quad L_{{\widehat{\mu}(\boldsymbol{X})}} \left( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha) \right) ~=~ \frac{1}{\E[\widehat{\mu}(\boldsymbol{X})]}\, \E \left[ \widehat{\mu}(\boldsymbol{X}) \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) \le F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right\}}\right] ~\in~ [0,1]. \end{equation*} Note that we have the property ${\rm CAP}^-_{\widehat{\mu}(\boldsymbol{X}), \widehat{\mu}(\boldsymbol{X})}= L_{{\widehat{\mu}(\boldsymbol{X})}} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha))$, see \eqref{mirrored CAP definition}. \medskip The {\it Gini index in economics} has many (equivalent)\footnote{For an equivalence in \eqref{Gini economics} we need that $F_{\widehat{\mu}(\boldsymbol{X})}$ is continuous, otherwise one should choose the term on the right-hand side as the definition of the Gini index in economics.} definitions, we use the following two \begin{equation}\label{Gini economics} G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})} = 1 - 2 \int_0^1 L_{{\widehat{\mu}(\boldsymbol{X})}} \left( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\alpha)\right) d\alpha = \frac{1}{2\E[\widehat{\mu}(\boldsymbol{X})]}\, \E\Big[\Big|\widehat{\mu}(\boldsymbol{X})-\widehat{\mu}(\boldsymbol{Z})\Big|\Big], \end{equation} where $\widehat{\mu}(\boldsymbol{Z})$ is an independent copy of $\widehat{\mu}(\boldsymbol{X})$. The first definition in \eqref{Gini economics} is based on a continuous distribution $F_{\widehat{\mu}(\boldsymbol{X})}$, whereas the second one can be used for any distribution $F_{\widehat{\mu}(\boldsymbol{X})}$, we also refer to \eqref{the general denominator}. \begin{figure}[htb!] 
\begin{center} \begin{minipage}[t]{0.44\textwidth} \begin{center} \includegraphics[width=\textwidth]{CAP.pdf} \end{center} \end{minipage} \begin{minipage}[t]{0.44\textwidth} \begin{center} \includegraphics[width=\textwidth]{GammaLorenz1.pdf} \end{center} \end{minipage} \end{center} \vspace{-.7cm} \caption{(lhs) cumulative accuracy profile (CAP) and (rhs) Lorenz curve.} \label{Gamma Lorenz} \end{figure} There are three differences between the Gini index in ML and the one in economics, see Figure \ref{Gamma Lorenz}: (i) $G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})}$ considers a mirrored version of the curves compared to $G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})}$; (ii) $G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})}$ depends on $Y$ and $\widehat{\mu}(\boldsymbol{X})$, $G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})}$ only depends on $\widehat{\mu}(\boldsymbol{X})$; (iii) scalings are different leading to areas B and C, respectively, in Figure \ref{Gamma Lorenz}. The two Gini indices are geometrically obtained by, see Figure \ref{Gamma Lorenz}, \begin{equation}\label{Gini geometry} G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})} = \frac{\text{area}({\rm A})}{\text{area}({\rm A}+{\rm B})} \quad \text{ and } \quad G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})} = \frac{\text{area}({\rm A})}{\text{area}({\rm A}+{\rm C})}= 2\, \text{area}({\rm A})=1-2\,\text{area}({\rm C}). \end{equation} Property 3.1 of Denuit--Trufin \cite{DenuitTrufin} gives the following nice result. \begin{prop}\label{Denuit Trufin result} Under auto-calibration of the regression function $\boldsymbol{X} \to \widehat{\mu}(\boldsymbol{X})$ for $Y$ we have the identity ${\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)=1-L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha))$ for all $\alpha \in (0,1)$. 
\end{prop} {\footnotesize {\bf Proof.} Using the tower property, auto-calibration of $\widehat{\mu}$ for $Y$ and unbiasedness \eqref{unbiasedness under auto-calibration} give us \begin{eqnarray*} {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha) &=& \frac{1}{\E[Y]}\, \E \left[ Y\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] ~=~ \frac{1}{\E[Y]}\, \E \left[\E \left[\left. Y \right| \widehat{\mu}(\boldsymbol{X})\right]\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] \\ &=& \frac{1}{\E[\widehat{\mu}(\boldsymbol{X})]}\, \E \left[ \widehat{\mu}(\boldsymbol{X})\, \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) > F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right] ~=~1-\frac{1}{\E[\widehat{\mu}(\boldsymbol{X})]}\, \E \left[ \widehat{\mu}(\boldsymbol{X}) \mathds{1}_{\left\{ \widehat{\mu}(\boldsymbol{X}) \le F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha)\right\}}\right]. \end{eqnarray*} This proves the claim. \hfill {\scriptsize $\Box$}} \medskip Thus, under auto-calibration for $Y$, the CAP and the Lorenz curve coincide (up to mirroring/sign switching). This gives us the following corollary. \begin{cor} \label{identical Gini} Under auto-calibration of the regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$ for $Y$ we have for the Gini indices \begin{equation}\label{GL identity} G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}= \frac{G^{\rm eco}_{\widehat{\mu}(\boldsymbol{X})}} {2\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1}. 
\end{equation} \end{cor} {\footnotesize {\bf Proof.} Proposition \ref{Denuit Trufin result} gives us for the Gini index in ML \begin{eqnarray*} G^{\rm ML}_{Y, \widehat{\mu}(\boldsymbol{X})} &=& \frac{\int_0^1 {\rm CAP}_{Y,\widehat{\mu}(\boldsymbol{X})} (\alpha)\, d\alpha -1/2} {\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2} ~=~ \frac{1/2-\int_0^1 L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha))\, d\alpha} {\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1/2} \\&=& \frac{1-2\int_0^1 L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(1-\alpha))\, d\alpha} {2\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1} ~=~ \frac{1-2\int_0^1 L_{\widehat{\mu}(\boldsymbol{X})} ( F_{\widehat{\mu}(\boldsymbol{X})}^{-1}(\beta))\, d\beta} {2\int_0^1 {\rm CAP}_{Y,Y} (\alpha)\, d\alpha -1}, \end{eqnarray*} where the last step uses the change of variable $\alpha \mapsto \beta=1-\alpha$. This proves the claim. \hfill {\scriptsize $\Box$}} \medskip This says that under auto-calibration for the response both Gini indices (the ML score and the version in economics) provide the same scoring rule because the (positive) denominator\footnote{Note that the denominator in \eqref{GL identity} is positive for every non-deterministic $Y$. This follows from the fact that the denominator is equal to twice \eqref{the general denominator} which is positive unless $Y$ is deterministic.} in \eqref{GL identity} does not depend on the specific choice of the regression function $\widehat{\mu}(\cdot)$. Moreover, the same arguments apply to the Gini indices in non-continuous cases, e.g., in the binary classification (Bernoulli) case \eqref{Gini ML Bernoulli}. 
\begin{theo} \label{proposition Gini} The true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})$ maximizes the Gini index (in ML) among all auto-calibrated regression functions $\boldsymbol{X}\mapsto\widehat{\mu}(\boldsymbol{X})$ for $Y$, i.e., $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} > G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$ unless $\widehat{\mu}(\boldsymbol{X})=\mu^\dagger(\boldsymbol{X})$, a.s. \end{theo} {\footnotesize {\bf Proof.} Conditionally, given $\widehat{\mu}(\boldsymbol{Z})$, $m \mapsto |m-\widehat{\mu}(\boldsymbol{Z})|$ is a convex function in $m \in \R$. Using formula \eqref{Gini economics}, independence between $\widehat{\mu}(\boldsymbol{X})$ and $\widehat{\mu}(\boldsymbol{Z})$ in \eqref{Gini economics} and Lemma \ref{lemma convex order} we obtain inequality, a.s., \begin{equation*} \E\left[\left.\left|\widehat{\mu}(\boldsymbol{X})-\widehat{\mu}(\boldsymbol{Z})\right|\, \right| \widehat{\mu}(\boldsymbol{Z}) \right] \le \E\left[\left.\left|\mu^\dagger(\boldsymbol{X})-\widehat{\mu}(\boldsymbol{Z})\right|\, \right| \widehat{\mu}(\boldsymbol{Z}) \right], \end{equation*} where $\mu^\dagger(\boldsymbol{X})$ is independent of $\widehat{\mu}(\boldsymbol{Z})$. Using the tower property, applying the same argument to the exchanged role of $\mu^\dagger(\boldsymbol{X})$ and $\widehat{\mu}(\boldsymbol{Z})$, using unbiasedness \eqref{unbiasedness under auto-calibration} and using Corollary \ref{identical Gini} provides $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} \ge G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$. Assume there exists an auto-calibrated regression function $\widehat{\mu}$ for $Y$ such that $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} = G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$. 
Using auto-calibration of $\widehat{\mu}$ for $Y$ and the tower property, we receive for $\p$-a.e.~$\omega \in \Omega$ \begin{equation}\label{only using auto-calibration} \widehat{\mu}(\boldsymbol{X})(\omega) =\E\left[Y \left|\widehat{\mu}(\boldsymbol{X})\right]\right.(\omega) =\E\left[\E\left.\left[Y\right|\boldsymbol{X}\right] \left|\widehat{\mu}(\boldsymbol{X})\right]\right.(\omega) =\E\left[\left.\mu^\dagger(\boldsymbol{X}) \right|\widehat{\mu}(\boldsymbol{X})\right](\omega). \end{equation} Denote by $\Omega_1 \subset \Omega$ a set of full measure 1 on which \eqref{only using auto-calibration} holds. On $\Omega_1$, the predictor $\widehat{\mu}(\boldsymbol{X})$ is between the conditional essential infimum and supremum of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$, because it corresponds to the conditional expectation of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$. Consider the case of sample points $\omega \in \Omega_1$ where the conditional essential infimum and supremum of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$, do not coincide, and denote the corresponding set of sample points by $\Omega_2 \subset \Omega_1$. On $\Omega_2$, the predictor $\widehat{\mu}(\boldsymbol{X})$ is strictly between the conditional essential infimum and supremum of $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$, due to the conditional expectation property \eqref{only using auto-calibration}. 
We have using \eqref{only using auto-calibration} and independence between $\widehat{\mu}(\boldsymbol{X})$ and $\widehat{\mu}(\boldsymbol{Z})$ \begin{eqnarray}\nonumber \E\left[\left|\widehat{\mu}(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right] &=& \E\left[\left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) \right|\widehat{\mu}(\boldsymbol{X})\right]-\widehat{\mu}(\boldsymbol{Z})\right| \right] ~=~\E\left[ \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| \right] \\&=& \E\left[\left(\mathds{1}_{\Omega_2}+\mathds{1}_{\Omega_2^c}\right) \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| \right]. \label{this will give the proof} \end{eqnarray} We calculate the first term on the right-hand side of \eqref{this will give the proof} \begin{equation*} \E\left[\mathds{1}_{\Omega_2} \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right] \right|\,\right] = \int_{\Omega_2}\left( \int_\Omega \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| (\omega, \widetilde{\omega})\,d\p(\widetilde{\omega})\right) d\p(\omega). \end{equation*} We study the inner integral for fixed sample point $\omega \in \Omega_2$. 
Jensen's inequality gives us \begin{equation}\label{Omega1} \int_\Omega \left|\E\left[\left.\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z}) \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right]\right| (\omega, \widetilde{\omega})\,d\p(\widetilde{\omega}) ~<~ \int_\Omega \E\left[\left.\left|\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right|\, \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right] (\omega, \widetilde{\omega})\,d\p(\widetilde{\omega}), \end{equation} where we receive a strict inequality for $\omega \in \Omega_2$ because of the following items: (1) on $\Omega_2$, $\mu^\dagger(\boldsymbol{X})$ is non-deterministic, conditionally given $\widehat{\mu}(\boldsymbol{X})$, (2) $m \mapsto |m-\widehat{\mu}(\boldsymbol{Z})|$ is a convex function, (3) $\widehat{\mu}(\boldsymbol{Z})$ has the same distribution (and support) as $\widehat{\mu}(\boldsymbol{X})$, and (4) $\widehat{\mu}(\boldsymbol{Z})$ and $(\mu^\dagger(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{X}))$ are independent. Items (1)-(4) imply that on a set of positive $\p(\widetilde{\omega})$-measure we receive a strict Jensen's inequality, because on this set, $\widehat{\mu}(\boldsymbol{Z})$ is strictly within the conditional essential infimum and supremum of (the non-deterministic) $\mu^\dagger(\boldsymbol{X})$, given $\widehat{\mu}(\boldsymbol{X})$. Assume $\p[\Omega_2]>0$, i.e., strict inequality \eqref{Omega1} occurs on a set of positive measure. 
Applying Jensen's inequality also to the other term in \eqref{this will give the proof} we receive strict inequality \begin{equation*}\nonumber \E\left[\left|\widehat{\mu}(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right] ~< ~ \E\left[ \E\left[\left.\left|\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right|\widehat{\mu}(\boldsymbol{X}),\widehat{\mu}(\boldsymbol{Z})\right] \right] = \E\left[ \left|\mu^\dagger(\boldsymbol{X}) -\widehat{\mu}(\boldsymbol{Z})\right| \right]. \end{equation*} This strict inequality contradicts our assumption $G^{\rm ML}_{Y,\mu^\dagger(\boldsymbol{X})} = G^{\rm ML}_{Y,\widehat{\mu}(\boldsymbol{X})}$. Therefore, $\p[\Omega_2]=0$, which implies \begin{equation*} \p \left[\Omega_2^c \cap \Omega_1\right] = \p \left[ \Omega_2^c \right]=1. \end{equation*} On the set $\Omega_2^c \cap \Omega_1$, we have $\mu^\dagger(\boldsymbol{X}) =\widehat{\mu}(\boldsymbol{X})$, which proves the claim. \hfill {\scriptsize $\Box$}} \medskip Theorem \ref{proposition Gini} proves that the Gini index gives a strictly consistent scoring rule on the class of auto-calibrated regression functions that are $\boldsymbol{X}$-measurable, because the true regression function $\boldsymbol{X}\mapsto\mu^\dagger(\boldsymbol{X})$ maximizes this Gini index. A bigger Gini index can only be achieved by a larger information set than the $\sigma$-algebra generated by $\boldsymbol{X}$. \medskip The following proposition generalizes Property 5.1 of Denuit et al.~\cite{DenuitCharpentierTrufin}, which gives a method of restoring auto-calibration for a general regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. \begin{prop}Consider a regression function $\boldsymbol{X} \mapsto \widehat{\mu}(\boldsymbol{X})$. The following regression function is auto-calibrated for $Y$ \begin{equation*} \boldsymbol{X}~\mapsto~ \widehat{\mu}^{\rm (auto)}(\boldsymbol{X})= \E \left[Y \left| \widehat{\mu}(\boldsymbol{X}) \right]\right. . 
\end{equation*} \end{prop} {\footnotesize {\bf Proof.} Note that $\widehat{\mu}^{\rm (auto)}(\boldsymbol{X})$ is $\sigma(\widehat{\mu}(\boldsymbol{X}))$-measurable. This implies $\sigma(\widehat{\mu}^{\rm (auto)}(\boldsymbol{X})) \subset \sigma(\widehat{\mu}(\boldsymbol{X}))$. Henceforth, using the tower property, a.s., \begin{equation*} \E \left[Y \left| \widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \right]\right. = \E \left[\E \left[Y \left| \widehat{\mu}(\boldsymbol{X}) \right]\right. \left| \widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \right]\right. = \E \left[\widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \left| \widehat{\mu}^{\rm (auto)}(\boldsymbol{X}) \right]\right. =\widehat{\mu}^{\rm (auto)}(\boldsymbol{X}). \end{equation*} This completes the proof. \hfill {\scriptsize $\Box$}} \section{Conclusions} \label{Conclusions} In general, one should not use the Gini index for model selection because it does not give a strictly consistent scoring rule and, thus, may lead to wrong decisions. We have shown in Theorem \ref{proposition Gini} that if we restrict Gini index scoring to the class of auto-calibrated regression functions for the given response, the Gini index allows for strictly consistent scoring. This also translates to the binary classification case where the (machine learning version of the) Gini index has an equivalent formulation in terms of the area under the curve (AUC) of the receiver operating characteristics (ROC) curve, we refer to Tasche \cite{Tasche}. We only need to ensure that the binary classification model is auto-calibrated for the Bernoulli response to receive a strictly consistent scoring rule from the AUC. \bigskip {\small \renewcommand{\baselinestretch}{.51}
{ "redpajama_set_name": "RedPajamaArXiv" }
92
La musique limousine est la musique traditionnelle du territoire correspondant au moins à la province historique française du Limousin, et davantage l'ancienne région administrative du même nom, voire encore plus justement l'aire culturelle fondée sur la zone traditionnelle de locution de l'occitan limousin. Suivant cette dernière acception, ce territoire correspond approximativement aux trois départements de la Corrèze, de la Creuse et de la Haute-Vienne, ainsi qu'à des marges de quelques départements voisins, dans le sud-ouest de la France. La musique limousine est une des variantes locales de la musique occitane, aux côtés de la musique auvergnate par exemple. Elle présente aussi des caractéristiques voisines des musiques de tradition d'oïl du Berry ou du Poitou. Au-delà, plusieurs spécificités instrumentales (la chabrette), rythmiques, socio-culturelles (comme le rôle des migrations économiques récurrentes dans la fabrique des traditions) ou liées au répertoire chanté justifient que l'on parle de traditions proprement limousines. Cependant, le territoire recèle des pratiques parfois très localisées et donc très variées. Françoise Étay parle ainsi de la région comme d'une « mosaïque à grands carreaux » ; par exemple, certaines traditions de l'est du territoire (Combrailles, Xaintrie) présenteront d'importantes similitudes avec les traditions de pays auvergnats tels l'Artense, le Mauriacois ou le Bourbonnais. Le terme de musique traditionnelle limousine renvoie aux traditions populaires recueillies aux , mais aussi par extension aux pratiques d'interprétation ou de ré-interprétation et de recréation observées dès lors et aujourd'hui encore, revendiquant une filiation avec les traditions précitées. Les traditions musicales limousines sont indissociables de traditions de danses bien identifiées et documentées, dont la bourrée et la sautière sont les plus spécifiques. Histoire Quelles origines ? 
Il est difficile de relier les traditions populaires limousines, de transmission orale, à des pratiques pluriséculaires. On peut affirmer qu'elles sont issues des pratiques de communautés essentiellement rurales, comme c'est le cas d'un grand nombre de traditions musicales occidentales, identifiées au début de l'époque contemporaine. Elles ont aussi été largement façonnées par les mouvements migratoires qui ont apporté leur lot d'influences extérieures, a fortiori urbaines. À partir du , les sociétés citadines composées de nombreux ruraux expatriés ont contribué à enrichir et renouveler le répertoire rural en intégrant des danses importées de l'étranger (mazurka, polka), ou en adaptant leurs propres instruments (c'est le cas de la cabrette inventée par les Auvergnats de Paris). Les traditions de cornemuses doivent probablement en partie à la pratique de la musette de cour. En retour, les musiques populaires ont aussi inspiré la musique savante (Camille Saint-Saëns et sa Rhapsodie d'Auvergne par exemple). Premiers collectages, premières re-créations Les premiers collectages sont écrits : dans les années 1850, plusieurs informateurs corréziens contribuent au Recueil des poésies populaires de la France d'Hippolyte Fortoul ; certains comme Oscar Lacombe collectent même en autonomie. George Sand (Les Maîtres sonneurs, Le marquis de Villemer) ou Alfred Assollant évoquent les pratiques de musique populaire du centre de la France dans leur œuvre littéraire. Les peintres représentent peu les traditions musicales : Paysans limousins de Philippe-Auguste Jeanron mentionne de façon très certainement abusive une scène limousine ; le paysage comme l'allure des personnages ne paraissent pouvoir attester cette hypothèse. Le est aussi marqué par un essor des pratiques facilité par la plus grande accessibilité des instruments : l'abolition des privilèges et des corporations permet à un nombre croissant de personnes d'acquérir des instruments et donc de diffuser les airs. 
Les chorales et ensembles instrumentaux développés par les Limousins de Paris à partir des années 1890 (comme la chorale de l'Orphéon limousin de Jean Clément) et les bals organisés par cette « colonie » (notamment par la Ruche corrézienne) contribuent aussi à la popularisation des airs du pays, collectés par quelques figures comme le peintre saint-juniaud Jean Teilliet. L'accordéon investit le répertoire régional. Dans le même temps, de nombreux musiciens et poètes (François Sarre ou Jean Rebier par exemple) commencent à renouveler le répertoire en signant des compositions inédites. Les musiciens sont influencés par le mouvement félibréen : bien souvent les airs sont doublés de paroles en occitan limousin. Certaines de ces créations intègrent rapidement un corpus perçu encore aujourd'hui comme traditionnel. Les groupes folkloriques qui naissent au début du (L'Eicola dau barbichet, L'Eicola de la Brianço) présentent dans des « reconstitutions » en costumes spectaculaires des airs traditionnels et des compositions. Une partie du mouvement folkloriste, qui prétend pratiquer une tradition immuable et authentique, se compromet pendant l'Occupation. Mais une autre partie demeure populaire même après-guerre, portée par les succès musette tels Bruyères corréziennes (Jean Ségurel). Au , les collectages deviennent sonores. Pour ses Archives de la parole, Ferdinand Brunot prospecte en 1913 en Corrèze, et y enregistre plusieurs airs chantés. Dans les années 1930 à 1960, plusieurs enregistrements commerciaux (ceux de Martin Cayla notamment) permettent aussi de fixer et populariser une partie du répertoire ; l'essor du bal musette y contribue entre autres par la voix de Jacques Mario, auteur compositeur, l'un des chanteurs de l'accordéoniste corrézien Jean Ségurel, qui lui-même a appris d'oreille dans sa jeunesse. 
Dans les années 1950 et 1960, la radio locale (Robert Dagnas pour son émission Chez nous sur Radio France Limoges ou Antoinette Cougnoux et Jean Ségurel pour Le Limousin sur les ondes par exemple) joue un rôle dans la sauvegarde de chants et airs traditionnels. Les enquêtes du Musée national des Arts et Traditions populaires laissent peu de traces mais constituent un autre exemple des collectages à cette époque. Le renouveau folk Les années 1970 et 1980 sont marquées en France par la vague du « renouveau folk », en partie stimulée par les contestations populaires des années 1960 qui aux États-Unis notamment mettent en lumière les cultures afro-américaines. En France, et notamment en Limousin, ce sont avant tout des anglo-saxons qui mettent en évidence la richesse du patrimoine musical hexagonal : Hugh Shields publie un disque French folk songs from Correze, John Wright entame de premiers collectages dans la région, au côté d'autres musiciens, originaires ou non du Limousin, engagés dans les premiers folk-clubs du pays (Le Bourdon à Paris et surtout La Chanterelle à Lyon). Le musicien folk Pete Seeger invite même les « jeunes gens » qui s'intéressent aux traditions nord-américaines à ne pas se laisser « coca-colaniser » et donc à étudier leurs propres traditions. Les collecteurs du mouvement revivaliste, qui s'érigent tant contre les codes de la musique savante, ceux d'une ethnomusicologie parfois perçue comme réactionnaire et prédatrice et ceux des groupes folkloriques, ont mis un point d'honneur à concilier collecte, apprentissage, jeu et transmission de ce patrimoine musical. Parmi leurs idéaux figure la volonté de valoriser les cultures minoritaires comme créatrices de nouvelles sociabilités. Cet intérêt pour le milieu rural est assez novateur, alors que les traditions paysannes demeurent dans certaines consciences associées au ruralisme idéologique du régime de Vichy. 
Quelques collectages sont également réalisés en milieu urbain, notamment auprès des témoins du quartier populaire des Ponts à Limoges. Ces nombreux collectages, réalisés pour la plupart à partir du début des années 1970 et jusque dans les années 1990, ont permis dans l'urgence de restituer et de conserver des airs des joueurs qui disparaissent pour les derniers au seuil des années 2000. Parmi ces musiciens collectés, figurent notamment les « vielleux » et « violoneux » de la montagne limousine, la vielle à roue (en Creuse) et le violon (sur les hauteurs du plateau de Millevaches) étant deux instruments emblématiques du patrimoine musical régional. À ces deux pratiques instrumentales, s'ajoutent l'accordéon diatonique et la chabrette (cornemuse limousine, sauvée de l'oubli et érigée en emblème régional) ; ces quatre instruments sont ceux qui ont le plus laissé de traces musicales au . La chabrette est néanmoins le seul instrument qui soit réellement spécifique au Limousin, et dont seulement trois musiciens ont pu être enregistrés. Institutionnalisation La pratique contemporaine des musiques de tradition a été facilitée et entretenue par la création, en 1971, à Limoges, de l'Association des Ménétriers du Massif Central, une des premières associations nationales de formation mutualisée en musique et en danses traditionnelles, puis de l'association des Musiciens routiniers et plus tard encore, en 1987, du premier département de « musiques et danses traditionnelles » en France, au sein du Conservatoire à rayonnement régional de Limoges (à l'époque conservatoire national de région). Y sont depuis et encore en enseignés la pratique des danses et ensembles musicaux traditionnels (et pas folkloriques) et cinq instruments : la vielle à roue, la cornemuse du Centre, le violon, l'accordéon diatonique et la chabrette limousine. 
Première nationale, le Conservatoire a également mis en place en 1998 un diplôme d'études musicales spécialité musique traditionnelle, qui prépare en deux ans les candidats aux épreuves de culture musicale et de pratique instrumentale et collective, et dont un nombre important de lauréats évolue depuis dans le milieu musical professionnel. Répertoire traditionnel Instruments La tradition de violon corrézien et la pratique originale de la chabrette constituent selon Françoise Étay deux des principales singularités musicales du Limousin. Petite cornemuse très ornée, la chabrette est considérée comme héritière d'une cornemuse du Poitou jouée à la Renaissance, qui elle-même fut pratiquée à la Cour avant de gagner les campagnes du Centre-Ouest de la France et de perdurer plus longtemps en Limousin. Sa facture a été arrêtée au cours du , avant que quelques passionnés ne redécouvrent le corpus instrument d'instruments et n'entreprennent d'en relancer le jeu et la fabrication. La tradition de vielle à roue est historiquement particulièrement présente en Creuse, en raison de la proximité du centre de production d'instruments de Jenzat, dans l'Allier. Elle s'est particulièrement développée au , à l'époque où cet instrument était perçu comme bucolique et donc archétypal d'une tradition populaire et authentique. Importé en France avec les migrations économiques de la Révolution industrielle, notamment en provenance d'Italie, l'accordéon diatonique prospère rapidement par la richesse harmonique et rythmique qu'il permet d'introduire dans les pratiques musicales. Il bénéficie spécifiquement en Limousin du développement de deux fabriques, Dedenis à Brive-la-Gaillarde, puis Maugein à Tulle. Maugein est toujours en activité en ; il s'agit de la dernière entreprise de fabrication d'accordéons en France. La pratique du violon, très énergique, semble avoir été très vivace dans la Montagne limousine, où la majorité des collectages ont été réalisés. 
Facture d'instruments Initiés ou inspirés par une facture pluriséculaire, quelques luthiers se sont engagés dans un travail d'étude et de fabrication d'instruments, souvent destinés à leur propre pratique musicale. Il peut s'agir d'instruments fidèles à la tradition, ou cherchant à la réinterpréter comme dans le cas de la « vielle géante » à moteur électrique de Philippe Destrem et Jean-Michel Ponty et de la lutherie originale du premier cité. Chanson Les chansons collectées sont très largement marquées par un contexte paysan et rural en voie de disparition depuis le milieu du . Elles décrivent avant tout les temps forts de la vie paysanne : mariages, départs à la guerre, fêtes populaires, migrations économiques saisonnières comme celles des maçons de la Creuse à La Valse Limousine décrivant la campagne limousine. Françoise Étay a aussi mis en évidence une tradition de chant rebelle et engagé, sinon moqueur, dans une région marquée par les épisodes de résistance et les révoltes populaires. Depuis la fin du , plusieurs générations de poètes et compositeurs (Joseph Mazabraud, François Célor, Lucien Lansade, Jacques Mario, Clody Musette, François Sarre, Jan dau Melhau, Didier Garlaschi, Bernard Comby...) ont aussi enrichi et renouvelé un répertoire de chansons qui par leur langue, leur évocation du territoire, leur inspiration esthétique et instrumentale, à la fois puisent dans une tradition (et les collectages) et contribuent à la renouveler. Répertoire de danses Les danses propres au territoire sont la sautière (pratiquée dans un territoire partagé entre le sud de la Haute-Vienne, l'ouest de la Corrèze et l'est de la Dordogne) et la bourrée, dont la pratique diffère de la bourrée auvergnate, peut-être plus connue (peuvent aussi être évoquées les giattes, bourrées de Combrailles, ou encore la « bourrée des Monédières »). 
Comme dans de nombreuses autres régions françaises, les autres danses pratiquées dans les bals sont, parmi les plus communes, la valse, la scottish, la polka et la mazurka, dont il existe un répertoire spécifiquement limousin, ou les bourrées à deux temps. Pratiques actuelles Lieux et structures De nombreux accordéonistes bien connus du Limousin, le département de musique traditionnelle du Conservatoire de Limoges (CRR), le Conservatoire à rayonnement départemental Émile-Goué de la Creuse, le Conservatoire de Tulle, le Centre régional des musiques traditionnelles ou encore quelques associations (Roule… et ferme derrière, Musiqu'à deux, Et la moitié !, Valsaviris...) organisent tout au long de l'année des festivals (Rencontres musicales de Nedde, Balaviris, Semaine occitane), des stages, des concerts et des bals dans toute la région. Le CRMTL, l'Institut d'études occitanes et le département du CRR sont engagés sur des projets de recherche et de valorisation du patrimoine musical. La musique limousine se transmet et se représente via ces événements festifs réguliers, des ouvrages et une discographie, en Limousin et ailleurs. Coexistent des praticiens de musique et danse folkloriques (attachés à la reproduction et la conservation de traditions attachées à une époque) et leurs homologues traditionnels, inspirés par les traditions en ce qu'elles constituent une culture vivante et évolutive. Musiciens et groupes Un certain nombre de groupes de musiciens traditionnels ou folkloriques contribuent à la popularisation et à la médiatisation des airs et danses du Limousin. Musiciens traditionnels : Musiciens folkloriques Métissage et transmissions Réappropriées, interprétées par des musiciens qui à partir des années 1970 n'ont plus à voir avec le contexte rural et paysan des origines, exode rural et déprise agricole obligent, les musiques traditionnelles subissent donc un processus d'hybridation et de globalisation qui les enrichissent et les remodèlent. 
Ce processus se confirme au début du avec l'émergence d'une génération qui n'a pas pratiqué les collectages, dans des pratiques relevant souvent plus ou moins de tendances appelées néo-trad, en majorité moins militantes, porteuses de recréations autant appréciées que débattues. Divers projets de création participent aussi à inscrire les musiques de tradition dans la modernité et le renouvellement. Plusieurs groupes, dont certains membres sont issus des filières diplômantes du Limousin, contribuent aussi à la diffusion et à la vivacité des traditions d'inspiration limousine. À l'instar de ce qu'on observe ailleurs en France, de nombreux répertoires de musiques et danses d'origines géographiques diverses (Berry, Quercy, Poitou, Bretagne, Pyrénées...) irriguent la pratique limousine, via les bals et les échanges musicaux. Le département de musique traditionnelle de Limoges a également ouvert un cours de cornemuse bulgare. À l'inverse, le principe des groupes folkloriques demeure la reproduction spectaculaire des danses et airs tels qu'ils ont pu être interprétés à une époque donnée, dans un contexte socio-culturel défini. Réunis en une fédération Marche-Limousin, ces groupes sont eux aussi confrontés au défi de la pérennité de leurs activités. Le dialogue entre musiciens folkloriques et traditionnels est aléatoire : bien des seconds ont appris auprès des premiers, mais leurs modes et lieux d'expression quoique parents demeurent différents, peinant peut-être à leur lisibilité respective. En dépit de nombreux espaces de transmission et de la création du DEM spécialité musique traditionnelle au Conservatoire de Limoges, la pratique de certaines traditions parmi les plus spécifiques (notamment les danses de bourrée) n'en demeure pas moins en péril relatif. La numérisation du matériau collecté (portail La Biaça de l'Institut d'études occitanes du Limousin, portail Patrimoine oral du Massif central) peut contribuer à sa sauvegarde, sa pérennisation et sa retransmission. 
Notes et références Notes Références Voir aussi Bibliographie . . . . Françoise Étay, « La pédagogie des musiques traditionnelles françaises à l'épreuve de l'ouverture », intervention au symposium Musiques de tradition orale et éducation interculturelle, à la Cité de la musique de Paris, 3 et . . . . . . Articles connexes Musique traditionnelle, Musique régionale, Musique occitane Culture du Limousin, Conte limousin Violoneux Liens externes . . . . « Musiques et danses traditionnelles », Le magazine du plateau n°81, Télé Millevaches, . . . Limousin Limousin Musique en Corrèze Musique dans la Creuse Musique dans la Haute-Vienne
{ "redpajama_set_name": "RedPajamaWikipedia" }
7,338
<?xml version="1.0" encoding="utf-8"?> <reflection> <assemblies> <assembly name="PresentationCore"> <assemblydata version="4.0.0.0" culture="" key="0024000004800000940000000602000000240000525341310004000001000100B5FC90E7027F67871E773A8FDE8938C81DD402BA65B9201D60593E96C492651E889CC13F1415EBB53FAC1131AE0BD333C5EE6021672D9718EA31A8AEBD0DA0072F25D87DBA6FC90FFD598ED4DA35E44C398C454307E8E33B8426143DAEC9F596836F97C8F74750E5975C64E2189F45DEF46B2A2B1247ADC3652BF5C308055DA9" hash="SHA1" /> <attributes> <attribute> <type api="T:System.Reflection.AssemblyKeyFileAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>f:\dd\wpf\src\windows.snk</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyDelaySignAttribute" ref="true" /> <argument> <type api="T:System.Boolean" ref="false" /> <value>True</value> </argument> </attribute> <attribute> <type api="T:System.Resources.NeutralResourcesLanguageAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>en-US</value> </argument> </attribute> <attribute> <type api="T:System.Resources.SatelliteContractVersionAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>4.0.0.0</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyInformationalVersionAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>4.0.30319.0</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyFileVersionAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>4.6.81.0</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyCopyrightAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>© Microsoft Corporation. 
All rights reserved.</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyProductAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>Microsoft® .NET Framework</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyCompanyAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>Microsoft Corporation</value> </argument> </attribute> <attribute> <type api="T:System.Runtime.InteropServices.ComVisibleAttribute" ref="true" /> <argument> <type api="T:System.Boolean" ref="false" /> <value>False</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyDescriptionAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>PresentationCore.dll</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyTitleAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>PresentationCore.dll</value> </argument> </attribute> <attribute> <type api="T:System.Security.SecurityCriticalAttribute" ref="true" /> </attribute> <attribute> <type api="T:System.Security.AllowPartiallyTrustedCallersAttribute" ref="true" /> </attribute> <attribute> <type api="T:System.CLSCompliantAttribute" ref="true" /> <argument> <type api="T:System.Boolean" ref="false" /> <value>True</value> </argument> </attribute> <attribute> <type api="T:System.Reflection.AssemblyDefaultAliasAttribute" ref="true" /> <argument> <type api="T:System.String" ref="true" /> <value>PresentationCore.dll</value> </argument> </attribute> </attributes> </assembly> </assemblies> <apis> <api id="N:System.Windows.Media.Media3D.Converters"> <topicdata group="api" /> <apidata name="System.Windows.Media.Media3D.Converters" group="namespace" /> <elements> <element api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" /> <element 
api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" /> <element api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" /> <element api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" /> <element api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" /> <element api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" /> <element api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" /> <element api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" /> <element api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" /> </elements> <file name="e5339c6b-0f7f-d6fa-d8bd-b6e98e0ea90b" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" /> <apidata name="Matrix3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="824cd8e2-f64c-3015-4bac-b85a451d6372" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer"> <topicdata name="Matrix3DValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" /> <apidata name="Matrix3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" 
defaultConstructor="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" /> </containers> <file name="ffb72c62-5ebd-25a6-c10b-86f30b28c96f" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer"> <topicdata name="Matrix3DValueSerializer" group="list" 
subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" /> <apidata name="Matrix3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" /> </containers> <file name="c7f208c0-ece4-e60c-a336-04c1f83f3bd9" /> </api> <api 
id="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.#ctor"> <topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" ref="true" /> </containers> <file name="98c42a09-ae7b-a368-073b-091d3afb72a2" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" ref="true" /> </containers> <file name="a6d84e14-e7af-3ea8-834b-7750f2315895" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> 
<topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" ref="true" /> </containers> <file name="f76eebbf-ae7e-57e1-34ed-83edbb6356b3" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" 
kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" ref="true" /> </containers> <file name="cf0f8efa-0a46-0858-57a8-e218dc46c1cd" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Matrix3DValueSerializer" ref="true" /> </containers> <file name="ee2c7a4b-884c-c72c-230f-706d2b1c377c" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" /> <apidata name="Point3DCollectionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.#ctor" /> <family> 
<ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="77e94b1b-f82e-8d70-dee4-aa6781298dc7" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer"> <topicdata name="Point3DCollectionValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" /> <apidata name="Point3DCollectionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element 
api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" /> </containers> <file name="7f7cdcf9-b4b4-c666-457f-706d03246c11" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer"> <topicdata name="Point3DCollectionValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" /> <apidata name="Point3DCollectionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element 
api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" /> </containers> <file name="b175845a-c7fa-4c03-05cd-c5d8176bc279" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.#ctor"> <topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" ref="true" /> </containers> <file name="d9ad81d6-74df-ed4b-a198-ab2af573b197" /> </api> <api 
id="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" ref="true" /> </containers> <file name="a58c312f-cdc5-0645-4c7e-50b9d392531c" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" 
ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" ref="true" /> </containers> <file name="a894e14a-3ec0-6f36-3053-143420742a2a" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" ref="true" /> </containers> <file name="6e82562d-7e0d-ab7a-22ac-d997466062d6" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" 
/> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DCollectionValueSerializer" ref="true" /> </containers> <file name="adc1b2a0-8c9e-edca-71e8-c1203a7257b0" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" /> <apidata name="Point3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="31859039-bb14-f872-32e2-a432a0eb3750" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer"> <topicdata name="Point3DValueSerializer" group="list" 
subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" /> <apidata name="Point3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" /> 
</containers> <file name="bcac00a1-2d06-e558-7016-c0c9e37d6974" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer"> <topicdata name="Point3DValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" /> <apidata name="Point3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace 
api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" /> </containers> <file name="5e929f1a-1162-29c3-4fe0-b014c50e586b" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.#ctor"> <topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" ref="true" /> </containers> <file name="dde74cb8-5443-2987-ef41-d540ab6967f9" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" ref="true" /> </containers> <file 
name="33454445-0a76-0069-397f-f800f6081cb8" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" ref="true" /> </containers> <file name="5b9760de-d0c1-548c-706f-24ba6a7c9eee" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type 
api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" ref="true" /> </containers> <file name="b32a6fd7-a42f-d31a-ee9e-ff90c008fcac" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point3DValueSerializer" ref="true" /> </containers> <file name="73238f1e-bc6c-8b4b-19f9-5fb5b551ec59" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" /> <apidata name="Point4DValueSerializer" 
group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="b9735e7b-a13c-34f2-b440-5f1b1a975203" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer"> <topicdata name="Point4DValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" /> <apidata name="Point4DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element 
api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" /> </containers> <file name="0f92e82f-0390-4e3c-81f8-8fe717c3cf1e" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer"> <topicdata name="Point4DValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" /> <apidata name="Point4DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element 
api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" /> </containers> <file name="3574595c-430f-f49f-431e-cfe2271b773f" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.#ctor"> <topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" ref="true" /> </containers> <file name="31d14f1c-1413-90b1-a868-42383e2e7742" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member 
api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" ref="true" /> </containers> <file name="8a73060f-5bb6-d89f-8439-f603cc3a5b67" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type 
api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" ref="true" /> </containers> <file name="c5d6c0c8-d43f-2d56-97a6-851f238632d3" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" ref="true" /> </containers> <file name="4b665c3c-ad78-5894-db2a-d61f2cc20093" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Point4DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" 
ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Point4DValueSerializer" ref="true" /> </containers> <file name="96f9450d-ec69-0db1-3073-3d9d2aff684c" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" /> <apidata name="QuaternionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="ce51c3a7-1973-ebce-b3ac-8f8e3d7c24b5" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer"> <topicdata name="QuaternionValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" /> <apidata name="QuaternionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.#ctor" /> <elements> <element 
api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" /> </containers> <file name="3a04f237-7e19-5699-3091-b01a198b5af1" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer"> <topicdata name="QuaternionValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" 
/> <apidata name="QuaternionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" /> </containers> <file name="041d45a6-02a0-a64c-2410-8d2fc7a0097e" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.#ctor"> 
<topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" ref="true" /> </containers> <file name="437cd768-3542-ecd2-6b8b-374556063c98" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" ref="true" /> </containers> <file name="95f227d6-058f-413d-a4a3-e07b8c7ed130" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" 
group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" ref="true" /> </containers> <file name="91f24bf9-85b4-d7bf-b716-d950b9d112ac" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata 
version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" ref="true" /> </containers> <file name="b9b4d8a8-0361-b488-f7b5-dec4ac5ded41" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.QuaternionValueSerializer" ref="true" /> </containers> <file name="02547238-79fd-6a48-10fc-1073060c35f2" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" /> <apidata name="Rect3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type 
api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="207078fc-34bc-db44-50f0-187b4925ab3d" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer"> <topicdata name="Rect3DValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" /> <apidata name="Rect3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element 
api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" /> </containers> <file name="b4d8b3a3-6e70-e3b5-5a78-38558c60ad66" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer"> <topicdata name="Rect3DValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" /> <apidata name="Rect3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element 
api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" /> </containers> <file name="2f3d850c-78e7-a872-929f-19b13fe44372" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.#ctor"> <topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" ref="true" /> </containers> <file name="a148d3fd-648c-fb4c-922a-c30b8800fdea" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" 
/> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" ref="true" /> </containers> <file name="9891bbb7-9bc6-bf57-9cb9-3ef66253a6d3" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" ref="true" /> </containers> <file name="b145b50c-4e89-9195-8e0e-8aa8f6457fda" /> </api> <api 
id="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" ref="true" /> </containers> <file name="c9f49829-83e2-ed03-cae1-a095d1badf02" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> 
<type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Rect3DValueSerializer" ref="true" /> </containers> <file name="ab123dd5-3c02-1f5e-f41a-b51ab67744e0" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" /> <apidata name="Size3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="a7a4f8ce-d06d-2a09-dc4b-998050d4f8a3" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer"> <topicdata name="Size3DValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" /> <apidata name="Size3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> 
<element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" /> </containers> <file name="bc42c4f7-d0df-80b0-8d15-a4eb937cfc85" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer"> <topicdata name="Size3DValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" /> <apidata name="Size3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.#ctor" /> <elements> <element 
api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" /> </containers> <file name="f18ca323-a05f-5646-2af0-519018d5ce51" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.#ctor"> <topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata 
version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" ref="true" /> </containers> <file name="3c015c50-5aaf-54f9-d5f0-f8b07b067ace" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" ref="true" /> </containers> <file name="3a1f8869-9037-3557-ad1f-971ff1270fff" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type 
api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" ref="true" /> </containers> <file name="6d7ca516-05d5-6d1f-f8b6-633a2719d029" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" ref="true" /> </containers> <file name="a8ae5b80-1aca-6b52-186c-59abe47d87ce" /> </api> <api 
id="M:System.Windows.Media.Media3D.Converters.Size3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Size3DValueSerializer" ref="true" /> </containers> <file name="399640dd-4c25-b3aa-88a6-1648c9a4c90d" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" /> <apidata name="Vector3DCollectionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace 
api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="1630e125-2034-3dbf-bdbc-4ca44e5b798e" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer"> <topicdata name="Vector3DCollectionValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" /> <apidata name="Vector3DCollectionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element 
api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" /> </containers> <file name="5a8be527-aa3c-9451-1a76-df48b1d3c6fb" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer"> <topicdata name="Vector3DCollectionValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" /> <apidata name="Vector3DCollectionValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element 
api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" /> </containers> <file name="d28c3e1a-19df-dfff-217a-cd8a56b5fc3f" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.#ctor"> <topicdata group="api" /> <apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" ref="true" /> </containers> <file name="02f55492-d3cf-778c-8f76-fc6a8bb2788e" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member 
api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" ref="true" /> </containers> <file name="082887e5-5ed0-d11e-1219-ea97ee02a676" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type 
api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" ref="true" /> </containers> <file name="e2b3873d-a6b3-1117-17d9-783a648e7f0f" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" ref="true" /> </containers> <file name="983f1a12-ffbf-32e2-c918-26126f656147" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter 
name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DCollectionValueSerializer" ref="true" /> </containers> <file name="26839e99-89e9-f45e-a686-5770dc00fce9" /> </api> <api id="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer"> <topicdata group="api" allMembersTopicId="AllMembers.T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" /> <apidata name="Vector3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.#ctor" /> <family> <ancestors> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> <type api="T:System.Object" ref="true" /> </ancestors> </family> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> </containers> <file name="f918e038-b77d-d46d-3db7-a8e40e208799" /> </api> <api id="AllMembers.T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer"> <topicdata name="Vector3DValueSerializer" group="list" subgroup="members" typeTopicId="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" /> <apidata name="Vector3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.#ctor" /> <elements> 
<element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.#ctor" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" /> </containers> <file name="8b8343ae-851d-5ef1-61d0-04378aff8d9d" /> </api> <api id="Methods.T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer"> <topicdata name="Vector3DValueSerializer" group="list" subgroup="Methods" typeTopicId="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" /> 
<apidata name="Vector3DValueSerializer" group="type" subgroup="class" /> <typedata visibility="public" serializable="false" defaultConstructor="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.#ctor" /> <elements> <element api="M:System.Object.Equals(System.Object)" /> <element api="M:System.Object.Finalize" /> <element api="M:System.Object.GetHashCode" /> <element api="M:System.Object.GetType" /> <element api="M:System.Object.MemberwiseClone" /> <element api="M:System.Object.ToString" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertFromException(System.Object)" /> <element api="M:System.Windows.Markup.ValueSerializer.GetConvertToException(System.Object,System.Type)" /> <element api="M:System.Windows.Markup.ValueSerializer.TypeReferences(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)" /> <element api="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)" /> </elements> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" /> </containers> <file name="673c6713-5dbe-2df0-8e65-a3295b5288a9" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.#ctor"> <topicdata group="api" /> 
<apidata name=".ctor" group="member" subgroup="constructor" /> <memberdata visibility="public" special="true" /> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" ref="true" /> </containers> <file name="0782b1fd-a6c0-9666-73c8-781ec61c5ad5" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" ref="true" /> </containers> <file name="fce27764-5a7d-34cc-82e3-81bcc1fdc0d0" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="CanConvertToString" group="member" subgroup="method" /> 
<memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.CanConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Boolean" ref="false" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" ref="true" /> </containers> <file name="9c882194-fc3d-3aae-0c24-e5d35b767d72" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertFromString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertFromString(System.String,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.String" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.Object" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace 
api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" ref="true" /> </containers> <file name="7445ce2e-265b-e94b-2ab1-4b6df343cdc2" /> </api> <api id="M:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <topicdata group="api" /> <apidata name="ConvertToString" group="member" subgroup="method" /> <memberdata visibility="public" /> <proceduredata virtual="true" /> <overrides> <member api="M:System.Windows.Markup.ValueSerializer.ConvertToString(System.Object,System.Windows.Markup.IValueSerializerContext)"> <type api="T:System.Windows.Markup.ValueSerializer" ref="true" /> </member> </overrides> <parameters> <parameter name="value"> <type api="T:System.Object" ref="true" /> </parameter> <parameter name="context"> <type api="T:System.Windows.Markup.IValueSerializerContext" ref="true" /> </parameter> </parameters> <returns> <type api="T:System.String" ref="true" /> </returns> <containers> <library assembly="PresentationCore" module="PresentationCore" kind="DynamicallyLinkedLibrary"> <assemblydata version="4.0.30319.0" /> </library> <namespace api="N:System.Windows.Media.Media3D.Converters" /> <type api="T:System.Windows.Media.Media3D.Converters.Vector3DValueSerializer" ref="true" /> </containers> <file name="a25680f9-c8ce-d52b-35cb-8ca4be6c8d95" /> </api> </apis> </reflection>
{ "redpajama_set_name": "RedPajamaGithub" }
7,618
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 sw=2 et tw=78: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "XPCWrapper.h"
#include "AccessCheck.h"
#include "WrapperFactory.h"

using namespace xpc;

namespace XPCNativeWrapper {

// Reports |ex| as a pending JS exception on |cx| and returns false so
// callers can propagate the failure directly.
static inline JSBool
ThrowException(nsresult ex, JSContext *cx)
{
  XPCThrower::Throw(ex, cx);
  return false;
}

// Implements XPCNativeWrapper.unwrap(obj): if |obj| is an Xray wrapper whose
// target the caller's compartment subsumes, return its wrappedJSObject;
// otherwise return the argument unchanged. Throws for wrong arity or a
// primitive argument.
static JSBool
UnwrapNW(JSContext *cx, unsigned argc, jsval *vp)
{
  if (argc != 1) {
    return ThrowException(NS_ERROR_XPC_NOT_ENOUGH_ARGS, cx);
  }

  jsval v = JS_ARGV(cx, vp)[0];
  if (JSVAL_IS_PRIMITIVE(v)) {
    return ThrowException(NS_ERROR_INVALID_ARG, cx);
  }

  JSObject *obj = JSVAL_TO_OBJECT(v);
  if (!js::IsWrapper(obj)) {
    // Not a cross-compartment wrapper: nothing to unwrap.
    JS_SET_RVAL(cx, vp, v);
    return true;
  }

  if (WrapperFactory::IsXrayWrapper(obj) && AccessCheck::wrapperSubsumes(obj)) {
    return JS_GetProperty(cx, obj, "wrappedJSObject", vp);
  }

  JS_SET_RVAL(cx, vp, v);
  return true;
}

// Constructor for XPCNativeWrapper(obj): strips any wrapper from the first
// argument and rewraps the naked object for the caller's compartment.
static JSBool
XrayWrapperConstructor(JSContext *cx, unsigned argc, jsval *vp)
{
  if (argc == 0) {
    return ThrowException(NS_ERROR_XPC_NOT_ENOUGH_ARGS, cx);
  }

  // vp[2] is the first actual argument (vp[0]/vp[1] hold callee and |this|).
  if (JSVAL_IS_PRIMITIVE(vp[2])) {
    return ThrowException(NS_ERROR_ILLEGAL_VALUE, cx);
  }

  JSObject *obj = JSVAL_TO_OBJECT(vp[2]);
  if (!js::IsWrapper(obj)) {
    *vp = OBJECT_TO_JSVAL(obj);
    return true;
  }

  obj = js::UnwrapObject(obj);
  *vp = OBJECT_TO_JSVAL(obj);
  return JS_WrapValue(cx, vp);
}

// Defines the XPCNativeWrapper constructor (with its static unwrap() helper)
// on |aGlobalObject|. Returns false if either definition fails.
// static
bool
AttachNewConstructorObject(XPCCallContext &ccx, JSObject *aGlobalObject)
{
  JSFunction *xpcnativewrapper =
    JS_DefineFunction(ccx, aGlobalObject, "XPCNativeWrapper",
                      XrayWrapperConstructor, 1,
                      JSPROP_READONLY | JSPROP_PERMANENT |
                      JSFUN_STUB_GSOPS | JSFUN_CONSTRUCTOR);
  if (!xpcnativewrapper) {
    return false;
  }
  return JS_DefineFunction(ccx, JS_GetFunctionObject(xpcnativewrapper),
                           "unwrap", UnwrapNW, 1,
                           JSPROP_READONLY | JSPROP_PERMANENT) != nullptr;
}

} // namespace XPCNativeWrapper

namespace xpc {

// Unwraps |wrapper|, returning nullptr when |wrapper| is not actually a
// wrapper or when the wrapper only permits script access.
JSObject *
Unwrap(JSContext *cx, JSObject *wrapper, bool stopAtOuter)
{
  if (js::IsWrapper(wrapper)) {
    if (xpc::AccessCheck::isScriptAccessOnly(cx, wrapper))
      return nullptr;
    return js::UnwrapObject(wrapper, stopAtOuter);
  }
  return nullptr;
}

} // namespace xpc

namespace XPCWrapper {

// Strips security wrappers (proxies) from |obj| with no access checks.
// Callers must already be entitled to see the naked object.
JSObject *
UnsafeUnwrapSecurityWrapper(JSObject *obj)
{
  if (js::IsProxy(obj)) {
    return js::UnwrapObject(obj);
  }
  return obj;
}

} // namespace XPCWrapper
{ "redpajama_set_name": "RedPajamaGithub" }
6,587
Niel Garden by mICHELE&mIQUEL On a slight elevation on the banks of the Garonne, safe from its floods, the site has been turned into a distinctive place throughout history. Archaeological works carried out on this site recently uncovered an Iron Age graveyard, a second-century craftsman area and an early Roman-era forum. During the following centuries, it became a military stronghold, and it also served as an observation point during the 1814 Battle of Toulouse. Built in the nineteenth century, the Niel barracks is made up of two entrance pavilions and, on the opposite side, the Major State building. The linear barracks occupied the north and south lateral sides. In the middle lay a large parade ground of 200m x 100m. The Niel barracks is a military construction formed by two entrance pavilions and, on the opposite side, a Major State building, both of them built in the nineteenth century. The north and south lateral sides were occupied by linear barracks and, in the middle of the area, there was a large parade ground of 200m x 100m. In the middle of the 20th century, the barracks were abandoned. Surrounded by high insurmountable walls, it constituted a site "out of the city". The place was turned into a breaking point between the districts of Busca and Empalot, and also between the Busca district and the Garonne River. OUR ASSESSMENT A garden that appears clear and balanced, where the topography, the vegetation, the pathways, the water and the materiality merge into a single intervention, which we have wanted to be as simple as it is precise. A garden that keeps in mind different scales: from the whole city — a green corridor connecting with the Garonne river and the Canal du Midi — to the surrounding area with its playgrounds, amphitheater and little squares. A topography that allows the protection of the archaeological remains and manages to generate different spatial experiences in an enclosure of rectangular and uniform dimensions.
A topography that is the support for the different pathways and the base for the different types of vegetation, which evoke the agricultural landscape of the region. A sustainable construction with self-management of water, recycled materials – excavated earth from the surrounding buildings and the pre-existing asphalt – and differentiated garden management. Finally, a new island of freshness in the middle of the city. A materiality typical of the city of Toulouse. The bricks, ingeniously laid by a system that combines constructive efficiency, the ability to follow the new topography and the magic of erasing the boundaries between mineral and vegetable. A single gesture, a single movement, an undulating veil that goes gradually from the mineral to the vegetal. Nowadays, it seems so natural that we can wonder if the landscapers have really worked there. What else can we add if the birds and children have invaded the garden and now move freely? Entrant office name: mICHELE&mIQUEL – architecture – urbanism – landscape design; michèle ORLIAC & miquel BATLLE Role of the entrant in the project: Conception, design and construction supervision. Website: www.michele-miquel.com Other designers involved in the design of landscape: Alberto HERNÁNDEZ MEDINA collaborating architect, VEGETATION – TERESA GALI – ARQUITECTURA AGRONOMIA S.L.P., FACILITIES – SATEC – BET Infrastructure, media and construction economy, STRUCTURES – BOMA, SL, ROBERT BRUFAU & Associats – Architect and Structures. Project location (Street, City, Country): Toulouse, France Year Built: 2014-2016 Video youtube: https://www.youtube.com/watch?v=MUWvlkCCu_I Entrant name: mICHELE&mIQUEL Award category: Projects Projects 2018
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
6,584
Outsourcing has been a staple of corporate strategy for years, but CIOs may be getting weary of it - and wary of its consequences, says silicon.com editor Steve Ranger. Outsourcing has long been a standard component of business strategy, a popular choice for firms keen to get rid of tech infrastructure that provides little competitive advantage. The idea is that outsourcers - with their scale and greater experience - can deliver IT as a service more efficiently and cheaply than their customers ever could through having their own IT staff. Add in offshore support and the cost savings really start to pile up, so the argument goes. It's persuasive and the idea of getting someone to take away the complications of owning and running IT, and replacing it with a predictable cost for a service, has been highly attractive to boards - and CIOs - for years. A significant proportion of outsourcing has been about two things: cutting costs, and offloading a problem - for example, an IT department loaded with expensive staff serving a creaky, ageing tech infrastructure. It's rarely about innovation. I've seen very few companies bragging about how their IT outsourcing strategy has cost more than doing it inhouse, and still being happy with the additional cost because it's boosted their capacity for innovation. But perhaps I just missed that case study. And maybe it's because the narrow focus of outsourcing has been on cutting costs and offloading problems that I've been picking up on a lot of outsourcing fatigue among the CIOs I've been talking to. Perhaps this is from a small sample, but I hear CIOs and IT directors expressing doubts about the wisdom of outsourcing, on a number of levels. Now, such reservations aren't new in themselves but rarely have I heard so many CIOs and IT directors sounding so uncomfortable about such a key element of business strategy. First, they complain that it doesn't always work. 
As soon as you announce outsourcing plans, you lose your best people, who don't want to stick around for all the pain and uncertainty of the restructuring. And when the outsourcing deal is done, outsourced workers and their new managers are far less loyal and no longer willing to go that extra mile for your business. Those attitudes make IT innovation far harder.
{ "redpajama_set_name": "RedPajamaC4" }
1,862
Polylepis racemosa är en rosväxtart. Polylepis racemosa ingår i släktet Polylepis och familjen rosväxter. Underarter Arten delas in i följande underarter: P. r. racemosa P. r. triacontandra Bildgalleri Källor Externa länkar Rosväxter racemosa
{ "redpajama_set_name": "RedPajamaWikipedia" }
7,250
Glasgow City of Scientists Our Demonstrator Projects Strategic Programme Board Education and Skills Development Engineering, Design and Manufacturing Low Carbon and Renewables Facilities and Property There is so much happening across Glasgow and the West of Scotland and our news portal will allow us to share it with you. HOME | News Whether it is ground breaking research, a new invention or a high profile science engagement activity, we want to hear about it. Our news-hub will share science-related stories which are taking place across the Glasgow city region. We want it to be a one-stop portal for science activities across Glasgow and the West of Scotland, so don't hesitate to let us know of anything and everything you are involved with. Simply send us your news updates, images and/or videos and we'll share them here. Chancellor announces £23.9m business, life science and arts funding for Glasgow UK Chancellor George Osborne has announced nearly £24m in new funding for Glasgow's life sciences, business and arts sectors. Glasgow sows the seeds for transforming stalled spaces across Scotland Glasgow's award winning Stalled Spaces initiative is being rolled out across Scotland. Glasgow becomes first Scottish city to offer free Wi-fi Glasgow has become the first city in Scotland to offer free Wi-fi throughout the city's streets and public spaces. City cycle hire scheme rolls out Glasgow's new Mass Automated Cycle Hire (MACH) scheme has been launched. A smart way to get active around MyCity: Glasgow With the Commonwealth Games set to open in Glasgow in less than a month, an award winning smartphone app has been released to encourage people to boost their fitness and find out more about the host city in the process. OPAL UK joins Glasgow's science hub to total 55 partners Glasgow City of Science has signed up its 55th partner, as Open Air Laboratories (OPAL) network joins the umbrella organisation. 
You can send us your news updates, images and/or videos and we'll share them on our site. Subscribe to keep up to date on our latest news, blog posts and events. If you have something you'd like to say to us here at Glasgow City of Science & Innovation, simply fill out our contact form with your questions, comments or ideas and we'll get back to you as soon as we can. This is a living, breathing website with regular updates on news, blogs and events. It's the place to come back to again and again if you want to know what's happening in the science and technology world in Glasgow and the West of Scotland. © 2010 - 2018 Glasgow City of Science and Innovation
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,112
Kevin Stuhr Ellegaard (né Kevin Stuhr Larsen, born 23 May 1983) is a retired Danish professional footballer who played as a goalkeeper. He has played for Manchester City and Blackpool in England, as well as German club Hertha BSC and Dutch club SC Heerenveen. Ellegaard has played 56 games for various Danish youth selections, including 20 games for the Denmark U21 national team. He has been selected several times for the Denmark national team, but has not appeared in a match. Club career Born in Copenhagen, Ellegaard spent his youth years in various Zealand clubs. In July 2002 he moved from Farum BK to Manchester City. In the 2003–04 season, he made two full appearances and two substitute appearances for the club in the league, replacing David Seaman both times when coming on as a substitute. He was loaned out to Blackpool FC for a month in January 2005, where he played two league matches in Football League One. Stuhr-Ellegaard moved to Germany to play for Hertha BSC in summer 2005. He had a hard time forcing his way into the starting line-up, and only played two Bundesliga games for the team. He moved to Danish club Randers FC as a free agent in the summer 2007. In his first half season with Randers, Ellegaard conceded only 15 goals in 18 matches. He went on to play all Randers games in his first two seasons with the club, helping Randers finish sixth in the 2007–08 and fifth in the 2008–09 Superliga season. In the 2009–10 Superliga season, Ellegaard conceded 15 goals in the first seven games, and was dropped from the starting line-up by coach John Jensen in August. As Randers' results did not improve with new goalkeeper Nathan Coe, Ellegaard was reinstated in October 2009, and conceded only eight goals in the 16-game unbeaten run that secured Randers FC another season in the top flight. Ellegaard and Randers decided to part their ways in June 2010, as they could not agree on a new contract. 
In season 2010–11, Dutch club SC Heerenveen had big problems with their goalkeepers, so they decided that they needed to contract a new goalie for a short period. The first-keeper Brian Vandenbussche got injured during a training session, which left Heerenveen with only one goalie, Kenny Steppe. Heerenveen gave Ellegaard a trial at the club, which led to him signing a one-year contract. In the beginning, Heerenveen saw him as a second-keeper behind Steppe, but Ellegard impressed so much in training that the trainer Ron Jans made him first-choice. Despite Ellegard playing 28 matches and impressing both the audience and the trainers with his play, they decided not to renew his contract, because Vandenbussche was coming back from his injury. On 23 January 2012, Ellegaard signed a three-year contract with Swedish club IF Elfsborg as a free agent. On 24 January 2020, Ellegaard returned to Denmark and signed with Danish 2nd Division club FC Helsingør. He reached promotion to the Danish 1st Division in his first six months with the club. On 22 December 2021, 38-year old Ellegaard announced his retirement from football. However, on 2 May 2022, Ellegaard came out of retirement, when he signed a short-term contract with Danish Superliga club AaB for the rest of the season. International career Ellegaard made his debut for the Danish under-16 youth national team in September 1998, and went on to represent various youth national teams for a combined total of 56 games. He made his debut for the Denmark U21 national team in September 2004, and represented the team at the 2006 Under-21 European Championship tournament in May 2006. While at Manchester City, Ellegaard served as a stand-in for Thomas Sørensen in the initial Denmark national team training sessions ahead of the 2004 European Championship. He was not a part of the Danish squad at the tournament. 
In the fall of 2007, while playing for Randers, Ellegaard was called up as the Danish third choice goalkeeper due to the injury of Jesper Christiansen. In August 2008, Ellegaard was called up for the national team again, due to a new Jesper Christiansen injury. Career statistics Honours IF Elfsborg Allsvenskan: 2012 Svenska cupen: 2014 Individual Randers FC Player of the year: 2007–08 Eredivisie ING Fair Play-reward Denmark's best penalty keeper – Recent statistics showed that out of 29 Danish keepers, Ellegaard is the best one in saving penalties, making him a penalty specialist. He is known for his "double save" in a penalty against AFC Ajax. He had a fantastic 45% rate in penalty saves, which made him the best Danish penalty keeper. References External links Danish Superliga statistics 1983 births Living people Danish men's footballers Denmark youth international footballers Denmark under-21 international footballers Association football goalkeepers Hvidovre IF players Manchester City F.C. players Hertha BSC players Hertha BSC II players Randers FC players SC Heerenveen players IF Elfsborg players FC Helsingør players AaB Fodbold players Premier League players English Football League players Footballers from Copenhagen Danish Superliga players Bundesliga players Eredivisie players Allsvenskan players Danish 1st Division players Danish expatriate men's footballers Danish expatriate sportspeople in England Danish expatriate sportspeople in Germany Danish expatriate sportspeople in the Netherlands Expatriate footballers in England Expatriate footballers in Germany Expatriate footballers in the Netherlands
{ "redpajama_set_name": "RedPajamaWikipedia" }
7,108
{"url":"https:\/\/www.gamedev.net\/forums\/topic\/454808-qwerty\/","text":"# qwerty\n\n## Recommended Posts\n\nint array[100]; memset(array, 1, sizeof(sint)*100); instead of filling up array with 1, it fills up with 0x01010101 Is that normal and what would be a better function to use? edit : sorry about the title. I guess I forgot to change it. [Edited by - V-man on July 7, 2007 8:48:53 PM]\n\n##### Share on other sites\nThat's how it works. It filled the array with the character sized variable 1 (0x0F or 00001111b).\n\nThe reason 0 works to fill it with all 0s is because it is made of all 0x itself (0x00 or 00000000b).\n\nIf you want to fill it up with all 1s, then use the number made of all 1s (11111111b = 0xFF = 255)\n\nmemset(array,255,sizeof(int)*100);\n\n##### Share on other sites\nQuote:\n Original post by V-manint array[100];memset(array, 1, sizeof(sint)*100);instead of filling up array with 1, it fills up with 0x01010101Is that normal and what would be a better function to use?\n\nYes, this is normal -- memset only deals with the concept of bytes.\n\nIf this is C++, you should prefer it's standard library options when available over the legacy inherited from C (which memset is a part of). In this case, C++ has std::fill available:\n\nint array[100];std::fill( array, array+100, 1 );\n\nstd::fill is a template (and thus C++ only) function, which allows it to detect that array is a bunch of ints, and fill on a per-int basis instead of a per-byte basis. Note that you don't even need sizeof() anymore. It will also work with iterators if you're not using a raw array like you are in this example.\n\n##### Share on other sites\nI prefer to use \"raw arrays\" instead of std classes. 
It is a question of personal taste and memory alignment, although I use C++.\nI was wondering if there is a good old fashion alternative.\n\n##### Share on other sites\nQuote:\n Original post by V-manAbout std:fill, thanks, I didn't know about it until now.I prefer to use \"raw arrays\" instead of std classes. It is a question of personal taste, although I use C++.\n\nSo in other words you don't use C++.\n\nDo you have some reason for this decision?\n\n##### Share on other sites\nMemory alignment I guess is the technical reason.\n\n##### Share on other sites\nint array[100];\nmemset(array, 1, sizeof(sint)*100);\n\n0x01010101 0x01010101 0x01010101 ...\n\n--------------------------------\n\nint array[100];\nmemset(array, 1, sizeof(int)*100);\n\n0x00000001 0x00000001 0x00000001\n\n-me\n\n##### Share on other sites\nQuote:\n Original post by V-manMemory alignment I guess is the technical reason.\n\nMeaning what?\n\nAre you trying to ensure the memory allocated is contiguous? Because std::vector does that automatically.\n\nIf you're trying to ensure that your memory is 4 8 or 16 byte aligned for some reason you can probably do that too though I'll defer to the more expert members of the forum for the best way's to do so.\n\n##### Share on other sites\n\"If you're trying to ensure that your memory is 4 8 or 16 byte aligned for some reason you can probably do that too though I'll defer to the more expert members of the forum for the best way's to do so.\"\n\nyes\n\nAlso, it's difficult to watch during debugging. I used VC++6 and now .NET 2003\nIf I use the std::vector class and if I were to put my variable in the watch window like this\n\nmything[0]\n\nit gives an error message :\nerror : object \"mything\" doesn't have an indexer\n\nI've never figure this out. 
Anyone know what's the matter with MS-VC?\n\n##### Share on other sites\nQuote:\n Original post by V-manMemory alignment I guess is the technical reason.\n\nUsing the default allocator, C++ containers will give you the same alignment guarantees as C-style arrays.\n\n##### Share on other sites\nQuote:\n Original post by V-manI've never figure this out. Anyone know what's the matter with MS-VC?\n\nLots of things were wrong with 6.0. For starters, it was released pre-standard and thus does not support a lot of features of the C++ language. 2003 is much better, but still had sub-par standard library debugging, IMO. 2005 is much better at that sort of thing, and the express edition is free (and highly recommended).\n\n##### Share on other sites\n1) For the love of all that is holy, put some thought into your thread title.\n\n2)\nQuote:\n Original post by erissianThat's how it works. It filled the array with the character sized variable 1 (0x0F or 00001111b).The reason 0 works to fill it with all 0s is because it is made of all 0x itself (0x00 or 00000000b).If you want to fill it up with all 1s, then use the number made of all 1s (11111111b = 0xFF = 255)memset(array,255,sizeof(int)*100);\n\nI (and others) assume he wants to fill the array with ints of value 1, rather than with all set bits.\n\n3)\nQuote:\n Original post by Palidineint array[100];memset(array, 1, sizeof(sint)*100);0x01010101 0x01010101 0x01010101 ...--------------------------------int array[100];memset(array, 1, sizeof(int)*100);0x00000001 0x00000001 0x00000001\n\nNot the problem this time, but you should match these things up in general.\n\nAlthough there is really no reason to use memset() in C++.\n\n4)\nQuote:\n Original post by V-manAbout std:fill, thanks, I didn't know about it until now.I prefer to use \"raw arrays\" instead of std classes. 
It is a question of personal taste and memory alignment, although I use C++.I was wondering if there is a good old fashion alternative.\n\na) std::fill *isn't* a class; it's a function. You are still using a \"raw array\" here; it's just a matter of how you specify how you're filling it.\nb) The \"std classes\" are part of the standard library of the C++ language. Writing C++ code without them is like writing C code without the stuff found in stdio.h, stdlib.h etc. And there is a *lot* of stuff in there that you probably don't even realize (because many compilers let you get away with omitting those #includes even when they shouldn't, relying on \"builtins\" instead). If you're insistent on writing C code, though, please don't pretend you're writing C++.\n\n5)\nQuote:\n Original post by V-manMemory alignment I guess is the technical reason.\n\nLike Sneftel said.\n\nAnd yes, MSVC++ 6.0 is ancient. It boggles the mind how many people out there are still trying to make it work. Or for that matter, how many *new* beginning programmers are somehow ending up with it. I don't even know where or how you could find it any more.\n\n##### Share on other sites\nQuote:\nOriginal post by Sneftel\nQuote:\n Original post by V-manMemory alignment I guess is the technical reason.\n\nUsing the default allocator, C++ containers will give you the same alignment guarantees as C-style arrays.\n\nIt's not enough. I'm using some other function for 16 byte alignment.\n_aligned_malloc and _aligned_free\n\n##### Share on other sites\nQuote:\nOriginal post by Driv3MeFar\nQuote:\n Original post by V-manI've never figure this out. Anyone know what's the matter with MS-VC?\n\nLots of things were wrong with 6.0. For starters, it was released pre-standard and thus does not support a lot of features of the C++ language. 2003 is much better, but still had sub-par standard library debugging, IMO. 
2005 is much better at that sort of thing, and the express edition is free (and highly recommended).\n\nBut why does the same problem appear in VC++.NET 2003\n\nI just tried with the latest VC++Express, it says\n\nI can only Watch the variable with\nmyarray._Myfirst[0]\n\nAlso, if anyone knows the C equivalent of memset for 32 bit writes, I would like to know. I ask because memset is efficient. It's better than writing a for loop.\nint myarray[100];\nfor(i=0 i<mytotal; i++)\n{\nmyarray[i]=myvalue;\n}\n\n[Edited by - V-man on July 10, 2007 10:42:58 PM]\n\n##### Share on other sites\nQuote:\n Original post by V-manIt's not enough. I'm using some other function for 16 byte alignment._aligned_malloc and _aligned_free\n\nSimple. Just declare your type as __declspec(align(16)) and C++ containers will do that for you.\n\n##### Share on other sites\nQuote:\n Original post by V-manAlso, if anyone knows the C equivalent of memset for 32 bit writes, I would like to know. I ask because memset is efficient. It's better than writing a for loop.\n\nCouldn't tell you offhand. On modern compilers, std::fill delegates to a tightly optimized routine written in assembler when you use it on things like integers. I guess C might have something like that, though it'd be compiler-dependent.\n\n##### Share on other sites\nQuote:\n Original post by V-manIt's not enough. I'm using some other function for 16 byte alignment._aligned_malloc and _aligned_free\n\nThen assuming you only want the array aligned to 16 byte boundaries you can use a custom allocator which allocated with _aligned_malloc and releases with _aligned_free or if you want the individual elements aligned then you can just declare the type with __declspec(align(16)) as Sneftel said.\n\nQuote:\n Couldn't tell you offhand. On modern compilers, std::fill delegates to a tightly optimized routine written in assembler when you use it on things like integers. 
I guess C might have something like that, though it'd be compiler-dependent.\n\nTo be more specific on VS05 std::fill will compile be equivalent to memset for all fundamental types and and types which satisfy __is_pod(type).\n\n##### Share on other sites\nFrom the looks of it, in .NET 2003 \"xutility\". it only uses memset for 8 bit types, else a standard for loop.\n\ntemplate<class _FwdIt,\nclass _Ty> inline\nvoid fill(_FwdIt _First, _FwdIt _Last, const _Ty& _Val)\n{ \/\/ copy _Val through [_First, _Last)\nfor (; _First != _Last; ++_First)\n*_First = _Val;\n}\n\ninline void fill(char *_First, char *_Last, int _Val)\n{ \/\/ copy char _Val through [_First, _Last)\n::memset(_First, _Val, _Last - _First);\n}\n\ninline void fill(signed char *_First, signed char *_Last, int _Val)\n{ \/\/ copy signed char _Val through [_First, _Last)\n::memset(_First, _Val, _Last - _First);\n}\n\ninline void fill(unsigned char *_First, unsigned char *_Last, int _Val)\n{ \/\/ copy unsigned char _Val through [_First, _Last)\n::memset(_First, _Val, _Last - _First);\n}\n\n##### Share on other sites\nQuote:\n Original post by SneftelSimple. Just declare your type as __declspec(align(16)) and C++ containers will do that for you.\n\nUntil you try using the type with std::vector and the code blows up because the standard requires objects to be passed by value as arguments in some very fundamental function calls. Unfortunately there's no good work around for that (and keep the std::vector usage).\n\n##### Share on other sites\nYou can't use memset() with non-POD types safely, so yes, it has to loop for those types it cannot specialize. memset() is typically an intrinsic (so it becomes the aforementioned assembly on release). This is at least true in VS2005, I'm pretty sure it was still the case in VS2003.\n\nYou can use custom allocators or compiler builtins (since _aligned_malloc is nonstandard anyway) to ensure your types are appropriately aligned the way you want. 
Boost might have some allocators prewritten, but they're not hard to build yourself. Of course you have to ensure your types size is appropriate as well, or you can't align it, period, in any contiguous container. Legally.\n\nAre you sure this stuff is a bottleneck? Have you profiled it?\n\n##### Share on other sites\nQuote:\nOriginal post by V-man\nQuote:\n Original post by SneftelUsing the default allocator, C++ containers will give you the same alignment guarantees as C-style arrays.\n\nIt's not enough.\n\nPlease excuse me for not believing you. What are you trying to do that requires aligning things to 16-byte boundaries? People who need to do things that arcane are generally also people who can solve problems such as in the OP by themselves.\n\n##### Share on other sites\nGenerally, to take advantage of SIMD instructions, your data must be properly aligned. IIRC, Altivec requires 16-byte alignment, I think SSE does as well. Very compute-intensive tasks like matrix multiplication, fixed-point\/floating-point conversion, etc can be sped up by a factor of 4 (or more, with library's like SAL, which optimize for CPU cache configurations as well).\n\n## Create an account\n\nRegister a new account\n\n\u2022 ## Partner Spotlight\n\n\u2022 ### Forum Statistics\n\n\u2022 Total Topics\n627684\n\u2022 Total Posts\n2978627\n\n\u2022 9\n\u2022 14\n\u2022 12\n\u2022 10\n\u2022 12","date":"2017-10-20 11:02:17","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.17710140347480774, \"perplexity\": 
3735.533385223617}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2017-43\/segments\/1508187824068.35\/warc\/CC-MAIN-20171020101632-20171020121632-00320.warc.gz\"}"}
null
null
website ======= [![Build Status](https://travis-ci.org/repkam09/website-react.svg?branch=master)](https://travis-ci.org/repkam09/website-react) [![Codacy Badge](https://api.codacy.com/project/badge/grade/9219a9d1b2ec4e4889f15a473ba2c970)](https://www.codacy.com/app/mark_12/website-react) Template files and CSS for my website https://repkam09.com This README file will also document some of the required packages that I use for various parts of the website so that I can remember what I've done and to help speed up deployment should I need to configure a new server in the future. Current server hosting is provided through a DigitalOcean VPS running Ubuntu 15.04. This is a rewrite of the website using some dynamic features of ReactJS instead of PHP! This repository can also be found on my Gogs deployment here: https://dev.repkam09.com/repkam09/website-react/
{ "redpajama_set_name": "RedPajamaGithub" }
1,562
\section{INTRODUCTION} \noindent{\footnotesize\sc This paper }uses two style files created for \LaTeX\ in order to format an article as in Automatica, the journal of the International Federation of Automatic Control. The style file {\tt automatica.sty} is to be used in combination with the substyles {\tt IFAC\-art9.sty}, {\tt IFAC\-art10.sty}, {\tt IFAC\-art12.sty} and {\tt IFAC.bst}. All these files should be in a path known by \LaTeX. \subsection{Main Changes} Input that is suitable for the \LaTeX\ {\tt article} style needs only a couple of modifications besides changing the document style to {\tt automatica}. The \verb+automatica.sty+ style file first has to be selected with a command of the form \begin{center} \verb+\documentstyle[...]{automatica}+ \end{center} The main differences with the usual article style file pertain to some definitions in the title page. \begin{itemize} \item After the name of the author, go to the next line with \verb+\\[2mm]+ for the address. \item the syntax for keywords is \verb+\keywords{...}+ \item the syntax for the newly defined item shortabstract is \verb+\shortabstract{...}+ \item \verb+\maketitle+ comes after title, keywords and shortabstract. \end{itemize} Automatica papers do not include author affiliations below or beside the name(s) of the author(s); use \verb+\thanks{...}+ to list addresses. The default font size is 10 points, and {\tt IFACart10.sty} will be used; the only other fonts available are 9 and 12 points, which are defined by {\tt IFACart9.sty} and {\tt IFACart12.sty} respectively. The default page style has been redefined and is now set by {\tt automatica.sty} to ``\verb+myheadings+''. \subsection*{Running headers} The running heads can be set with the command \verb+\markboth{leftTEXT}{rightTEXT}+. If the option {\tt twoside} is not selected, both even and odd headers will display {\tt rightTEXT} together with the page number. 
The command \verb+\head{headTEXT}+ will put the string \verb+headTEXT+ on the header of the title page. \begin{figure} \begin{center} \setlength{\unitlength}{0.0105in}% \begin{picture}(242,150)(73,660) \put( 75,660){\framebox(240,150){}} \put(105,741){\vector( 0, 1){ 66}} \put(105,675){\vector( 0, 1){ 57}} \put( 96,759){\vector( 1, 0){204}} \put(105,789){\line( 1, 0){ 90}} \put(195,789){\line( 2,-1){ 90}} \put(105,711){\line( 1, 0){ 60}} \put(165,711){\line( 5,-3){ 60}} \put(225,675){\line( 1, 0){ 72}} \put( 96,714){\vector( 1, 0){204}} \put( 99,720){\makebox(0,0)[rb]{\raisebox{0pt}[0pt][0pt]{\tenrm $\varphi$}}} \put(291,747){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{\tenrm $\omega$}}} \put(291,702){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{\tenrm $\omega$}}} \put( 99,795){\makebox(0,0)[rb]{\raisebox{0pt}[0pt][0pt]{\tenrm $M$}}} \end{picture} \end{center} \caption{This is a sample figure. The caption comes after the figure.} \end{figure} \nocite{TeX} The captions for figures and tables have been modified; the caption of the tables must come before the table. \begin{table}[hb] \caption{The caption comes before the table.} \begin{center} \begin{tabular}{|c||c|c|}\hline $\otimes$&0&1\\\hline\hline 0&0&1\\\hline 1&1&0\\\hline \end{tabular} \end{center} \end{table} The bibliography style file {\tt IFAC\-.bst} formats with BibTeX the references from the bibliography file according to IFAC instructions. It takes advantage of the definitions introduced by Peter Williams for the family of harvard bibliography style files. The references can be cited with the usual syntax ({\em e.g.\ \/}\verb+\cite{LaTeX}+, yielding the citation ``\cite{LaTeX}'') or with with a new command ({\em e.g.\ \/}\verb+\citeasnoun{LaTeX}+, yielding the citation ``\citeasnoun{LaTeX}''). The two commands support all original options in the book of \citeasnoun{LaTeX}. In figure \ref{figure2} we can see an example for the definition of the title page and of the main commands needed. 
\begin{figure}[htb] \mbox{}\hrulefill \vspace{-.3em} \begin{verbatim} \documentstyle[twocolumn]{automatica} \begin{document} \title{Sample LaTeX Article...} \markboth{S. Balemi}{Sample ..} \head{\scriptsize{\em Automatica}\hfill Sample file. c 1993 Silvano Balemi} \author{SILVANO BALEMI\thanks{ Automatic Control Laboratory,...} \shortabstract{This paper ..} \keywords{\LaTeX\ style files...} \maketitle \begin{abstract} This article explains how to use .. \end{abstract} \section{INTRODUCTION} \noindent {\footnotesize\sc This paper }uses.. \bibliographystyle{IFAC} \section{Introduction} Delay-Differential Equations (DDEs) are a convenient shorthand notation used to represent what is perhaps the simplest form of spatially-distributed phenomenon - transport. Because of their notational simplicity, it is common to use DDEs to model very complex systems with multiple sources of delay - including almost all models of control over and of ``networks''. To illustrate the ways in which delays can complicate an otherwise straightforward control problem, consider control of a swarm of $N$ UAVs over a wireless network. In this case, each UAV, $i$, has a state, $x_i(t)\in \mathbb{R}^{n_i}$ which may represent, e.g. displacement (the concatenation of all such states is denoted $x$). Each UAV has local sensors which measure $y_i$ and this information is transmitted to a centralized control authority. There is also a centralized vector of inputs, $u$, a regulated vector of outputs, $z$, and a vector of disturbances, $w$ - including both process and sensor noise. We model this system as follows. 
\begin{align} \dot x_i(t)&=a_ix_i(t)+\sum\nolimits_{j=1}^N a_{ij} x_j(t-\hat \tau_{ij})\notag \\ &\hspace{2cm}+b_{1i}w(t-\bar \tau_i)+b_{2i} u(t-h_i)\notag\\ z(t)&=C_{1} x(t)+D_{12}u(t)\notag \\ y_i(t)&=c_{2i} x_i(t-\tilde \tau_i)+d_{21i}w(t-\tilde \tau_i) \label{eqn:network1}\\[-6mm] \notag \end{align} \begin{itemize} \item $a_i$ is the internal dynamics of the UAV $i$ \item $a_{ij}$ is the effect of UAV $j$ on the state of UAV $i$. \item $b_{1i}$ is the disturbance to the motion of UAV $i$ \item $b_{2i}$ is the effect of the central command on UAV $i$ \item $c_{2i}$ is the measurement of the state of UAV $i$ \item $d_{21i}$ is the disturbance to the sensor on UAV $i$ \item $C_1$ gives the weight on states of the fleet of UAVs to minimize in the optimal control problem \item $D_{12}$ gives the weight on actuator commands to minimize in the optimal control problem \item $\hat \tau_{ij}$ is the time taken for changes in state of UAV $j$ to affect UAV $i$ \item $h_i$ is the time taken for a command from the central authority to reach UAV $i$ \item $\bar \tau_i$ is the time it takes the process disturbance (wind, tracking signal, etc.) to reach UAV $i$ \item $\tilde \tau_i$ is the time taken for measurements collected at UAV $i$ to reach the central authority \end{itemize} This relatively simple model shows that delayed channels are often low dimensional ($\mathbb{R}^{n_i}$ vs. $\mathbb{R}^{\sum n_i}$) and specifies four separate yet individually significant sources of delay. Specifically, we have: state delay ($\hat \tau_{ij}$); input delay ($h_i$); process delay ($\bar \tau_i$); and output delay ($\tilde \tau_i$). This UAV network is modeled as a DDE - a structure formulated in Eqn.~\eqref{eqn:DDE} in Sec.~\ref{sec:DDE}. If we consider control of such a network, however, we find that while there are algorithms for control of DDEs (See~\cite{peet_2020SICON}), these algorithms are complex and are memory-limited to a relatively small number of UAVs (perhaps 4-5).
The premise of this paper, however, is that the limitations of these algorithms are not caused by inefficiency of the algorithms, but rather by the failure to account for the low dimensional nature of the delayed channels. Specifically, we note that in the UAV model, while the concatenated state, $x(t)$, is high-dimensional, the individual delayed channels, $x_i(t)$, are of much lower dimension. If we represent the network as a DDE using the formulation in Subsec.~\ref{subsec:UAV_DDE}, then the low-dimensional nature of the delayed channels is lost. Furthermore, DDEs cannot represent some important system designs - including a model of feedback described in Subsection~\ref{subsec:UAV_SOF}. For these reasons, in Sec.~\ref{sec:DDF}, we consider the use of Differential Difference Equations (DDFs). The DDF can be used to model both DDEs and neutral-type systems, while also allowing for the assignment of delayed information to heterogeneous low-dimensional channels. Specifically, the infinite-dimensional component of state-space (as defined in~\cite{gu_2010,pepe_2008}) of the UAV network in the DDF framework is $\prod_i L_2[-\tau_i,0]^{n_i}$ as opposed to $\prod_i L_2[-\tau_i,0]^{\sum n_i}$ using a DDE. In addition, DDFs allow us to represent difference equations which arise in some network models - See Subsection~\ref{subsec:UAV_SOF}. From the DDF model we turn to coupled ODE-PDE models in Sec.~\ref{sec:ODEPDE}. ODE-PDEs can be used to model a variety of systems. However, for the particular class of ODE-PDEs we use in Sec.~\ref{sec:ODEPDE}, the solutions to the ODE-PDE are equivalent to those of the DDF (as defined in Sec.~\ref{sec:DDF}). Backstepping methods have been developed for ODE-PDE models of delay (e.g.~\cite{krstic_2008,zhu_2015}) and the formulae we present for conversion of DDFs to ODE-PDEs may prove useful if the reader is interested in application or further development of these backstepping methods. 
Next, in Sec.~\ref{sec:PIE}, we consider Partial Integral Equations (PIEs)~\cite{appell_book}. PIEs are a generalization of integro-differential equations of Barbashin type which have been used since the 1950s to model systems in biology, physics, and continuum mechanics (See chapters 19-20 of~\cite{appell_book} for a survey). PIEs and ODE-PDEs define an equivalent set of solutions and in this section, we provide formulae for conversion of DDEs and DDFs to PIEs. PIE models have the advantage that they are defined by Partial Integral (PI) operators. Unlike Dirac and differential operators, PI operators are bounded and form an algebra. Furthermore, PIE models do not require boundary conditions or continuity constraints - simplifying analysis and optimal control problems. Indeed, it has been recently shown in~\cite{shivakumar_2019CDC,das_2019CDC} that many problems in analysis, optimal estimation and control of ODE-PDE models can be formulated as optimization over the cone of positive PI operators. In Sec.~\ref{sec:temp}, we show that the PIE formulation allows for $H_\infty$-optimal control of a 40 user, 80-state, 40-delay, 40-input, 40-disturbance network model of temperature control. 
\begin{figure*}[!t] \textbf{The Class of Delay-Differential Equations (DDEs):}\vspace{-3mm} \begin{align} &\bmat{\dot{x}(t)\\z(t) \\ y(t)}=\bmat{A_0 & B_1 & B_2\\ C_{10} & D_{11} &D_{12}\\ C_{20} & D_{21} &D_{22}}\bmat{x(t)\\w(t)\\u(t)}+\sum_{i=1}^K \bmat{A_i & B_{1i} & B_{2i}\\C_{1i} & D_{11i} & D_{12i}\\C_{2i} & D_{21i} & D_{22i}} \bmat{x(t-\tau_i)\\w(t-\tau_i)\\u(t-\tau_i)}\notag\\ & \hspace{3cm}+\sum_{i=1}^K \int\limits_{-\tau_i}^0\bmat{A_{di}(s) & B_{1di}(s) &B_{2di}(s)\\C_{1di}(s) & D_{11di}(s) & D_{12di}(s)\\C_{2di}(s) & D_{21di}(s) & D_{22di}(s)} \bmat{x(t+s)\\w(t+s)\\u(t+s)}ds\label{eqn:DDE} \end{align} \textbf{The Class of Differential-Difference Equations (DDFs):}\vspace{-3mm} \begin{align} \bmat{\dot{x}(t)\\ z(t)\\y(t)\\r_i(t)}&=\bmat{A_0 & B_1& B_2\\C_1 &D_{11}&D_{12}\\C_2&D_{21}&D_{22}\\C_{ri}&B_{r1i}&B_{r2i}}\bmat{x(t)\\w(t)\\u(t)}+\bmat{B_v\\D_{1v}\\D_{2v}\\D_{rvi}} v(t) \notag\\ v(t)&=\sum_{i=1}^K C_{vi} r_i(t-\tau_i)+\sum_{i=1}^K \int_{-\tau_i}^0C_{vdi}(s) r_i(t+s)ds.\label{eqn:DDF} \end{align} \textbf{The Class of Neutral-Type Systems (NDS):}\vspace{-3mm} \begin{align} &\bmat{\dot{x}(t)\\z(t) \\ y(t)}=\bmat{A_0 & B_1 & B_2\\ C_{10} & D_{11} &D_{12}\\ C_{20} & D_{21} &D_{22}}\bmat{x(t)\\w(t)\\u(t)}+\sum_{i=1}^K \bmat{A_i & B_{1i} & B_{2i}& E_i\\C_{1i}& D_{11i} & D_{12i} & E_{1i}\\C_{2i} & D_{21i} & D_{22i}&E_{2i}} \bmat{x(t-\tau_i)\\w(t-\tau_i)\\u(t-\tau_i)\\ \dot x(t-\tau_i)}\notag\\ &\hspace{2.6cm} +\sum_{i=1}^K \hspace{0mm}\int_{-\tau_i}^0\hspace{-1mm}\bmat{A_{di}(s) & \hspace{-1mm}B_{1di}(s) &\hspace{-1mm}B_{2di}(s)& \hspace{-1mm}E_{di}(s)\\C_{1di}(s) & \hspace{-1mm}D_{11di}(s) & \hspace{-1mm}D_{12di}(s)& \hspace{-1mm}E_{1di}(s)\\C_{2di}(s) &\hspace{-1mm}D_{21di}(s) & \hspace{-1mm}D_{22di}(s)& \hspace{-1mm}E_{2di}(s)} \bmat{x(t+s)\\w(t+s)\\u(t+s)\\ \dot x(t+s)}\hspace{-1mm}ds\label{eqn:NDS}\\[-8mm]\notag \end{align} \textbf{The Class of ODE-PDE Systems:}\vspace{-3mm} \begin{align} \bmat{\dot{x}(t)\\ z(t)\\y(t)\\ \phi_i(t,0)}&=\bmat{A_0 & B_1& 
B_2\\C_1 &D_{11}&D_{12}\\C_2&D_{21}&D_{22}\\C_{ri}&B_{r1i}&B_{r2i}}\bmat{x(t)\\w(t)\\u(t)}+\bmat{B_v\\D_{1v}\\D_{2v}\\D_{rvi}} v(t) \notag\\ \dot \phi_i(t,s)&=\frac{1}{\tau_i}\phi_{i,s}(t,s),\qquad v(t)=\sum_{i=1}^K C_{vi} \phi_i(t,-1)+\sum_{i=1}^K \int_{-1}^0\hspace{-1.5mm}\tau_iC_{vdi}(\tau_i s) \phi_i(t, s)ds\label{eqn:ODEPDE}\\[-8mm]\notag \end{align} \textbf{The Class of Partial Integral Equation (PIE) Systems:}\vspace{-3mm} \begin{align} \mcl T \dot{\mbf x}(t)+\mcl B_{T_1}\dot w(t)+\mcl B_{T_2}\dot u(t)&=\mcl A\mbf x(t)+\mcl B_1w(t)+\mcl B_2u(t)\notag\\ \hspace{-1cm}z(t)=\mcl C_1\mbf x(t)+\mcl D_{11}w(t)&+\mcl D_{12}u(t),\notag\\ \hspace{-1cm}y(t)=\mcl C_2\mbf x(t)+\mcl D_{21}w(t)&+\mcl D_{22}u(t)\label{eqn:PIE}\\[-7mm]\notag \end{align} \caption{Formulation of the DDE, DDF, NDS, ODE-PDE, and PIE Representations of Systems with Delay}\label{fig:representation}. \vspace{4mm} \end{figure*} Finally, we emphasize that this paper does not advocate for any particular time-domain representation (we do not consider the literature on analysis and control in the frequency domain), be it the DDE, DDF, ODE-PDE, or PIE formulation, and does not propose any new algorithms for analysis and control of delay systems per se. Rather, the purpose of this document is to serve as a guide to representation of delay systems in each framework. Specifically, for each representation, we: state the most general form of each representation - allowing for delays in input, output, process and state; define a notion of solution in each case; provide formulae for conversion between representations under which solutions are equivalent; and briefly list advantages and limitations of the representation as applied to network models of the form of Eqn.~\eqref{eqn:network1}. 
As discussed in the conclusions, these results can be used to establish notions of stability which are equivalent in all representations and to allow for conversion of optimal controllers and estimators between representations. While subsets of the DDF and ODE-PDE representations of delay systems can be found in the literature~\cite{bensoussan_book,gu_2010,pepe_2008,mazenc_2013,pepe_2008b,gu_2003,niculescu_book}, and some of these equivalences are known~\cite{karafyllis_2014,richard_2003}, previous works do not: consider all input-output signals and sources of delay; include PIEs; compare the relative advantages of the models as applied to networks; or provide formulae for conversion between representations. This guide, then, may be used as a convenient source of information for researchers interested in either selection of a representation or conversion of a representation to an alternative format. For convenience and comparison, all representations are listed in Figure~\ref{fig:representation}. All conversion formulae are listed in Figures~\ref{fig:formulae1} and~\ref{fig:formulae2}. Finally, note that all proofs have been omitted, but are included in the extended version of this paper on Arxiv~\cite{peet_2020arxiv_TDS}.\vspace{-4mm} \paragraph*{Notation} $I_n$ is the identity matrix in $\mathbb{R}^{n\times n}$, $e_i$ is the $i^{th}$ canonical unit vector, $\mbf 1_n$ is the dimension $n$ vector of all ones. 
$0_{n,m}$ is the zero matrix of dimension $\mathbb{R}^{n\times m}$ and $W^{n,2}[X]$ is the $n$th-order Sobolev subspace of $L_2[X]$.\vspace{-4mm} \section{The DDE Representation}\label{sec:DDE}\vspace{-3mm} We begin by defining the signals in the Delay-Differential Equation (DDE) representation:\vspace{-3mm} \begin{itemize} \item The present state $x(t)\in \mathbb{R}^n$ \item The disturbance or exogenous input, $w(t)\in \mathbb{R}^m$ \item The controlled input, $u(t)\in \mathbb{R}^p$ \item The regulated or external output, $z(t)\in \mathbb{R}^q$ \item The observed or sensed output, $y(t)\in \mathbb{R}^r$\vspace{-2mm} \end{itemize} For convenience, we combine all sources of delay (state, input, output, process) into a single set of delays $\{\tau_i\}_{i=1}^K$ with $0<\tau_1<\cdots<\tau_K$. For given $u\in L_2^p$, $w \in L_2^m$, and initial condition $x_0\in W^{1,2}[-\tau_K,0]^n$, we say that $x:[-\tau_K,\infty]\rightarrow \mathbb{R}^n$, $z:[0,\infty]\rightarrow \mathbb{R}^q$, and $y:[0,\infty]\rightarrow \mathbb{R}^r$ satisfy the DDE defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ if $x$ is differentiable on $[0,\infty]$ (from the right at $t=0$), $x(s)=x_0(s)$ for $s \in [-\tau_K,0]$, and Eqns.~\eqref{eqn:DDE} are satisfied for all $t \ge 0$. If any $B_{1i},D_{11i},D_{21i}\neq0$, we require $w\in W^{1,2}[0,\infty]^m$ and $w(s)=0$ for $s \le 0$. If any $B_{2i},D_{12i},D_{22i}\neq0$, we require $u\in W^{1,2}[0,\infty]^p$ and $u(s)=0$ for $s \le 0$. Under the conditions stated above, existence of a classical continuously differentiable solution $x$ is guaranteed as in, e.g. Thm. 3.3 of Chapter 3 in~\cite{kolmanovskii_book} (See also Thm.~1.1 of Chapter 6 in~\cite{hale_book}).
Note that the dimensions of all matrices in this representation can be inferred from the dimension of the respective state and signals.\vspace{-4mm} \subsection{Advantages of the DDE Formulation}\vspace{-3mm} The DDE formulation is the prima facie modeling tool for systems with delay and as such is used in almost all network models. The DDE representation has a clear and intuitive meaning. Furthermore, most algorithms and analysis tools are built for this representation. Specifically, Lyapunov-Krasovskii and Lyapunov-Razumikhin stability tests are naturally formulated in this framework. However, the DDE does not allow for the representation of difference equations and does not allow us to identify which of the states and inputs are delayed by which amount. For this reason, we consider next the DDF representation.\vspace{-4mm} \section{The DDF Representation}\label{sec:DDF}\vspace{-3mm} A generalization of the DDE is the Differential-Difference (DDF) formulation. In addition to the signals included in the DDE, the DDF adds the following. \begin{itemize} \item The items stored in the signal $r_i(t)\in \mathbb{R}^{p_i}$ are the parts of $x$, $w$, $u$, $v$ which are delayed by amount $\tau_i$. The $r_i$ are the infinite-dimensional part of the system. \item The ``output'' signal $v(t)\in \mathbb{R}^{n_v}$ extracts information from the infinite-dimensional signals $r_i$ and distributes this information to the state, sensed output, and regulated output. This information can also be re-delayed by feeding back directly into the $r_i$. \end{itemize} The governing equations may now be represented in the more compact form of Eqns.~\eqref{eqn:DDF}. 
For given $u\in L_2^p$, $w \in L_2^m$, and initial conditions $x_0\in \mathbb{R}^n$, $r_{i0} \in W^{1,2}[-\tau_i,0]^{p_i}$ satisfying the ``sewing condition''\vspace{-2mm} \begin{align*} &r_{i0}(0)=C_{ri}x_0\\ &+D_{rvi}\left(\sum_{i=1}^K C_{vi} r_{i0}(-\tau_i)+\sum_{i=1}^K \int_{-\tau_i}^0C_{vdi}(s) r_{i0}(s)ds\right)\\[-7mm] \end{align*} for $i=1,\cdots,K$, we say that $x:[0,\infty]\rightarrow \mathbb{R}^n$, $z:[0,\infty]\rightarrow \mathbb{R}^q$, $y:[0,\infty]\rightarrow \mathbb{R}^r$, $r_i:[-\tau_i,\infty]\rightarrow \mathbb{R}^{p_i}$ for $i=1,\cdots,K$, and $v:[0,\infty]\rightarrow \mathbb{R}^{n_v}$ satisfy the DDF defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ if $x$ is differentiable on $[0,\infty]$, $r_i(s)=r_{i0}(s)$ for $s \in [-\tau_i,0]$, $r_i(t+\cdot) \in W^{1,2}[-\tau_i,0]$ for $i=1,\cdots,K$, and Eqns.~\eqref{eqn:DDF} are satisfied for all $t \ge 0$. In this manuscript, we assume the $C_{vdi}$ are bounded and in the case where $B_{r1i}\neq 0$ or $B_{r2i}\neq 0$, we require $w\in W^{1,2}[0,\infty]^m$ with $w(s)=0$ for $s\le0$ or $u\in W^{1,2}[0,\infty]^p$ with $u(s)=0$ for $s\le0$, respectively. Under the conditions stated above, existence of a classical solution $x,r_i,v$ is guaranteed as in~\cite{hale_book}, Chapter 9, Thm.~1.1. Furthermore, the ``sewing condition'' and constraints on $w$ and $u$ ensure the solution $r_i$ is continuously differentiable as in~\cite{gil_book} p. 226; or~\cite{kolmanovskii_book}, Thms. 3.1 and 5.4. Note also that the condition $r_i(t+\cdot) \in W^{1,2}$ may be relaxed to continuity as treated in~\cite{henry_1974}.\vspace{-4mm} \subsection{DDEs are a special case of DDFs}\vspace{-3mm} Although Eqns.~\eqref{eqn:DDF} are more compact, they are more general than the DDEs in~\eqref{eqn:DDE}. 
Specifically, if we use the conversion formula defined in Eqn.~\eqref{eqn:DDEtoDDF}, then the solution to the DDF is also a solution to the DDE and vice-versa.\vspace{-2mm} \begin{lem} Suppose that $C_{vi}$, $C_{vdi}$, $C_{ri}$, $B_{r1i}$, $B_{r2i}$, $D_{rvi}$, $B_v$, $D_{1v}$, and $D_{2v}$ are as defined in Eqns.~\eqref{eqn:DDEtoDDF}. Given $u$, $w$, $x_{0}$, the functions $x$, $y$, and $z$ satisfy the DDE defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ if and only if $x$, $y$, $z$, and $r_i$ satisfy the DDF defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ where\vspace{-2mm} \[ r_i(t)={\bmat{x(t)\\w(t)\\u(t)}},\quad r_{i0}={\bmat{x_0\\0\\0}}\qquad i=1,\cdots,K.\vspace{-6mm} \] \end{lem} \subsection{Neutral-Delay Systems (NDSs) are a special case of DDFs}\vspace{-3mm} DDFs are a natural extension of NDSs, which have the general form of Eqn.~\eqref{eqn:NDS} where for simplicity, we assume $x(t),w(t),u(t)=0$ for all $t\le 0$. The conversion from NDS to DDF is given in Eqn.~\eqref{eqn:NDStoDDF}. \begin{lem} Suppose that $C_{vi}$, $C_{vdi}$, $C_{ri}$, $B_{r1i}$, $B_{r2i}$, $D_{rvi}$, $B_v$, $D_{1v}$, and $D_{2v}$ are as defined in Eqns.~\eqref{eqn:NDStoDDF}. Given $u$, $w$, the functions $x$, $y$, and $z$ satisfy the NDS defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ if and only if $x$, $y$, $z$, $v$ and $r_i$ satisfy the DDF defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ where $r_{i0}=0$ and \vspace{-2mm} \[ r_i(t)={\bmat{x(t) \\w(t)\\u(t)\\ \dot x(t)}}, \qquad i=1,\cdots,K.\vspace{1mm} \] and\vspace{-5mm} {\small\begin{align*} &v(t)=\sum_{i=1}^K \bmat{A_i & B_{1i} & B_{2i}& E_i\\C_{1i}& D_{11i} & D_{12i} & E_{1i}\\C_{2i} & D_{21i} & D_{22i}&E_{2i}} \bmat{x(t-\tau_i)\\w(t-\tau_i)\\u(t-\tau_i)\\ \dot x(t-\tau_i)}+\\ &\sum_{i=1}^K \int\limits_{-\tau_i}^0\bmat{A_{di}(s) & B_{1di}(s) &B_{2di}(s)& E_{di}(s)\\C_{1di}(s) & D_{11di}(s) & D_{12di}(s)& E_{1di}(s)\\C_{2di}(s) &D_{21di}(s) & D_{22di}(s)& E_{2di}(s)} \bmat{x(t+s)\\w(t+s)\\u(t+s)\\ \dot x(t+s)}ds.
\end{align*}} \end{lem} \subsection{Advantages of the DDF Representation}\vspace{-3mm} The first advantage of the DDF is that it may include difference equations. To illustrate, suppose we set all matrices to zero except $D_{rvi}$ and $C_{vi}$. Then we have the following set of Difference Equations (DEs)\vspace{-2mm} \[ r_i(t)=\sum\nolimits_{j=1}^K D_{rvi}C_{vj} r_j(t-\tau_j)\qquad i=1,\cdots,K.\vspace{-2mm} \] Another example of DEs can be found in Subsec.~\ref{subsec:UAV_SOF}, where we provide a model of network control which can be represented as a DDF, but not a DDE. A related advantage of the DDF is the ability of DDFs to generate discontinuous solutions if the ``sewing condition'' on initial conditions is relaxed. This ability is not inherited using our formulation of ODE-PDE or PIE. The second advantage of the DDF occurs when the delayed channels only include subsets of the state. For example, if the matrices $A_i$ have low rank (ignoring input and disturbance delay), then $A_{i}=\tilde A_{i}\hat A_i$ for some $\hat A_i$, $\tilde A_i$ where $\hat A_i \in \mathbb{R}^{l_i \times n}$ with $l_i<n$ and we may choose $C_{vi}=\tilde A_{i}$ and $C_{ri}=\hat A_i$. The dimension of $r_i(t)$ now becomes $\mathbb{R}^{l_i}$. This decomposition may be used to reduce complexity in the DDF formulation if $l_i<n$. This reduction is illustrated in detail using the UAV network model in Subsec.~\ref{subsec:UAV_DDF} and the temperature control network in Sec.~\ref{sec:temp}. A disadvantage of the DDF is that fewer tools are available for analysis and control of DDFs. This is partially because the class of DDFs is larger than the DDEs and thus the tools must be more general.
However, we do note that versions of both the Lyapunov-Krasovskii (\cite{gu_2010}) and Lyapunov-Razumikhin (\cite{zhang1998new}) stability tests have been formulated in the DDF framework.\vspace{-3mm} \section{The Coupled ODE-PDE Representation}\label{sec:ODEPDE}\vspace{-3mm} We next consider the coupled ODE-PDE representation. Widely recognized as a physical interpretation of delay systems~\cite{richard_2003,hale_book}, ODE-PDE representations allow us to use backstepping methods originally developed for control of PDE models and which have recently been extended to systems with delay - See~\cite{krstic_2008,zhu_2015,karafyllis_2014}. The particular class of ODE-PDE systems, as given in Eqn.~\eqref{eqn:ODEPDE}, is equivalent to the class of DDFs. Since we have shown that DDEs are a special case of DDFs, we present only the conversion between DDF and ODE-PDE. Such conversion is trivial, however, as all matrices in the following ODE-PDE model are the same ones used to define the DDF. For given $u\in L_2^p$, $w \in L_2^m$, and initial conditions $x_0 \in \mathbb{R}^n$, $\phi_{i0} \in W^{1,2}[-1,0]^{p_i}$ satisfying the ``sewing condition''\vspace{-2mm} \begin{align} &\phi_{i0}(0)=C_{ri}x_0\label{eqn:sewing_PDE}\\ &+D_{rvi}\left(\sum_{i=1}^K C_{vi} \phi_{i0}(-1)+\sum_{i=1}^K \int_{-1}^0\tau_iC_{vdi}(\tau_i s) \phi_{i0}(s)ds\right)\notag\\[-6mm] \notag \end{align} for $i=1,\cdots,K$, we say that $x:[0,\infty]\rightarrow \mathbb{R}^n$, $z:[0,\infty]\rightarrow \mathbb{R}^q$, $y:[0,\infty]\rightarrow \mathbb{R}^r$, $\phi_i(t) \in W^{1,2}[-1,0]^{p_i}$ for $i=1,\cdots,K$, and $v:[0,\infty]\rightarrow \mathbb{R}^{n_v}$ satisfy the ODE-PDE defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ if $x$ is differentiable and $\phi_i$ is Fr\'echet differentiable on $[0,\infty]$, $x(0)=x_0$, $\phi_i(0,s)=\phi_{i0}(s)$ for $s \in [-1,0]$ for $i=1,\cdots,K$, and Eqns.~\eqref{eqn:ODEPDE} are satisfied for all $t \ge 0$. 
As for the DDF, if $B_{r1i}\neq 0$ or $B_{r2i}\neq 0$, we require $w\in W^{1,2}$ or $u\in W^{1,2}$, respectively.\vspace{-2mm} In Eqns.~\eqref{eqn:ODEPDE}, the infinite-dimensional part of the state is $\phi_i$ - which represents a pipe through which information is flowing. Our formulation is somewhat atypical in that we have scaled all the pipes to have unit length and accelerated or decelerated flow through the pipes according to the desired delay. Solutions to Eqns.~\eqref{eqn:ODEPDE} and Eqns.~\eqref{eqn:DDF} are equivalent, as in the following lemma.\vspace{-2mm} \begin{lem} Suppose for given $u$, $w$, $r_{i0}$, that $x$, $r_i$, $v$, $y$, and $z$ satisfy the DDF defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$. Then for $u$, $w$, $\phi_{i0}(s)=r_{i0}(\tau_i s)$, we have that $x$, $v$, $y$, and $z$ also satisfy the ODE-PDE defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ with $ \phi_i(t,s)=r_i(t+\tau_i s)$. Similarly, for given $u$, $w$, $\phi_{i0}$, if $x$, $v$, $y$, $\phi_i$ and $z$ satisfy the ODE-PDE defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$, then $x$, $v$, $y$, and $z$ satisfy the DDF with $r_i(t)=\phi_i(t,0)$ and $r_{i0}(s)=\phi_{i0}(s/\tau_i)$.\vspace{-3mm} \end{lem} \subsection{Advantages of the ODE-PDE Representation}\vspace{-3mm} In the ODE-PDE representation, the infinite-dimensional part of the state is $\mbf \phi(t) \in W^{1,2}[-1,0]^{\sum_i p_i}$. Significantly, by scaling the pipes (and ignoring the distributed delay), the ODE-PDE representation isolates the effect of the delay parameters to a single term - $\dot \phi_i(t,s)=\frac{1}{\tau_i}\phi_{i,s}(t,s)$. This feature makes it easier to understand the effects of uncertainty and time-variation in the delay parameter. 
Additionally, the ODE-PDE is the native representation used for recently developed backstepping methods for systems with delay, such as proposed in~\cite{krstic_2008,zhu_2015,karafyllis_2014} and use of the conversion formulae provided may allow these methods to be applied to solve a larger class of systems - including difference equations.\vspace{-4mm} \section{The PIE Representation}\label{sec:PIE}\vspace{-3mm} A Partial Integral Equation (PIE) has the form of Eqn.~\eqref{eqn:PIE}, where the operators $\mcl T, \mcl A, \mcl B_{i}, \mcl C_i, \mcl D_{ij}$ are Partial Integral (PI) operators and have the form\vspace{-2mm} \[ \left(\fourpi{P}{Q_1}{Q_2}{R_i}\bmat{x\\\mbf \Phi}\right)(s):= {\bmat{ Px + \int_{-1}^{0} Q_1(s)\mbf \Phi(s)ds\\ Q_2(s)x +\left(\mcl P_{\{R_i\}}\mbf \Phi\right)(s) }}\vspace{-2mm} \] where\vspace{-2mm} \begin{align*} &\left(\mcl{P}_{\{R_i\}}\mbf \Phi\right)(s):= \\ &R_0(s) \mbf \Phi(s) +\int_{-1}^s R_1(s,\theta)\mbf \Phi(\theta)d \theta+\int_s^0R_2(s,\theta)\mbf \Phi(\theta)d \theta.\\[-7mm] \notag \end{align*} For given $u\in L_2^p$, $w \in L_2^m$, and initial conditions $\mbf x_{0} \in \mathbb{R}^n \times L_2[-1,0]^{p}$, we say that $\mbf x(t) \in \mathbb{R}^n \times L_2[-1,0]^p$, $z:[0,\infty]\rightarrow \mathbb{R}^q$, $y:[0,\infty]\rightarrow \mathbb{R}^r$ satisfy the PIE defined by $\{\mcl T, \mcl A, \mcl B_i, \mcl C_i, \mcl D_{ij},\mcl B_{T_i}\}$ if $\mbf x$ is Fr\'echet differentiable on $[0,\infty]$, $\mbf x(0)=\mbf x_{0}$ and Eqns.~\eqref{eqn:PIE} are satisfied for all $t \ge 0$. As for the ODE-PDE, if $\mcl B_{T_1}\neq 0$ or $\mcl B_{T_2}\neq 0$ we require $w\in W^{1,2}$ or $u\in W^{1,2}$, with $w(0)=0$ or $u(0)=0$, respectively.\vspace{-2mm} Heretofore, we have shown that the DDE is a special case of the DDF, which is equivalent to a coupled ODE-PDE, where coupling occurs at the boundary. 
Given a DDF or ODE-PDE representation, it is relatively straightforward to convert to a PIE by defining the operators $\mcl T, \mcl A, \mcl B_i, \mcl C_i, \mcl D_{ij},\mcl B_{T_i}$ for which solutions to Eqns.~\eqref{eqn:PIE} also define solutions to Eqns.~\eqref{eqn:DDF} (DDF) and Eqns.~\eqref{eqn:ODEPDE} (ODE-PDE). Specifically, let us define $\{\mcl T, \mcl A, \mcl B_i, \mcl C_i, \mcl D_{ij},\mcl B_{T_i}\}$ as in Eqn.~\eqref{eqn:PIE_ops} where the required matrices are as defined in Eqns.~\eqref{eqn:PIE_Mats}. Then we have the following.\vspace{-2mm} \begin{figure*}[!t] \textbf{Conversion Formula from DDE to DDF:}\vspace{-4mm} \begin{equation} \bmat{B_v\\D_{1v}\\D_{2v}}=I,\; C_{vi}=\bmat{A_i & B_{1i} &B_{2i}\\C_{1i} & D_{11i}&D_{12i} \\ C_{2i} & D_{21i}&D_{22i}},\; C_{vdi}(s)=\bmat{A_{di}(s) & B_{1di}(s) &B_{2di}(s)\\C_{1di}(s) & D_{11di}(s)&D_{12di}(s) \\ C_{2di}(s) & D_{21di}(s)&D_{22di}(s)},\; D_{rvi}=0,\; \bmat{C_{ri}&B_{r1i}&B_{r2i}}=I.\label{eqn:DDEtoDDF} \end{equation} \textbf{Conversion Formula from NDS to DDF:}\vspace{-3mm} \begin{align} &D_{rvi}=\bmat{0&0&0\\0&0&0\\0&0&0\\I&0&0},\qquad \bmat{C_{ri}&B_{r1i}&B_{r2i}}=\bmat{I_n & 0& 0\\ 0&I_m&0\\0&0&I_p\\A_0 & B_1 & B_2} ,\qquad\bmat{B_v\\D_{1v}\\D_{2v}}=I_{n+q+r},\notag\\[-0mm] &C_{vi}= \bmat{A_i & B_{1i} & B_{2i}& E_i\\C_{1i}& D_{11i} & D_{12i} & E_{1i}\\C_{2i} & D_{21i} & D_{22i}&E_{2i}},\quad C_{vdi}(s)=\bmat{A_{di}(s) & B_{1di}(s) &B_{2di}(s)& E_{di}(s)\\C_{1di}(s) & D_{11di}(s) & D_{12di}(s)& E_{1di}(s)\\C_{2di}(s) &D_{21di}(s) & D_{22di}(s)& E_{2di}(s)}\label{eqn:NDStoDDF}\\[-6mm]\notag \end{align} \textbf{Conversion Formula from ODE-PDE or DDF to PIE:}\vspace{-3mm} \begin{align} \mcl A&=\fourpi{\mbf A_0}{\mbf A}{0}{I_\tau,0,0},& \mcl T&=\fourpi{I}{0}{\mbf T_0}{0,\mbf T_a,\mbf T_b}, & \mcl B_{T_1}&=\fourpi{0}{\emptyset}{\mbf T_{1}}{\emptyset}, &\mcl B_{T_2}&=\fourpi{0}{\emptyset}{\mbf T_{2}}{\emptyset},&\notag\\ \mcl B_1&=\fourpi{\mbf B_1}{\emptyset}{0}{\emptyset}, &\mcl B_2&=\fourpi{\mbf 
B_2}{\emptyset}{0}{\emptyset},& \mcl C_1&=\fourpi{\mbf C_{10}}{\hspace{2mm}\mbf C_{11}}{\emptyset}{\emptyset},& \mcl C_2&=\fourpi{\mbf C_{20}}{\hspace{2mm}\mbf C_{21}}{\emptyset}{\emptyset},&\mcl D_{ij}=\fourpi{\mbf D_{ij}}{\emptyset}{\emptyset}{\emptyset}\label{eqn:PIE_ops}\\[-7mm] \notag \end{align} where\vspace{-3mm} \begin{align} &\hat C_{vi}=C_{vi} +\int_{-1}^0 \tau_iC_{vdi}(\tau_i s)ds,\qquad D_I=\left(I_{n_v}-\left(\sum_{i=1}^K \hat C_{vi}D_{rvi}\right)\right)^{-1},\quad C_{Ii}(s)=-D_I\left(C_{vi} +\tau_i\int_{-1}^sC_{vdi}(\tau_i \eta) d \eta \right)\notag\\ &\bmat{\mbf T_0 & \mbf T_1 &\mbf T_2}=\bmat{C_{r1} & B_{r11} & B_{r21}\\ \vdots&\vdots&\vdots \\C_{rK} & B_{r1K}& B_{r2K}} +\bmat{D_{rv1} \\ \vdots \\D_{rvK} }\bmat{C_{vx}&D_{vw}&D_{vu}},\quad \bmat{C_{vx}&D_{vw}&D_{vu}}=D_I\sum_{i=1}^K \hat C_{vi} \bmat{C_{ri}&B_{r1i}&B_{r2i}}\notag\\ &\mbf T_{a}(s,\theta)=\bmat{D_{rv1} \\ \vdots \\ D_{rvK}}\bmat{C_{I1}(\theta)&\cdots&C_{IK}(\theta)},\qquad \mbf T_{b}(s,\theta)=-I_{\sum_i p_i}+\mbf T_{a}(s,\theta),\quad I_\tau=\bmat{\frac{1}{\tau_1}I_{p_1}&&\\&\ddots&\\&&\frac{1}{\tau_K}I_{p_K}}, \notag\\ &\bmat{\mbf A(s)\\\mbf C_{11}(s) \\\mbf C_{21}(s) }=\bmat{B_v\\D_{1v} \\D_{2v} }\bmat{C_{I1}(s)&\cdots&C_{IK}(s)},\; \bmat{ \mbf A_0&\mbf B_1&\mbf B_2\\ \mbf C_{10}&\mbf D_{11}&\mbf D_{12}\\ \mbf C_{20}&\mbf D_{21}&\mbf D_{22}}=\bmat{ A_0& B_1&B_2\\ C_{10}& D_{11}& D_{12}\\ C_{20}&D_{21}& D_{22}}+\bmat{B_v\\D_{1v} \\D_{2v} }\bmat{C_{vx}&D_{vw}&D_{vu}}.\label{eqn:PIE_Mats}\\[-8mm]\notag \end{align} \caption{Conversion formulae from DDE to DDF, NDS to DDF, and DDF/ODE-PDE to PIE}\label{fig:formulae1} \hrulefill \end{figure*} \begin{lem} Given $u$, $w$, and $x_0$, $\phi_{i0}$ satisfying the ``Sewing Condition~\eqref{eqn:sewing_PDE}'', Suppose $x$, $\phi_i$, $v$, $y$, and $z$ satisfy the ODE-PDE defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$. 
Then $y$ and $z$ also satisfy the PIE defined by $\{\mcl T, \mcl A, \mcl B_i, \mcl C_i, \mcl D_{ij},\mcl B_{T_i}\}$ with $\mcl T, \mcl A, \mcl B_i, \mcl C_i, \mcl D_{ij},\mcl B_{T_i}$ as defined in Eqn.~\eqref{eqn:PIE_ops} and\vspace{-3mm} \[ \mbf x(t):={\bmat{x(t)\\ \partial_s \phi_{1}(t,\cdot)\\\vdots\\ \partial_s \phi_{K}(t,\cdot)}}\quad \mbf x_0:={\bmat{x_0\\ \partial_s \phi_{10}\\ \vdots \\ \partial_s \phi_{K0}}}.\vspace{-3mm} \] Furthermore, for given $u$, $w$, $\mbf x_{0} \in \mathbb{R}^n \times L_2[-1,0]^p$, if $y$, $z$ and $\mbf x$ satisfy the PIE defined by $\{\mcl T, \mcl A, \mcl B_i, \mcl C_i, \mcl D_{ij},\mcl B_{T_i}\}$, then $x$, $\phi_i$, $v$, $y$, and $z$ satisfy the ODE-PDE defined by $\{A_{i}, B_i,C_i,D_{ij},\cdots\}$ where\vspace{-2mm} \[ {\bmat{x(t)\\ \phi_{1}(t,\cdot)\\\vdots\\ \phi_{K}(t,\cdot)}}=\mcl T \mbf x(t)+\mcl B_{T1}w(t)+\mcl B_{T2}u(t),\; {\bmat{x_0\\ \phi_{10}\\\vdots\\ \phi_{K0}}}=\mcl T \mbf x_0.\vspace{-4mm} \] \end{lem} Note that while solutions of the ODE-PDE are equivalent to those of the PIE, some notions of stability of such solutions may not be. \vspace{-4mm} \subsection{Advantages of the PIE Representation} \vspace{-3mm} Like the DDF and ODE-PDE, PIEs can be used to represent low-dimensional delay channels. An additional advantage is the lack of boundary conditions or the `sewing' constraint on the initial condition in, e.g. Eqn.~\eqref{eqn:sewing_PDE}. This is significant in that the implicit dynamics in an ODE-PDE imposed by boundary conditions on $\phi_i$ complicate stability and optimal control problems. By contrast, in PIEs, the infinite-dimensional part of the state is $\partial_s \phi_i$ which is in $L_2$ but is otherwise unconstrained. Furthermore, PIEs are defined using the algebra of Partial Integral (PI) operators. The algebraic nature of PI operators implies that most tools developed for matrices can be extended to PIEs - including the LMI framework.
Specifically, the LMIs for $H_{\infty}$-optimal observer and controller synthesis have been extended to PIEs, as can be found in~\cite{shuangshuang_2020CDC} and~\cite{shivakumar_2020CDC}, respectively. We refer to this extension of the LMI framework as Linear PI Inequalities (LPIs); a Matlab toolbox for solving LPIs can be found in~\cite{shivakumar_2020ACC}. An example of these synthesis results can be found in Sec.~\ref{sec:temp}.\vspace{-3mm} \subsection{Conversion from DDE to PIE}\label{subsec:DDEtoPIE}\vspace{-3mm} In this subsection, we bypass the DDF and give a formula for direct conversion between the DDE and PIE representations. This formula is given in Eqns.~\eqref{eqn:DDEtoPIE}.\vspace{-4mm} \begin{figure*}[!t] \textbf{Conversion Formula from DDE to PIE:} $\mcl T, \mcl A, \mcl B_i, \mcl C_i, \mcl D_{ij},\mcl B_{T_i}$ are as defined in Eqn.~\eqref{eqn:PIE_ops} where now \begin{align} &I_\tau=\bmat{\frac{1}{\tau_1}I_{n+m+p}&&\\&\ddots&\\&&\frac{1}{\tau_K}I_{n+m+p}},\; \mbf T_0={\scriptsize \bmat{\bmat{I_{n}&0_{n,m} & 0_{n,p}}^T\\ \vdots \\\bmat{I_{n}&0_{n,m} & 0_{n,p}}^T}},\; \mbf T_1={\scriptsize \bmat{\bmat{0_{m,n}&I_{m} & 0_{m,p}}^T\\ \vdots \\\bmat{0_{m,n}&I_{m} & 0_{m,p}}^T}},\; \mbf T_2={\scriptsize \bmat{\bmat{0_{p,n}&0_{p,m} & I_{p}}^T\\ \vdots \\\bmat{0_{p,n}&0_{p,m} & I_{p}}^T}},\notag\\ &\mbf T_a=0_{(n+m+p)K},\quad \mbf T_b=-I_{(n+m+p)K},\notag \\ &\bmat{\mbf A(s) \\ \mbf C_{11}(s) \\ \mbf C_{21}(s)}=-\bmat{X_{1}(s)&\cdots&X_{K}(s) },\; \qquad X_{i}(s)=\bmat{A_i & B_{1i} &B_{2i} \\C_{1i} & D_{11i}&D_{12i} \\ C_{2i} & D_{21i}&D_{22i}} +\tau_i\int_{-1}^s\bmat{A_{di}(\tau_i\eta) & B_{1di}(\tau_i\eta) &B_{2di}(\tau_i\eta) \\ C_{1di}(\tau_i\eta) & D_{11di}(\tau_i\eta)&D_{12di}(\tau_i\eta)\\C_{2di}(\tau_i\eta) & D_{21di}(\tau_i\eta)&D_{22di}(\tau_i\eta) } d \eta,\notag\\ &\bmat{ \mbf A_0&\mbf B_1&\mbf B_2\\ \mbf C_{10}&\mbf D_{11}&\mbf D_{12}\\ \mbf C_{20}&\mbf D_{21}&\mbf D_{22}}=\bmat{ A_0& B_1&B_2\\ C_{10}& D_{11}& D_{12}\\ C_{20}&D_{21}&
D_{22}}+\sum_{i=1}^K\bmat{ A_{i}& B_{1i}&B_{2i}\\ C_{1i}& D_{11i}& D_{12i}\\ C_{2i}&D_{21i}& D_{22i}}+\int_{-1}^0\sum_{i=1}^K \tau_i \bmat{ A_{di}(\tau_is)& B_{1di}(\tau_is)&B_{2di}(\tau_is)\\ C_{1di}(\tau_is)& D_{11di}(\tau_is)& D_{12di}(\tau_is)\\ C_{2di}(\tau_is)&D_{21di}(\tau_is)& D_{22di}(\tau_is)}ds\label{eqn:DDEtoPIE}\\[-8mm]\notag \end{align} \caption{Direct conversion formula from DDE to PIE, bypassing the DDF.}\label{fig:formulae2} \hrulefill \end{figure*} \section{Modeling of a Network of UAVs}\label{sec:UAV} \vspace{-3mm} To compare the DDE, DDF, ODE-PDE and PIE representations, we return to control of a network of UAVs. In this section, we focus on the DDE and DDF representations, as conversion from DDF to ODE-PDE or PIE is straightforward using the formulae provided. For simplicity, we eliminate the state delays $\hat \tau_{ij}$ governing interactions between UAVs (we will consider state delays in Sec.~\ref{sec:temp}) and map the process, input, and output delays to a common set of delays, $\{\tau_j\}_{j=1}^{3N}$ where the index for the process delay for UAV $i$ is as $\tau_i=\bar \tau_i$, the index for input delay for UAV $i$ is as $\tau_{N+i}=h_i$, and the index of the output delay from UAV $i$ is as $\tau_{2N+i}=\tilde \tau_i$. The process noise is dimension $w(t)\in\mathbb{R}^m$, the common input is dimension $u(t)\in\mathbb{R}^p$, all states are dimension $x_i(t) \in \mathbb{R}^n$ and the outputs are all dimension $y_i(t)\in\mathbb{R}^r$. 
In this case, we re-write the network model in Eqns.~\eqref{eqn:network1} as\vspace{-2mm} \begin{align*} \dot x_i(t)&=a_ix_i(t)+\sum\nolimits_{j=1}^N a_{ij} x_j(t)\\ &\hspace{2cm}+b_{1i}w(t- \tau_{i})+b_{2i} u(t-\tau_{N+i})\\ z(t)&=C_{1} x(t)+D_{12}u(t)\\ y_i(t)&=c_{2i} x_i(t-\tau_{2N+i})+d_{21i}w(t-\tau_{2N+i}).\\[-8mm] \end{align*} \subsection{The DDE Representation} \label{subsec:UAV_DDE}\vspace{-3mm} To model this network as a DDE, we consider Eqn.~\eqref{eqn:DDE} where $K=3N$ for a given $C_{10}$ and $D_{12}$. First, we define $A_0$ blockwise as\vspace{-3mm} \[ [A_0]_{ij}=\begin{cases} a_i, & i=j\\ a_{ij} & \text{otherwise} \end{cases}\vspace{-3mm} \] and define the following matrices blockwise for $i=1,\cdots,N$ as\vspace{-3mm} \begin{align*} &B_{1,i}= e_i \otimes b_{1i},\quad B_{2,N+i}=e_i \otimes b_{2i},\\ &C_{2,2N+i}= e_i \otimes c_{2i},\quad D_{21,2N+i}=e_i \otimes d_{21i}.\\[-6mm] \end{align*} All other undefined matrices in Eqn.~\eqref{eqn:DDE} are $0$. The DDE representation of the network has the obvious disadvantage that there are $3N$ delays and each delayed channel contains all states and inputs - yielding an aggregate delayed channel of size $\mathbb{R}^{3N(nN+m+p)}$.\vspace{-3mm} \subsection{The DDF Representation} \label{subsec:UAV_DDF}\vspace{-3mm} To efficiently model the network model as a DDF, we retain the matrix $A_0$ from the DDE model in Subsec.~\ref{subsec:UAV_DDE}, set $C_1=C_{10}$ and leave $D_{12}$ unchanged. Our first step is to define the vectors $r_i(t)$ and $v(t)$ using $B_{r1i}$, $B_{r2i}$,$C_{ri}$, $C_{vi}$, $B_v$, and $B_{2v}$ (all other matrices are $0$). The first $3$ sets of matrices are defined for $i=1,\cdots,N$ as $B_{r1,i}= b_{1i}$, $B_{r1,2N+i}= d_{21i}$, $B_{r2,N+i}=b_{2i}$, and $C_{r,2N+i}= c_{2i}$. We presume the UAV state dimensions ($n$) are less than the size of the aggregate input ($m$) and disturbance vectors ($p$) (i.e. $n<m$ and $n<p$). 
In this case it is preferable to delay only the part of the input and disturbance signals which affects each UAV. We now have the following definition for $r_i$ for $i=1,\cdots,3N$.\vspace{-2mm} \begin{align*} &r_i(t)=\\ &\begin{cases} b_{1i}w(t)& i\in [1,N]\\ b_{2,i-N}u(t)& i\in [N+1,2N]\\ c_{2,i-2N}x_{i-2N}(t)+d_{21,i-2N}w(t) & i\in [2N+1,3N]. \end{cases}\\[-5mm] \end{align*} Next, we construct output $v(t)$ by defining $C_{vi}$ for $i=1,\cdots,3N$ as $C_{vi}=e_i\otimes I_{p_i}$ which yields\vspace{-2mm} \[ v(t)=\bmat{r_1(t-\tau_1)^T& \cdots & r_{3N}(t-\tau_{3N})^T}^T.\vspace{-2mm} \] Finally, we feed $v(t)$ back into the dynamics using\vspace{-2mm} \[ B_v=\bmat{I&\cdots&I&I&\cdots&I&0},\; D_{2v}=\bmat{0&\cdots&0&0&\cdots&0&I},\vspace{-2mm} \] which recovers the network model.\vspace{-3mm} \subsection{Complexity of DDEs vs. DDFs} \vspace{-3mm} In the DDF model, the infinite-dimensional state is $r_i$. In our DDF formulation of the UAV model: each process delay adds $n$ states; each input delay adds $n$ states; and each output delay adds $r$ states to this vector. The aggregated infinite-dimensional state is then $L_2^{\sum_i p_i=(2n+r)N}$. Assuming that optimal control and estimation problems are tractable when the number of infinite-dimensional states is less than 50~\cite{peet_2020SICON}, and if we suppose $n=r=1$, then it is possible to control $17$ UAVs. By contrast, in the DDE model of our UAVs, the infinite-dimensional state is $L_2^{3N(m+p+r)}$ (meaning we can control at most $5$ or $6$ UAVs). \vspace{-4mm} \section{A Network which is a DDF, but not a DDE}\label{subsec:UAV_SOF} \vspace{-3mm} In this subsection, we present a network model which can be represented using DDFs, ODE-PDEs, and PIEs, but not using DDEs. These models arise from the use of static feedback - i.e. $u(t)=Fy(t)$ where $y(t)$ is the concatenated vector of outputs from the UAVs. Note that $y$ may include measurement of all states (the static state feedback problem). 
In this example, let us ignore output, process and state delay, but retain input delay and add a term which models the impact of actuator input $u(t)$ on the sensors as\vspace{-2mm} \[ y_i(t)=c_{2i} x_i(t)+d_{21i}w(t)+d_{22i}u(t-\tau_i).\vspace{-2mm} \] Let $A_0$, $C_1$, $D_{12}$, $B_{2i}$, $C_{vi}$ be as defined in Subsec.~\ref{subsec:UAV_DDF} and define\vspace{-2mm} \[ B_{1}={\bmat{b_{11}\\ \vdots \\ b_{1N}}},\quad D_{21}={\bmat{d_{21,1}\\ \vdots \\ d_{21,N}}}\vspace{-2mm} \] \[ C_2=\mathop{\mathrm{diag}}(c_{2,1},\cdots,c_{2,N}),\quad D_{22i}=e_i \otimes d_{22i}.\vspace{-0mm} \] Aggregating the measurements, we have\vspace{-2mm} \[ y(t)=C_2 x(t)+D_{21}w(t)+\sum\nolimits_{i=1}^N D_{22i}u(t-\tau_i).\vspace{-2mm} \] Now, substituting $u(t)=Fy(t)$ into the sensed output term, we obtain solutions of the form\vspace{-2mm} \begin{align} \dot x(t)&=A_0x(t)+B_{1}w(t)+\sum\nolimits_{i=1}^N B_{2i} F y(t-\tau_i)\notag \\ z(t)&=C_{1} x(t)+D_{12}Fy(t)\label{eqn:UAV_SOF} \\ y(t)&=C_{2} x(t)+D_{21}w(t)+\sum\nolimits_{i=1}^N D_{22i}Fy(t-\tau_i).\notag\\[-8mm]\notag \end{align} Clearly, there is no DDE model with solutions which satisfy Eqns.~\eqref{eqn:UAV_SOF} due to the recursion in the output~\cite{henry_1974}. However (assuming appropriate initial conditions), these solutions can be constructed using the DDF (and consequently the ODE-PDE and PIE frameworks). 
To construct such a model, we define the following terms.\vspace{-2mm} \begin{align} \tilde D_{12}&=D_{12}F D_{21},\quad \tilde D_{22}=0,\quad \tilde C_1 =C_1+D_{12}F C_2\notag\\ C_{ri}&=FC_2, \qquad B_{r1i}=FD_{21}, \qquad [D_{rvi}]_i=FD_{22i}\notag\\ B_{v}&=\bmat{B_{21}& \cdots & B_{2N}},\quad C_{vi}=e_i \otimes I \notag \\ D_{1v}&=D_{12}F D_{2v},\quad D_{2v}=\bmat{D_{22,1}& \cdots & D_{22,N}}\label{eqn:UAV_SOF_defs}\\[-7mm] \notag \end{align} \begin{lem} For given $r_{i0}$, $x_0$, suppose $r_i$, $v$, $y$, $x$, and $z$ satisfy the DDF defined by\vspace{-2mm} \[ \{A_0,B_1,B_v,\tilde C_{1},\tilde D_{12},D_{1v},D_{2v},C_{ri},B_{r1i},D_{rvi},C_{vi}\}\vspace{-2mm} \] given by Eqns.~\eqref{eqn:UAV_SOF_defs}. Then $x$, $z$ and $y$ also satisfy Eqns.~\eqref{eqn:UAV_SOF}. \end{lem} \vspace{-5mm} \section{Optimal Control of a Large Network}\label{sec:temp} \vspace{-3mm} To illustrate the computational advantages of the DDFs, ODE-PDEs, and PIEs for controller synthesis problems, we consider the scalable network model with state-delay for centralized control of water temperature for multiple showering customers as defined in~\cite{peet_2020SICON}. 
If $T_{1i}$ is the tap position and $T_{2i}$ is the temperature for user $i$, then the dynamics of this model are given by\vspace{-2mm} \begin{align} &\dot T_{1i}(t)=T_{2i}(t)-w_i(t) \label{eqn:shower}\\ &\dot T_{2i}(t)=-\alpha_i \left( T_{2i}(t-\tau_i) -w_i(t)\right) \notag\\ &\qquad + \sum\nolimits_{j \neq i}^N \gamma_{ij} \alpha_{j}\left(T_{2j}(t-\tau_j)-w_j(t)\right) + u_i(t) \notag\\[-1mm] & z(t) = \bmat{\sum\nolimits_{i=1}^N T_{1i}(t)& .1 \sum\nolimits_{i=1}^N u_i(t)}^T.\notag\\[-6mm]\notag \end{align} For $N$ users, we choose $\alpha_i=1$, $\gamma_{ij}=1/N$, $\tau_i=i$, and $w_i(t)=N$.\vspace{-3mm} \subsubsection{DDE Formulation of the Network}\vspace{-3mm} In~\cite{peet_2020SICON}, we formed the aggregate state vector as\vspace{-2mm} \[ x(t)=\bmat{T_{11}(t)& \cdots & T_{1N}(t)& T_{21}(t)&\cdots &T_{2N}(t)}^T\vspace{-2mm} \] and defined the DDE model using\vspace{-2mm} \begin{align*} A_0&=\bmat{0_{N \times N} & I_N\\ 0_{N \times N} & 0_{N \times N}},\quad A_i=\bmat{0_{N \times N} & 0_{N \times N}\\ 0_{N \times N} & \hat A_i}\\ \hat A_i&=\Gamma*\mathop{\mathrm{diag}}(e_i)=\Gamma*\mathop{\mathrm{diag}}\left(\bmat{0_{1 \times i-1} &1& 0_{1 \times N-i}}\right)\\ B_1&=\bmat{-I_N\\-\Gamma},\;\qquad B_2=\bmat{0_{N \times N}\\I_N}\\ [\Gamma]_{ij}&=\begin{cases}\gamma_{ij}\alpha_j&i\neq j\\ -\alpha_i & i=j\end{cases} \qquad i,j=1,\cdots,N\\ C_{1}&=\bmat{\mbf 1_N^T&0_{1 \times N}\\0_{1 \times N}&0_{1 \times N}},\;\; D_{11}=\bmat{0_{2 \times N}},\; D_{12}=\bmat{0_{1 \times N}\\.1 \mbf 1_N^T}.\\[-8mm]\notag \end{align*} In this formulation, we have $n=2N$ states, $m=N$ disturbances, $p=N$ inputs, $q=2$ regulated outputs and $K=N$ delays ($\tau_{ij}=\tau_j$). Using the SOS-based $H_\infty$-optimal controller synthesis algorithm for DDEs as presented in~\cite{peet_2020SICON}, we were able to design controllers for $N=4$ users. 
This corresponds to an infinite-dimensional channel of size $L_2^{nK=32}$.\vspace{-3mm} \subsubsection{DDF Formulation of the problem}\vspace{-3mm} To construct the DDF formulation of the problem, $x(t)$ is unchanged. However, we now define the delayed channels as\vspace{-2mm} \[ r_i(t)=\bmat{0_{1\times N+i-1}&1&0_{1 \times N-i}}x(t)=T_{2i}(t).\vspace{-2mm} \] This is done by defining $C_{ri},B_{r1i}, B_{r2i} $ and $D_{rvi}$ as\vspace{-2mm} \begin{align*} C_{ri}&=\bmat{0_{1\times N+i-1}&1&0_{1 \times N-i}}\\ B_{r1i}&=0_{1 \times N}\;\qquad B_{r2i}=0_{1 \times N}\;\qquad D_{rvi}=0_{1 \times N}.\\[-7mm]\notag \end{align*} We would like the output of the delayed channels to be the delayed states as\vspace{-2mm} \[ v(t)=\bmat{T_{21}(t-\tau_1)&\cdots &T_{2N}(t-\tau_N)}^T.\vspace{-2mm} \] This is accomplished by defining\vspace{-2mm} \[ C_{vi}=e_i=\bmat{0_{1 \times i-1}&1&0_{1 \times N-i}}^T,\qquad C_{vdi}=0_{2\times N}.\vspace{-2mm} \] Finally, we retain $A_0,B_1,B_2,C_1,C_2,D_{11},D_{12}$ from the DDE formulation, and use $B_v$ and $D_{1v}$ to model how the delayed terms affect the state dynamics and output signal. \vspace{-2mm} \[ B_v=\bmat{0_{N \times N}\\ \Gamma},\qquad D_{1v}=0\vspace{-2mm} \] In the DDF formulation, we have $n=2N$ states, $m=N$ disturbances, $p=N$ inputs, $q=2$ regulated outputs, $K=N$ delays ($\tau_{ij}=\tau_j$), and $K$ delay channels, each of dimension $L_2^1$.\vspace{-2mm} \subsection{$H_\infty$-optimal Control Using PIETOOLS 2020a} \vspace{-2mm} For $H_\infty$-optimal controller synthesis, we used the DDF to PIE converter \texttt{convert\_PIETOOLS\_DDF} and $H_\infty$-optimal synthesis option in the PIETOOLS 2020a Matlab toolbox, as described in~\cite{shivakumar_2020ACC} and available online at~\cite{PIETOOLS_website}. The DDF system input format for this toolbox is described in detail in the user manual~\cite{PIETOOLS_website}, as is the converter and controller synthesis feature. 
In this toolbox, the extreme performance option was selected to decrease computation times and reduce memory usage. The $H_\infty$-optimal controller synthesis feature in PIETOOLS solves the optimal control problem for a PIE and is based on the result in~\cite{shivakumar_2020CDC}. The numerical test was performed on a desktop computer with 128GB RAM and a 3 GHz intel processor. CPU seconds is as listed for the interior-point calculations determined by Sedumi. The computation times, indexed by number of users, are listed in Table~\ref{tab:computation}. In all cases, the achieved closed-loop $H_\infty$-norm was in the interval $[.3,3]$. Practically, we observe that the controller synthesis problem is tractable up to 40 users - a significant improvement from the $4$ users in~\cite{peet_2020SICON}. Note that 40 users corresponds to an aggregated infinite-dimensional channel of size $L_2^{\sum_i p_i=N=40}$. Also recall that for 40 users, we have 80 states, 40 inputs, 40 disturbances and 40 delays. Note that the PIETOOLS 2020a toolbox does not require use of the PIE formulation and will convert a DDE to a DDF, if desired. There is also a feature for constructing minimal DDF representations of DDEs - which can be very useful for solving large network problems. The conversion from a NDS to DDF is also included in the PIETOOLS library \texttt{examples\_DDF\_library\_PIETOOLS.m}. \begin{table} \caption{IPM CPU sec vs. 
\# of states ($N$) for $H_\infty$ control of Eqn.~\eqref{eqn:shower}.} \label{tab:computation} \begin{center}{\scriptsize \begin{tabular}{c|c|c|c|c|c|c|c} {\hspace{-2mm}\tiny \text{N}$\rightarrow$ }\hspace{-2mm} & $1$ & $3$ & $5$ & $10$ & $20$ & $30$ & $40$\\ \hline \tiny \text{CPU sec} & .48 & .638 & 2.42 & 94.7 & 5455 & 35k&157k\\ \end{tabular}} \end{center} \end{table}\vspace{-3mm} \section{Conclusion}\vspace{-3mm} This paper summarizes four possible representations for systems with delay: the Delay-Differential Equation (DDE) form; The Differential Difference (DDF) form; the ODE-PDE form; and the Partial Integral Equation (PIE) form. Formulae are given for conversion between these representations, although direct conversion from DDE to DDF is not advised if the delayed channels are low-dimensional (although PIETOOLS 2020a includes a feature for constructing minimal DDF representations of DDEs). Using the given formulae and definitions of solution, we show that the set of solutions for the DDF, ODE-PDE, and PIE are equivalent. These results imply that if there is a valid conversion formula, many solutions to the $H_\infty$-optimal control and estimation problems can be converted between representations by applying this formula to the closed-loop system. However, this only works if optimality is defined in terms of the finite-dimensional vectors, $x_0,u,w,x,y,z$. This is because any input-output pair $(u,w,x_0) \mapsto (y,z,x)$ which defines a solution to one representation also defines a solution for every other representation for which there is a valid conversion formula. Likewise, stability of the representations is equivalent as long as the stability definition only involves the finite-dimensional vectors, $x_0,x,u,w,y,z$. The results and formulae in this paper are meant to provide a convenient reference for researchers interested in exploring alternative representations of delay systems. 
A summary of the representations and conversion formulae is given in Table~\ref{tab:summary}, along with examples of simulation tools and controller synthesis results. We have shown using an example of a network of UAVs that some networks cannot be modeled in the DDE formulation and that careful choice of representation can significantly reduce the complexity of the underlying analysis and control problems. Finally, we have shown that $H_\infty$-optimal control in the DDF/ODE-PDE/PIE framework allows up to 40 agents, while formulation in the DDE framework only allows for control of 4 agents. \begin{table}[hb] \caption{Conversion formulae (DDF,PDE,PIE), simulation tools (Sim), controller design tools ($H_\infty$), and model definitions (Model) for each class of systems (PDE$\rightarrow$ODE-PDE).} \label{tab:summary} \begin{center}{\scriptsize \begin{tabular}{c|c|c|c|c|c|c} {\hspace{-2mm}\tiny Need$\rightarrow$ }\hspace{-2mm} & DDF & PDE & PIE & Sim & $H_\infty$ &Model\\ \hline \tiny DDE & \eqref{eqn:DDEtoDDF} & \eqref{eqn:DDEtoDDF}+\eqref{eqn:ODEPDE} & \eqref{eqn:DDEtoPIE} & \cite{bellen_book}& \cite{peet_2020SICON} & \eqref{eqn:DDE}\\ \tiny Neut. & \eqref{eqn:NDStoDDF} & \eqref{eqn:NDStoDDF}+\eqref{eqn:ODEPDE} & \eqref{eqn:NDStoDDF}+\eqref{eqn:PIE_Mats} & \cite{bellen_book} & \cite{xu_2003} & \eqref{eqn:NDS}\\ \tiny DDF & X & \eqref{eqn:ODEPDE} & \eqref{eqn:PIE_Mats} & - & - & \eqref{eqn:DDF}\\ \tiny PDE & X & X & \cite{shivakumar_2019CDC} & \cite{wouwer_book} & \cite{krstic_2008} & \eqref{eqn:ODEPDE}\\ \tiny PIE & X & X & X & - & \cite{shivakumar_2020CDC} & \eqref{eqn:PIE}\\ \end{tabular}} \end{center}\vspace{-2mm} \end{table} \vspace{-4mm} \bibliographystyle{plain}
{ "redpajama_set_name": "RedPajamaArXiv" }
6,226
_This book is dedicated to_ **MY DAD, JACK MIXON,** * * * **AND TO THE OTHER EARLY PITMASTERS** _who helped pave the way for me_. **TABLE _of_ CONTENTS** _how to use this book_ : **THE RULES FOR BBQ RULES** _introduction:_ **THE TRUE STORY OF BARBECUE** **CHAPTER 1: RULES & HELPERS** * * * **RULES** _Rule 1: Use the Right Wood_ _Rule 2: Build Yourself a Pit_ _Rule 3: Learn to Fire the Pit_ _Rule 4: Maintain Consistent Heat in the Pit_ _Rule 5: Choose Your Fire Wisely_ **HELPERS** _Mustard-Based Barbecue Sauce_ _Vinegar-Based Barbecue Sauce_ _Pit Brine_ _Pit Mop_ **CHAPTER 2: THE HOG** * * * **WHOLE HOG** _Coal-Fired Pit-Smoked Whole Hog_ **HOG PARTS** _Smoked Shoulder_ _Smoked Spareribs_ _Smoked Pig Tails_ _Smoked Trotters_ _Smoked Whole Ham_ **HOG EXTRAS** _Smoked Snout Sandwich_ _Pork Cracklin's_ _Brunswick Stew_ _Hog-Skin Collards_ **CHAPTER 3: BIRDS** * * * **WHOLE BIRDS** _Smoked Butterflied Chicken_ _Smoked Whole Turkey_ _Smoked Butterflied Turkey_ **BIRD PARTS** _Smoked Half Chickens_ _Smoked Chicken Wings_ _Smoked Turkey Legs_ _Smoked Turkey Wings_ **BIRD EXTRAS** _The Pitmaster's Turkey Sandwich_ _Smoked Chicken or Turkey Pitmaster Stock_ _The Pitmaster's Smoked Chicken Salad Sandwiches_ **CHAPTER 4: THE COW** * * * **BEEF CUTS** _Smoked Beef Ribs_ _Smoked Beef Short Ribs_ _Smoked Beef Tenderloin_ _Smoked Brisket_ _Smoked Oxtails_ _Smoked Prime Rib_ **STEAKS** _Smoked Rib-Eye Steaks_ _The Jack Mixon T-bone Steak_ **BEEF EXTRAS** _Smoked Burgers_ _T-bone Bone, Tomato & Onion Soup_ _Smoked Beef Stock_ _Smoked Meatloaf_ **CHAPTER 5: EXTRAS** * * * **EXTRAS** _Smoked Salt_ _Smoked Hoop Cheese_ _Smoked Whole Trout_ _Old-Fashioned Pulled Candy_ _Smoked Cornbread_ _Smoked Blackberry Cobbler_ _Smoked Chocolate Skillet Cake_ _Handmade Pit Ash Lye Soap_ **TOOLS & MEAT PREP** **ACKNOWLEDGMENTS** **ABOUT THE AUTHORS** **INDEX OF SEARCHABLE TERMS** **The early American pitmasters were** * * * **SURVIVORS** * * * **trying to make their way in this new country. 
And with pit barbecue they hit a home run right off the bat. The best combination of flavors today is still vinegar, salt, and pepper.** A **HOW TO USE THIS BOOK** * * * **THE RULES FOR BBQ RULES** This is a book that will talk about fire and meat. It will give you trustworthy formulas for many simple preparations of foods like pork, chicken, and beef. But there are a few things you need to keep in mind before you get started. One: I believe smoking meat on a coal-fired masonry pit makes the best barbecue there is. Before we even get to the first point, you should understand that this book is not about cooking with a direct flame, otherwise known as cooking meat over an open flame, otherwise known as grilling. In this book I'm talking about **barbecuing**. Both methods involve cooking outside. But the similarities to what I'm doing and grilling end there. Grilling is cooking food fast over high heat: 350 to 400°F (175 to 205°C) and up. It's a technique made for meat that's relatively tender and quick to cook. There are lots of good books that can tell you how to do that, but not this one. I specialize in true barbecuing, otherwise known as **smoking** and **pit smoking** (both of which involve cooking meat over fire but rely on different types of heat, which I'll get to in a moment), which not only cooks the meat at 350°F (175°C) or lower, but also infuses and tenderizes the meat with smoke and other natural flavors that can only come from cooking food by means of either direct or indirect wood-fired heat. You should know that, unlike a traditional smoker, a pit offers direct heat: The heat source is placed directly beneath the meat with no barrier—except for the occasional grate—between the coals and the meat. (Traditional smoking, however, is cooking food slowly over a low temperature that is fueled by the heat emitting from smoking wood chips in a vessel we call a _smoker;_ there is technically no fire in the smoking process because you're burning embers. 
When you cook in a masonry pit, there's a fire to stoke.) This book is going to show you how to do that at a level that will make your neighbors weep with admiration and envy, like mine do. A **RULE 1:** **TRY MAKING YOUR OWN COALS** We are cooking everything in a masonry pit that is fired by coals we'll make ourselves out of fresh wood we source locally. That may sound intimidating, but it's a very easy and inexpensive process that I'm going to talk you through in its entirety. (I will also give alternative recipes for other smokers for many of the dishes.) Turn to _this page_ to see my precise pit setup instructions. (Or turn to _this page_ for tips and advice on modern smokers.) **RULE 2:** **WHEN THE PIT IS HOT, COOK A BUNCH OF THINGS** Once you commit to a session of coal-fired pit cooking, you should cook as many things as possible so you make the most out of your time and the heat in the pit. It takes about three hours to get a pit up and running to where you've burned your wood into coals and you've used those coals to heat the pit: Now it's ready for cooking. I say once you've spent three hours getting your pit going, why not take advantage by cooking a bunch of stuff in it? Are you making a brisket? Might as well smoke some cheese in there too, throw in a pork shoulder, or toss a belly on the coals so you can make barbecue sandwiches. The inside of a pit is larger than the inside of a commercial oven and probably larger than the inside of your backyard smoker or grill, even if you're working with professional equipment. **A NOTE ABOUT PIT CAPACITY:** ONE: You have room to cook multiple things at the same time, so do it if you can. TWO: You can scale the amounts in the recipes I'm giving you to suit your needs. Don't need a 20-pound (9-kg), 5-bone prime rib this time? Get a 10-pound (4.5-kg), 3-bone one instead, and adjust the cooking time. (Don't change the technique, just alter the cooking time. 
My rule of thumb is that you need to cook the meat to the same internal temperature I describe in the larger-sized recipes, just reduce the cooking time by 30 minutes per pound of meat.) **RULE 3:** **KEEP THE HEAT CONSISTENT** Don't open your pit or your smoker if you don't have to. Every time you open it, you lower the temperature inside by about 5°F (3°C) for every minute it's open. It'll take several additional minutes of cooking time to make up for that loss of heat. When you're cooking barbecue it's very important to maintain a consistent temperature inside the pit. **RULE 4:** **LET THE MEAT REST** When you take it out of the pit, let the meat rest, loosely covered in a pan or on a cutting board. It has to rest after you cook it so that the flavors you worked so hard to infuse concentrate, the texture solidifies, and the temperature regulates throughout the piece of meat. Never skip this step, no matter how much of a hurry you may be in to eat. A **INTRODUCTION** * * * **THE TRUE STORY OF BARBECUE** I am going to tell you the story of how I got into barbecue, which includes the story of the art form that we today call barbecue plus the story of how I learned to barbecue from my daddy, Jack Mixon. That's a lot of story, and it's going to involve an idea that I think gets overused a lot, one I don't often refer to and sure don't take lightly, and that is "old school." Some people in the world of barbecue claim the term "old school" just because they're cooking with wood ("stick burning," as we call it), but that's not the real deal. In barbecue, old school means cooking on a homemade coal-fired masonry pit, where the first step is burning down wood to make your own coals, then shoveling those coals beneath the meat, and finally cooking that meat over direct heat. That's old school. And that's the way I learned how to do it and how I'll teach you how to do it right here, right now. 
A **MY FIRST TIME** The first experience I had cooking something like what you'd call "barbecue" was with my dad when I was a kid, and it wasn't at the pits where he barbecued for his take-out business. It was when he allowed me to cook a steak at home on our grill. Every Saturday night, we always had steak. And the steak of choice for Jack Mixon was a T-bone. I was probably eight or nine years old when he allowed me to cook the steaks for supper one night on our charcoal grill. Well, that was a big responsibility for me because my dad, Jack, he was pretty damn tough. He'd tear your ass up for spilling tea at the table. All I could think about was what he would do if I messed up his steaks. But I was also intrigued enough to want to do it anyway, mostly because it was something I never got to do before in my life—and, you know, at eight or nine years old, I hadn't been here long enough to do all that much, so this was exciting. And I wanted to show my dad that I could do it. But little did I know then that there was no way, short of turning the steaks into charcoal, that I could mess them up because Daddy liked his steaks well done—cooked until they were dead. Needless to say, they turned out great. And from that point forward, I was interested in anything to do with fire, and especially when that fire was cooking meat. _At the pit as a young'un with my dad, Jack Mixon._ A * * * * * * **About Hog Cooking** * * * I have been called "The Best Hog Cooker in the World," and, no lie, I think it's true. I have a unique history with the animal, having grown up around folks who were barbecuing it since before I can remember. And in barbecue competitions, pork has always been a winning category for me. This is in part because the natural sweetness in the meat pairs so nicely with the fruit woods I like to use in my fire. 
When I got started cooking barbecue, the top dog on the competition circuit was Pat Burke and his Tower Rock BBQ team from Murphysboro, Illinois, and I took inspiration from him. He wasn't playing around drinking and partying at contests—he was working hard instead. His team members were not hobbyists, no sir, and they were there to cook and make money. At the time I started cooking in competitions, I focused on my hogs because I'd seen my dad do them countless times in our own pits. I had to learn, though, how to take my knowledge and turn it into something that could impress those folks judging the contests. At Memphis in May events, whole hog is the key category. You are visited in person at your cooking site by a judge who sits down, chats, and judges your hog on taste, tenderness, and appearance. I decided on a hog-cooking philosophy early on, which is to focus on helping my meat taste like meat, and not like grape jelly or maple syrup or any of the other so-called secret ingredients my competitors claim. So even though I've learned a whole lot about presentation and making up the beautiful-looking Styrofoam boxes of pork we submit to judges at contests, I've held on to my simple philosophy of taste since the beginning, and it's been a winner: Great pork barbecue should have the flavor of the meat coming through first and foremost, it should be moist and tender, and it should have layers of flavor when you eat it. The first layer is the natural flavor of the meat. The second is the flavors it picks up from the salt, pepper, and vinegar you seasoned it with before you cooked it. The third layer comes from the smoke that enters the meat from the burning of the wood you've selected and turned into coals. Smoke and meat: That's what makes pork become that thing we know and love as "barbecue." * * * A **THE FIRST PITMASTERS** I was raised shoveling coals under masonry pits, and I thought that was the only way that barbecue was done—I thought that was the norm. 
Little did I know that what I was doing as a kid was an ancient form of cooking that came from the time the forefathers of this country first started barbecuing. (My first realization that coal-fired pit barbecue was an unusual throwback type of cooking didn't happen until I was in my teens, when I started noticing that everybody else but our family and a few others in town had the kind of metal cookers like the ones I use now at competitions.) We know man has been cooking meat over fire since the dawn of time, and we know that the people who first came to this country did the same. They were just trying to make food to feed themselves and their tribes back then, of course, and they sure weren't trying to become well-known pitmasters or some other title that we like to give ourselves in this day and age, like "grill master," "lord of the grill," or whatever. These gentlemen—and women, too—were trying to feed large families and to do it cheaply with what they had. And they took the livestock they had on the farms and the trees they had surrounding them as their fuel, the wood they needed to burn for their fires, and they used things they had on the farm for seasoning, such as what they'd put up in the root cellar. Back then you had pepper, you had salt, and you'd distill your own vinegars. That's why today we relate vinegar to Carolina barbecue and southern barbecue in general, because it has been a part of cooking meat down here since people first camped out on this land. The combination of meat and smoke with vinegar-based sauces has been around almost as long as this country has been inhabited. And let's not forget that vinegar has an added benefit: In those days it wasn't just a flavoring agent, but it was also used as a preservative and an important antibacterial agent. So it kept your meat tasting good, and it kept it safe to eat, too. Those early Americans were pretty smart, if you ask me. 
So we're talking about necessity here, not about somebody saying, "I want to be a great chef." I'd argue that without this type of barbecuing, we wouldn't have great chefs. That's because the evolution of cooking meat over fire led to our first pitmasters, otherwise known as the people who were in charge of the barbecuing, whether it was in encampments, backyards, little shacks along the side of the road, or, eventually, in proper restaurants. These are the folks who got their start cooking on the grounds at the local churches or for the politicians' fund-raisers. These people became very well known and respected in southern communities because they were the keepers of the fire. That was a pitmaster. Back then the title meant respect. These people learned their art; they learned how to select meats, they learned how to select the woods they wanted to use, and they learned how to manage fire. Today, "pitmaster" usually refers to people cooking on TV more than it corresponds to a respected community leader. Let's change that right here and now. Now, recall that barbecuing isn't just about cooking meat and turning out food. It's about family; it's about the people around you when you eat it and the shared experience of enjoying it. Barbecue is the most social food we have. What I mean by "social food" is that I never heard of anybody eating barbecue all alone; that's probably why "barbecue" means both a style of cooking and also the event where that style of food is eaten. When you say "barbecue" in the South you're likely referring to the type of events that support communities and raise money for a whole range of causes, like local church groups and Little League teams and stumping politicians. Some of us southerners have historically had barbecue at our wedding receptions. Now I see that our trend is spreading around the country, in part I think because barbecue just lends itself to gatherings and togetherness. 
The flavors of the rural South can be found proudly displayed at occasions all over the country, not just the ones in our own backyards down here. It tickles me that the people who used to want white tablecloths and finger sandwiches cut out like little moons and stars or whatever at their weddings are now calling up pitmasters like me, asking us to bring our own homegrown legacies to their celebrations. That's how social barbecue is: It's the thing to eat at parties everywhere. And what's more, pitmasters today are once again important parts of their communities. Hell, I became mayor of Unadilla, Georgia. _Three generations of pitmasters: my son Michael, me, my brother Tracy, and my mother's brother Charles Cross._ A **ABOUT THE RECIPES IN THIS BOOK** I am going to take you back to the way real barbecue is done here, like I've promised, and I'm going to do that through my recipes. There are no complicated recipes with exotic ingredients in this book. Because that's not what early barbecue was about, and even today it shouldn't be about anything "complex." Barbecue is a simple food, and it was always intended to be that way. It's about meat, it's about smoke, and it's about fire: That's all it's about. I think that sometimes, some celebrity chefs get out in the world and try to justify their salaries by making barbecue out to be something I don't think it was intended to be. It's supposed to be something you can do, and do very well, in your own backyard. I tell a lot of people who come to my cooking schools that there's nothing wrong with putting your stamp on your food, with trying to make a little tweak here or there to make the barbecue unique. But I caution you that whatever spice rub or sauce you concoct for yourself, try to keep some form of balance there, something that relates to this old-school way of doing things. I say keep to the lines of tradition. 
I'm not saying you've always got to make the strict traditional coal-fired masonry pit–style barbecue I was raised on—consisting mostly of salt, pepper, and vinegar for flavoring—but try to keep it simple as much as you can. What I'm saying is that just because you can do something with a piece of meat doesn't necessarily mean you need to do that. For example, I love chocolate cake and I love spareribs—but I don't necessarily want my spareribs to taste like chocolate cake. That's a little bit of me on a soapbox right there, but to me barbecue's not just about cooking food—it's my life, and it was a big part of my dad's life, and it's been a big part of my family's life for a long time. The thing is, if you ride through south Georgia right now, especially in the more rural areas, I guarantee that in 90 percent of the yards you drive past there will be some sort of smoker or grill or pit—and that's just the way it is, that's the way we were raised down here. You have a car, you have a tractor in the barn, and you have a smoker in your backyard. These are the accessories of living in the South. We take barbecue for granted a lot, and most of us were unaware that everybody else didn't enjoy what we do until recently. Now, though, barbecue has been put out there in front of the country, as a matter of fact in front of the whole world, and it's a cool thing. Right now if you look around, especially on TV, everything that's popular revolves around the South. I think everybody's intrigued by ideas about the southern lifestyle, and a big part of that southern lifestyle is barbecue. And barbecue itself is a lifestyle. I believe that firmly, because I learned it from my dad, who learned it from his dad, who learned it from his dad, right on down through the generations, stretching from the time the Mixon family first hit the shores of Jamestown, Virginia, in 1650. It's how we live; it's what we do. And now I'm going to teach it to you. 
_Celebrating a win early on with my son Michael and members of the Jack's Old South team._ A * * * * * * **How Much Have I Won?** * * * I entered my first barbecue competition, the Lock-&-Dam BBQ Contest in Augusta, Georgia, six months after my father died in January 1996. I did it mostly to see if I could sell some of the bottled sauce my parents were making and selling, get a little name recognition out there for our brand. I took first place in the whole-hog category, first place in the ribs category, and third place in the pork shoulder category. At my first contest ever. Once I got a taste of how successful I could be at competitive barbecue, I never looked back; I entered as many contests as I could, pushing myself to make better and more delicious barbecue all along the way. _Early days on the competition circuit._ Since then, I've won more barbecue competitions than anyone else in the world, and I have earned more than $1 million in prize money. Here's why you ought to pay attention to what I say: Since 1996, I've won more than two hundred grand championships, thirty state championships (including wins in Georgia, Florida, Alabama, Virginia, Arkansas, Mississippi, Kentucky, Illinois, South Carolina, and Tennessee), and eleven national championships, and I've earned more than eighteen hundred trophies. My team has taken three first-place whole-hog awards at the Jack Daniel's World Championship Invitational Barbecue competition, and we have been crowned grand champion at the World Championship in Memphis three times (in 2001, 2004, and 2007). We have also taken first place in the whole-hog category at the World Championship four times (in 2001, 2003, 2004, and 2007). Jack's Old South has been the Memphis in May Team of the Year with the highest number of points for eight years (from 1999 through 2004, and also in 2007 and 2009). 
We are also the only team to win grand championships in the Memphis in May, Kansas City BBQ Society, and Florida BBQ Association contests in the same year. I was inducted into the Barbecue Hall of Fame in Kansas City in 2013. * * * A **RULES** _Rule 1: Use the Right Wood_ _Rule 2: Build Yourself a Pit_ _Rule 3: Learn to Fire the Pit_ _Rule 4: Maintain Consistent Heat in the Pit_ _Rule 5: Choose Your Fire Wisely_ A **RULES** * * * **RULE 1:** **USE THE RIGHT WOOD** Now, when I say, "Use the right wood," I'm referring to the process of choosing the type of wood you'll use to smoke your meat, which is the very first step of cooking real pit-smoked barbecue. When I was a kid and it was time to get cooking, our first job was to pick the woods my dad liked, and the woods he liked were, number one, local, meaning whatever grew in south Georgia, and, number two, fresh and green. Jack liked to use a blend of hickory and oak for this style of cooking, and it had to be green, by which I mean young and flexible. Green wood is important because its moisture ensures that when you burn it down for coals, the coals are big and pop off easily. Think of it this way: With today's type of barbecue technology, we all use seasoned wood, because when it cooks down it makes ash (and we find the smoky flavor from that ash desirable); but when we cook old school, we want the wood to burn down and make coals that will have to burn again, and for that process to work well you need to start with green wood. More about that wood: The best advice I can give you is the advice my dad followed, which is to use what grows close around you. I get asked a lot about the best woods to use for barbecue, and I have my own distinct opinion on that based on my own experience: You already know I'm down in south Georgia, and that my dad taught me how to use a combination of young hickory and oak from the time I was a little boy. 
Then I got into competitive barbecue, and because I am in the Peach State, I have access to peach wood (I'll sell you some and ship it to you if you want—www.jacksoldsouth.com). So I'm also a big fan of peach wood because as I've progressed as a pitmaster I've learned how to blend woods. I like peach wood blended in with what started me out, hickory and oak. When I'm asked about which wood to use, I ask people where they live and what they have around them. If you have access to young green fruit woods, take advantage of them: Apple wood, pear wood, apricot wood, grapevine wood, or cherry wood—any and all of these are great choices for barbecue and should be available locally in your groceries and wherever you can buy a smoker. That's what the first pitmasters did: If they lived in Missouri, they didn't have the option of having peach wood shipped to them like you and I do today. They had to cook with the wood that was around them. So my answer to which wood to use is this: There are a lot of different woods that are great for barbecuing, and what you should use just depends on where you're located and what kind of wood or trees you have in your area. **The very first step of cooking real pit-smoked barbecue is choosing** * * * **THE RIGHT WOOD.** * * * **And by that I mean local. Use wood that grows around where you live.** A **RULE 2:** **BUILD YOURSELF A PIT** _materials and tools_ **Large heavy-duty shovel** **Rake or hoe** **Hand tamp** **Pick** **Tape measure** **48 cinder blocks (standard 8 × 8 × 16-inch / 20.5 × 20.5 × 40.5-cm size)** **Level** **Four 2 × 4-inch (5 × 10-cm) stakes** **Masonry sand** **Carpenter's square** **1 sheet of 48 × 80-inch (122 × 203-cm) expanded metal** **2 sheets of 4 × 4-foot (1.2 × 1.2-m) 16-gauge steel** **1 sheet of 2 × 4-foot (0.6 × 1.2-m) 16-gauge steel** Building a backyard barbecue pit is about half a day's worth of good hard labor, but it's as straightforward as it gets and doesn't require a high degree of skill. 
All the materials are available at a good local hardware store or home improvement center, and you can get it all for less than $400. **PICK A SITE** You need a good spot for your pit and its 48 × 80-inch (122 × 203-cm) foundation. You need level ground that is a safe distance (about 20 feet / 6 m) from your house and from any wooden structures like decks or gazebos, and as clear of dry vegetation as possible. **LAY THE GROUNDWORK** Once you've settled on a spot, take your time to use the shovel, rake, and tamp as necessary to level the ground thoroughly. You will need to remove grass, weeds, and rocks from the site. Dig out any high spots in the area; picks and shovels work fine for this. 1) Take 14 cinder blocks and assemble them into a rectangle that is 48 × 80 inches (122 × 203 cm); it will be five cinder blocks long and two cinder blocks wide, as shown. 2) Place the 2 × 4-inch (5 × 10-cm) stakes at the four inside corners. Then remove the cinder blocks. Excavate topsoil from the interior area of the pit (the area inside the stakes) and refill it with the masonry sand—you'll end up removing 4 to 6 inches (10 to 15 cm) of topsoil and replacing it with 4 to 6 inches (10 to 15 cm) of masonry sand. * * * _* Note: That's the basic method, and it will have the grease from what's cooking in the pit run down laterally into the topsoil, which is fine. If you want to do something a little more elegant, you can dig down a full foot (30.5 cm) and grade the excavated area so that any cooking grease will run down to a central point that is 12 inches (30.5 cm) deep—then just refill the hole with sand or line it with fire brick. Again, this is not necessary, but it is a nice feature_. * * * 3) Reassemble the first 5 × 2 rectangle of cinder blocks. Decide which of the shorter, two-block ends you want to be the front of the pit and remove those two blocks. 
This gap will be used to load fresh coals in during the cook, so make sure it's facing an area that provides enough space for you to do something like maneuver a hog gurney and swing a shovel. **LAY THE PIT** 4) Stack a second layer of blocks atop the first. Make sure all the blocks line up with those under them and use the carpenter's square to ensure that the two blocks at each corner are flush, then lay the sheet of expanded metal on top of that. 5) Stack two more layers of cinder blocks on top of the expanded metal grill and place the two 4 × 4-foot (1.2 × 1.2-m) steel sheets over the top as a roof. There is going to be a bit of overlap between the sheets as well as over the edges of the pit—that's good. The overlap will allow you to grip the sheet and slide it off when checking on a cook and maintaining the cooking temp in the pit. 6) Finally, set the 2 × 4-foot (0.6 × 1.2-m) steel sheet against the open end of the pit, allowing for ventilation, and that's it. Now you just need some wood _(see Use the Right Wood, this page)_, meat or different kinds of food to cook on the pit's 10 square feet (0.9 sq m) of grill space, and all your friends. A **RULE 3:** **LEARN TO FIRE THE PIT** Making your own coals is very easy when you understand how the process works. All you need is a 55-gallon (208-L) barrel, green wood, and fire. I make coals by burning the green wood, which contains a significant amount of moisture, in a large heavy barrel until that wood pops off chunks of smoldering wood. Those chunks are going to be your coals. So first you load your barrel with green wood, then you let it burn, and finally the coals fall to the bottom of the barrel, where you have made a hole large enough that you can reach in with a shovel to remove them. Once you have burned your wood and created your own coals, all you need to do is transfer those coals into your pit. 
Use a long-handled heavy-duty shovel for that job, wear heavy work gloves, and take care not to burn yourself on your way from the barrel to the pit. * * * **LIGHTERED KNOTS** My daddy loved doing everything the old-fashioned way, including lighting fires. This means he lit them using a technique known as "lightered knots." If you want to feel especially authentic, like you may have been the one who invented fire, give it a try: 1) You need dead wood for this, which has the richest resin and is driest to burn. Walk around a forest (or your backyard) and look for old pine trees, which are an excellent source. Once you've found one, saw off a dead branch. This is some high-quality firewood. 2) Using a small sharp knife, carve the branch down to expose the "fatwood," which looks like small, unblemished dry matchsticks. 3) Whittle those shavings of fatwood until they resemble fine chips. 4) Light your chips and aerate properly in order to create a healthy flame. That's old-school fire. * * * A **RULE 4:** **MAINTAIN CONSISTENT HEAT IN THE PIT** The single most important job for any pitmaster is to manage the fire in the pit and keep the pit temperature consistent. My dad did this with two pieces of old tin he used to cover the pits that could slide back and forth. They were loose enough to allow some smoke to seep out and some oxygen to get in and allow the embers to glow, but tight enough to keep heat inside the chamber. We maintained the temperature by running our hands over the bare tin: If we could hold our hands on that tin for a count of 10 without snatching our hand back, we knew it was time to re-fire the pit. As you can see, we didn't use a thermometer or other kind of gauge to measure the temperature. In my adulthood I've experimented with the 10-count method: I used a temperature gauge in my pit just to find out what cooking temp my dad was likely maintaining. And that gauge was telling me something around 250 to 275°F (120 to 135°C). 
So you can use the Jack Mixon 10-Count System, or you can use a temperature gauge; that part is up to you. What's important is mastering the art of keeping the temperature consistent in that pit. Don't get impatient and over-fire and try to get the cooking done quicker, because then you will have a flame-up and when that grease hits too much flame or too many coals down there, it will burn up anything cooking in the pit. About every 30 minutes, run your hand over that tin and see what's what. (If you would like to use a thermometer, check it every 30 minutes and aim for a steady and consistent 250 to 275°F / 120 to 135°C.) If you can rest your hand on the top for 10 seconds (or more), you know it's time to shovel in some new coals. First collect the coals in a canister or in the body of a kettle grill. Light them and get them good and hot. Then use your shovel to transfer the hot coals—always transfer hot coals that are lit into the smoker. Make sure you shovel them right onto the hot coals burning in there so they can keep your temp consistent where it's already been firing. Final rule for maintaining your heat: Try hard not to let any heat out of the pit if you don't have to; don't open your pit any more than necessary. Remember: Every time you open the pit, you lower the temperature inside by about 5°F (3°C) for every minute it's open, and it takes several additional minutes of cooking time to make up for that loss of heat. A big part of maintaining a consistent temperature is keeping that pit closed as much as you can. A **RULE 5:** **CHOOSE YOUR FIRE WISELY** The first decision you have to make is what kind of cooker you're going to use. Listen, you don't need competition-level equipment to make excellent barbecue. There is an incredible range of smokers on the market, from the cheap charcoal "bullet" jobs to our top-of-the-line Myron Mixon Smokers (myronmixonsmokers.com), and the best one for you is the one that you feel most comfortable using. 
* * * _* Note: I believe there is no substitute for making your own coals and smoking meat in a masonry pit—that's why I'm writing this book, because the particular flavor that process imbues is unique, and once you've tried it you'll be hooked. However, it's not the only way to make good barbecue. Smoking meat in a smoker using a water pan is what I do in competitive barbecue and it results in a more polished product, if you will—it's not as "rough and ready" as pit-smoked barbecue. It is also delicious and prized, but it tastes a little different than the barbecue I grew up eating. And some folks, god bless them, actually prefer it. And as much as I want you to build a pit and learn to cook like my daddy taught me, you can use any one of these options and get a good result if you follow my techniques_. * * * Most American households own a grill, and the most popular type of these is a **gas grill**. If you have a gas grill, you can adapt it to smoke food. Here's what you do: Take your favorite wood chips, put them in a bowl, and cover them with water; let the wood soak overnight. When you're ready to cook, drain the chips, wrap them in one or two packets of foil, and use a fork to poke several holes in the top of each packet; set aside. Close the grill's side vents—don't worry about doing an airtight job of it, just do the best you can to shut them. Now, these days most gas grills have either two or three burners that can be controlled individually. On a two-burner gas grill, light only one side; on a three-burner grill, light the two outside burners and leave the burner in the middle unlit. Place the packet of wood chips on the lit section (or sections); then place the meat on the unlit section and cook it—the flame will smolder the wet wood chips, producing the smoky flavor in your meat. You can use any recipe in this book designed for a pit in any of these smokers, no problem: They will work perfectly with my recipes. 
I have always said that there is no need for high-end equipment to make good barbecue. I tell folks who come to my barbecue cooking schools that it's easier to learn on simple equipment and then move on to more advanced types of cookers, so save your money. A * * * * * * **HOW A SMOKER WORKS** * * * The smoker contains racks that the meat cooks on. The smoker's base is filled with wood to provide heat and smoke. (Sometimes charcoal is used to start the wood burning, or charcoal is used for fuel and wood or wood chips are added on top in a pan—like the one shown above—for smoke.) The smoke flows around the meat on the racks, flavoring it, while the indirect heat cooks it. It's a simple thing. * * * A * * * * * * **CHARCOAL GRILL** * * * **Charcoal grills** are also known as kettle grills. To smoke food instead of grill on this type of equipment, simply soak your wood chips in water overnight, drain them, and set them aside. Start your fire and get your coals nice and hot. Now rearrange your coals so that you bank all of your charcoal on one side of the kettle, leaving a cold area where you'll place your meat. Arrange a grilling rack over your coals. Lay your food on the grill. Finally, place the soaked and drained wood chips directly onto your coals—they'll smoke right up. Place the lid on the kettle, experiment with how much or how little to open the vent on the top of the kettle to regulate the heat, and close the lid and cook. * * * A * * * * * * **BULLET SMOKER** * * * A **bullet smoker** is an electric or charcoal smoker and is what most grilling enthusiasts progress to when they first start smoking. It is called a "bullet" or sometimes a "torpedo" as a nickname for its long, cylindrical shape. The electric bullet smokers have a coil ring of heat in the bottom. The charcoal kind come with a charcoal chamber with a perforated ring that rests on top of the grill grate—all you have to do is pour a chimney starter of hot coals into the ring. 
Most bullet smokers come with a water bowl, which rests inside the center station. Fill it with liquid—water, apple juice, or beer are all good choices. This liquid keeps a nice steam circulating inside the bullet to keep the meat from drying out _(see my water pan, this page)_. Then close the door of your smoker and place the smaller, lower cooking grate inside the center station. Next, rest the larger upper cooking grate at the top of the center station. If you're using the charcoal kind, check the temperature every 20 minutes or so and shovel in hot coals as necessary to maintain your heat. If you're using the electric kind, you don't have to worry about shoveling coals. In both cases, you control the heat by opening and closing the vents as necessary. * * * A * * * * * * **OFFSET SMOKER** * * * An **offset smoker** (also called an offset barrel smoker, a horizontal smoker, a pipe smoker, and a stick burner) is a charcoal smoker that has two parts: a cylindrical cooking chamber that is set horizontally (and not vertically, like the bullet) attached to a firebox, which has top or side door access and an adjustable vent. This design fairly well mimics a pit: The fire is built in one chamber and the smoke and heat surround the food in the other. This way the heat is next to (not under) the meat. A good offset smoker does not leak smoke through the doors; the connection between the firebox and the smoking chamber should be as airtight as possible (although no smoker is ever completely airtight)—many of them have doors and seams that seal tightly and so it's best to use those if you can. Some have a convection plate, a perforated metal plate you can slide back and forth under the food in the smoke chamber to regulate airflow. Some have internal piping, baffles, and a chimney, which is also designed to regulate temperature evenly. To get to work, soak your wood chips overnight in water, then drain them when you're ready to cook. Begin with your vents open. 
Start your charcoal in a chimney starter and then spread them out over the rack at the bottom of the firebox. Use the vents to regulate the temperature. Put your meat on the grate in the cook chamber. Put your soaked wood on top of your burning coals in the firebox. Check your temp every 20 minutes, add new hot coals as necessary, and slide your vents to manage the heat. * * * A * * * * * * **CERAMIC SMOKER** * * * A **ceramic smoker** was designed in homage to the Japanese cooking urns that have been popular in Asia for centuries. Some are made of terra cotta, cement, and even lava rock, which are thought to be excellent insulating materials and especially good at radiating and retaining heat; I've seen some do-it-yourself types build them out of larger flower pots. You can use ceramic cookers as pizza ovens and tandoori ovens for Indian food. You use these, essentially, by filling the bottom of the chamber up with charcoal—up to the bottom air holes. Start your fire with the bottom vent open, then sprinkle your presoaked wood chips on top of the fire. These smokers usually come with a perforated plate that rests over the charcoal chamber. A grate fits over the top of that. Close the smoker and open the vent in the lid. Let the smoker come to temperature. Then regulate your heat as you would with any other cooker, opening and closing the bottom and top vents as necessary. * * * A * * * * * * **H2O COOKER** * * * The **H2O Cooker** is what I use when I'm competing or whenever I'm smoking in a modern way. I have designed a whole line of these smokers—Myron Mixon Smokers (myronmixonsmokers.com)—that use a trademarked Waterpan Technology™, which uses an indirect water cooking system to evenly disperse heat throughout the cooking chamber and lock in the meat's natural juices while giving you the option of cooking hot and fast or low and slow. 
While I think our smokers are the best on the market, these types of water cookers are very popular with many of the best barbecue competition teams. Like most smokers, they are fueled by wood that provides smoke and heat. What makes these cookers special, though, is the water, which provides moisture, and the thick, insulated walls that keep the heat in. Whether you're cooking hot and fast or slow and low, these smokers allow you to cook three different ways at the same time: by smoking, roasting, and steaming. That allows you a lot of flexibility in how you can cook and the results can be amazing. * * * A * * * _Now that we've got that out of the way_ **LET'S TALK THE TWO-STEP PROCESS** _to get started with a smoker_ * * * **STEP 1: SET UP THE WATER PAN** I like to use a water pan in the bottom of my smoker, and I get an awful lot of questions about it. Like everything else I do, it has a purpose, but this is a step you can certainly skip; using a water pan in your smoker is not a barbecue requirement, but I think it's a worthwhile enhancement. I happen to like it because it creates a water bath system inside the smoker that helps maintain the meat's moisture; it's like a maintenance device that helps keep some moisture in the smoker and helps tenderize the meat while you're barbecuing. Here's the method: Fill a heavy-bottomed medium pan (no bigger than a 13 × 9-inch / 33 × 23-cm lasagna pan) about halfway with water and clear an area in the middle of the bottom of your smoker to accommodate it, leaving coals stacked up on either side of the water pan. **STEP 2: LIGHT THE FIRE** When I start my smoker, I have never apologized for the fact that I like using lighter fluid to get a good blaze going and burn my wood—and let me be clear, I am what's known on the competition circuit as a "stick burner," which means I believe that nothing flavors meat better than whole sticks of wood and the natural smoke flavor that comes from burning them. 
I hate it when people say that if you use lighter fluid, the meat you cook will end up tasting like lighter fluid—that's only true if you don't know how to use it. After you apply the fluid, just let the coals burn for a while so the fluid burns off before you put your meat on to smoke. A **HELPERS** _Mustard-Based Barbecue Sauce_ _Vinegar-Based Barbecue Sauce_ _Pit Brine_ _Pit Mop_ A **HELPERS** * * * The earliest pitmasters used what they had on hand. I'm talking about the first settlers to the American colonies who came from England. They had large families to feed and they had to feed them cheaply, so they fed them the livestock they had on the farms, they used the woods around them and the trees as their fuel, and they barbecued. And the way they barbecued is what we're going to do now—basically what we term "pit-fired" barbecue using masonry pits that are coal-fired—where we burn down the woods, make our coals, and shovel those coals into the pits to maintain a consistent temperature. And that's what they did. So when I say "meat helpers," I'm talking about the ingredients they used as far as flavoring. When we talk about sauces, brines, and mops, they all have distilled vinegar as a base. Vinegar was used not only because it was a flavor additive but also because it was a preservative. The vinegar preserved the meat, kept it from going rank or bad. Our forefathers also had pepper flakes and salt, ingredients they kept down in the root cellar, and that's what they used for seasoning. It was very, very simple. These early pitmasters were survivors, trying to make their way in this new country. And with pit barbecue they hit a home run right off the bat. The technique called barbecuing traveled all the way down the East Coast and into the South; this was before there was a Kansas City, before there was a Memphis, before people started migrating toward the Midwest, and before they came up with ketchup-based this and that and whatever. 
This is the way the majority of people on the East Coast barbecued, from Virginia all the way down through Georgia and Alabama. The flavors are vinegar based, the enhancers are red pepper flakes and salt, and that's what you had. And guess what? It's still a great combination of flavors today. I'm going to give you two sauces: One is a vinegar-based sauce, a basic barbecue sauce you can use on anything, anytime, anywhere. The second one is a mustard-based sauce, which I'm including here because it's the sauce that my dad, Jack Mixon, made for my mom, who loved the pop and tang of mustard sauce. These two sauces are perfection, and I can't build on something better than that. A **BARBECUE** **SAUCES** _Makes:_ **1 QUART (0.9 L)** **MUSTARD-BASED** **Mustard-based barbecue sauce is traditional in parts of South Carolina and Georgia—some historians think that's because these areas had a high population of German immigrants who came over in the 1700s and brought their tastes along with them. My mother happened to like mustard-based barbecue, and this is the sauce my daddy would whip up for her.** _ingredients_ **2 cups (480 ml) distilled white vinegar** **2 teaspoons freshly ground black pepper** **2 teaspoons granulated sugar** **2 teaspoons red pepper flakes** **2 teaspoons ground chili powder** **1 cup (220 g) packed dark brown sugar** **2½ cups (600 ml) prepared yellow mustard** **¼ cup (60 ml) ketchup** In a large heavy saucepan or stockpot, combine all the ingredients and whisk well. Cook over low heat for 20 minutes, whisking occasionally to combine. Do not bring the mixture to a boil. Let the sauce cool completely, about 30 minutes, then funnel it into a refrigerator-safe container. The sauce will keep, refrigerated, for up to a year. A **VINEGAR-BASED** **This is the basic formula for a classic vinegar sauce. 
You can add any or all of the following optional ingredients based on your personal flavor preferences: 1 cup (240 ml) ketchup, ½ cup (120 ml) hot sauce, ½ cup (100 g) sugar. Follow the exact same procedure, adding the extra ingredients to the pot with the others.** _ingredients_ **1 quart (0.9 L) distilled white vinegar** **1 tablespoon kosher salt** **1 tablespoon coarsely ground black pepper** **1 tablespoon red pepper flakes** In a large heavy saucepan or stockpot over medium heat, combine all the ingredients. Stir to dissolve the salt completely, cooking for 3 to 5 minutes of constant stirring. Do not bring the mixture to a boil. When the salt is thoroughly dissolved and the spices have infused the vinegar with their flavor, remove from the heat and let the sauce cool completely, about 30 minutes. Funnel the sauce into a refrigerator-safe container. The sauce will keep, refrigerated, for up to a year. A **PIT BRINE** **A brine is a solution of salt and water; if you soak meat in it before cooking, it will tenderize it and keep it from drying out, which is a danger in all cooking but especially in pit smoking. Brining enhances juiciness and moisture when you submerge your meat in the solution. Here I'm giving you a recipe for basic brine that I like for pit smoking in particular, where fancy flavors are not necessary. If you want to add some other ingredients in here, like your favorite herbs and spices, that's fine. I don't, because for me cooking old-school barbecue isn't about trying to make something that you'd find in a damn French restaurant; we're talking about the basics here, how barbecue got started.** _Makes:_ **ABOUT 3 GALLONS (11.4 L)** _ingredients_ **3 cups (720 g) kosher salt** **3 cups (600 g) sugar** In a large heavy stockpot over high heat, bring 3 gallons (11.4 L) water to a boil. Add the salt and sugar and stir until dissolved. Remove from the heat and let cool completely, about 30 minutes. 
If reserving for later use, funnel the brine into a container and store in the refrigerator for up to 2 weeks. A **PIT MOP** **A mop is basically a meat-moistening agent, and its main ingredient is vinegar _(see this page for the importance of vinegar to barbecue)_. The mop also infuses a little flavor into the meat, an added layer of complexity. We mop almost all of our long-cooking meats at varying intervals dependent on how many pounds of meat we're dealing with—each recipe has a specific time at which to open the pit and mop, ranging from every fifteen minutes to every thirty after a meat's "crust" forms. Mopping is especially important in smoking because we're cooking with actual coals beneath the meat, and we don't want our meat to dry out.** _Makes:_ **ABOUT 1 ½ GALLONS (5.7 L)** _ingredients_ **1 gallon (3.8 L) distilled white vinegar** **½ cup (40 g) red pepper flakes** **½ cup (120 g) kosher salt** **4 lemons, cut in half** Combine all the ingredients and ½ gallon (1.9 L) water in a large pot over medium-high heat. Bring to a boil, and let the pepper flakes open up and the pepper seeds open out into the liquid and give it some seasoning. Let the mop cool completely, about 1 hour, then funnel it into a container large enough to hold it. It will keep for a year in the refrigerator, and you can portion it out as you cook your barbecue along the way. To apply the mop to the meat, take a brand-new floor mop, cutting the handle to a size short enough so that it's easy for you to manipulate, and periodically use it to dab the liquid mop onto the meat to give it moisture and also to give it flavor, that southern-style barbecue flavor that a mop's all about. 
A **WHOLE HOG** _Coal-Fired Pit-Smoked Whole Hog_ A **WHOLE HOG** * * * The very best reason to consider making a whole hog is because you're fixin' to have a whole lot of people over to your house for someone's birthday or an engagement celebration, or because it's the Fourth of July (come to think of it, that's a birthday party, too). Remember when I said that barbecue is a social food? (You can refresh yourself by turning to _this page_.) Well, the whole hog is the most social entrée in the field. To me a pig pickin' is the ultimate expression of our southern culture, because it's the time when friends and family get together over a meal to share stories and make memories; it's the ritual that bonds us. I'm not going to pretend with you: Making a whole hog is something that requires effort and patience. But when you master it, it's like mastering any other craft because you acquire knowledge that will last you a lifetime. In this case, you also have something uniquely delicious to show for it. Come on, I'll teach you. A **COAL-FIRED** **PIT-SMOKED** **WHOLE HOG** **Whole hog was something my dad didn't cook a lot of: He liked to do the joint meats more, the hams and shoulders. He did whole hogs as special requests, or when he wanted to show off a little bit at Christmastime when he had all the family over.** **Remember when I told you about how my dad never used meat thermometers or gauges? (If you don't, turn to _this page_ and see how he ran his hand on top of a sheet of tin and counted to ten.) Learning to test meat for doneness without the aid of a thermometer is one of the marks of an experienced pitmaster. The ability to do it comes with time and repetition. You will make some mistakes along the way, but mastering the ability to test meat for doneness will give you a whole lot of confidence in your barbecue and certainly take away a lot of stress. 
When it comes to the whole hog, it's like your personal learning laboratory: Pay attention to the way different meats on the hog look when it's ready to be pulled from the pit.** _Cooking time:_ **10 TO 12 HOURS** _Makes:_ **20 TO 25 SERVINGS** _for the hog_ **1 (160-pound / 73-kg) hog, head-on, butterflied** **1 quart (0.9 L) distilled white vinegar** **1 gallon (3.8 L) Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1 cup (240 g) kosher salt** **1 cup (96 g) coarsely ground black pepper** _for the gurney_ **2 to 3 × 6-foot (0.6 to 0.9 × 1.8-m) welded wire fence, cut to fit the hog (hardware or home improvement stores can cut it for you)** **3 feet (0.9 m) aluminum wire, for fastening** **2 black pipes a bit longer than the length of the hog (4 to 5 feet / 1.2 to 1.5 m), ½ inch (12 mm) thick** **Fencing pliers** **1: SELECT AND PREP THE MEAT** Daddy always liked to get an old breed of pork, a Duroc or a Hampshire or a cross between the two. These are old breeds that came over in the early 1800s; they're very red-meated hogs with a high marbling content and a lot of flavor, and you know that's where it's at. (Other good old American domestic hog breeds to consider are the Berkshire and the Yorkshire. These old breeds are making a comeback; you can ask any butcher to order one of these for you.) The hogs he selected would have marbling all the way down through the meat: I ain't just talking about the fat on the outside of the hog when he split it open, I'm talking about the marbling running down the side of it. He liked to use about a 160-pound (73-kg) hog with the head on; he'd have him what we call "butterflied," where the belly is actually split right down the middle and the hog will lay flat out. Again, a butcher can do this for you. There's not a lot of prep involved. 
The first thing you need to do is make a prep area: On a long table covered with clean butcher paper or another sanitary covering, lay your hog on its back with its cavity facing up. Next Jack would go in and remove the feet, and then he'd remove all the fat that was inside the cavity that he could pull away with his hands; you will ask your butcher to do those things for you. Then he'd take white vinegar and rub it all over the inside of the cavity, touching any exposed meat. There are a couple of reasons for this: One is because of that flavor profile we all associate with barbecue in the South—vinegar, vinegar sauce, and vinegar-based pork. Another is that vinegar is a disinfectant that kills bacteria that might have gotten on the hog. Finally, the vinegar wet the inside of the hog where he was going to apply his seasoning, which was just salt and pepper. He also applied it all over the outside, on the skin, too. **2: MAKE THE GURNEY** Most of the cookers you see these days don't require you to flip the hog; it cooks in the exact position in which you lay it in there. In pit-fired barbecue, the meat has to be flipped. The way you do that with a 160-pound (73-kg) piece of meat is to prepare it first, and then mount it on a gurney. So what my dad did, like everybody before him, was make a "hog gurney" in order to handle the whole animal. Here's how to make one: 1. Lay one piece of fencing out on a table, then pick the hog up and put him belly down onto the wire. Then lay the second piece of fencing directly on top of the hog in line with the sheet beneath the hog. 2. Wire the fencing together all the way down both sides of the hog. 3. Take the two pieces of black pipe and run them down each side of the fencing. Wire the pipe to the fencing to secure each side, and now you've got a gurney. 
**3: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ My dad liked to use a ratio of 3:1 in his combination of woods in his burn barrel to make coals. He'd use three sticks of green red oak (green being fresh cut) to one stick of green hickory. He'd start his fire early in the morning because it takes 3 to 4 hours to get a good bucket of coals burning out of your burn barrel. Once that happened, he'd fire the pits and start getting the pits up to temp, to about 250°F (120°C). (If you put the meat on first and then start firing your pits, you're prolonging the cooking process.) So we'd get the pits fired, get the masonry up to temp, and then we'd put the hog, belly down in the gurney, on top of the grate in the pit. It takes about 2 hours until you can actually hear the dripping fat hitting the coals beneath it and then steaming and sizzling back up. That is a big part of the flavor profile of this style of cooking: Where that meat and fat is rendering, it drops that fat down onto those coals below, hits the coals, and steams back up and hits the meat, providing a unique flavor profile that you can't get any other way. **4: KEEP THE TEMP CONSISTENT, FLIP THE HOG, MOP THE HOG** This hog is cooking, rendering and dripping fat, and I wish we had dang scratch 'n' sniff for this book because that smell is unique to this style of cooking and like no other you'll ever smell. We'll cook this hog for about 6 hours, all the while shoveling the coals, keeping that temperature around 250 to 275°F (120 to 135°C), and you just hear the meat sizzling in there and hear the dripping, hear the coals steaming and giving the meat that flavor that we're looking for. And at that 6-hour mark, we'll go in with our leather gloves, and with two people, we'll grab the ends of the gurney by the pipe, then flip it on its back. 
We've got a crust built up inside that cavity now, and we'll need to take our mop _(seethis page)_ and apply it all over the hog, and in that cavity. Now we put the lid back down and keep the pits fired. About every hour and a half, Daddy would pull the tin back and we'd go in and do another mop, all down inside the cavity and over the skin. **5: TEST FOR DONENESS** Now we're getting to our 10-hour mark, and we start checking this meat for doneness. My daddy did this by grabbing the leg bones and the rib bones to see how they're pulling and if they're getting tender. You want it almost falling-off-the-bone tender. If the bones are slipping easily, we're going to pull the hog out; if not, we're going on that 12-hour mark. And again we're mopping about every hour, hour and a half, with our vinegar mop. **6: PULL AND EAT THAT MEAT** Once it's done, put on your heavy-duty work gloves and then pull the tin back, pull the hog out, and put it on the picnic table. Cut the fencing loose. If we were having a pig picking, Daddy would pull the spine out, pull the rib bones back, and then apply some vinegar-based sauce _(seethis page)_, and that's what you should do, too: Get your heavy-duty gloves on and pull the spine out of your way (discard it), pull the ribs aside, and you'll have a whole bunch of meat facing you. The meat now has got a flavor that's out of this world, and you've also got the skin. The skin now's like cracklings, with crispy edges that you can just break off and enjoy as a tasty on-the-spot treat right there. A * * * * * * **IN THE SMOKER** * * * **_I sincerely want everyone to try the unique coal-fired pitmaster whole hog, and I've made it as simple for you as I can (remember, for less than $400 you can build the whole pit yourself; seethis page). 
However, for a down-and-dirty easy method of smoking a whole hog in an industrial-size smoker, here you go:_** _**PREP:**_ **Start with your 160-pound (73-kg) hog, butterflied and with extra fat removed for you by your butcher. On a long table covered with clean butcher paper or another sanitary covering, lay long strips of aluminum foil. Lay the hog flat on its back on top of the foil. Apply white vinegar, salt, and pepper all over the hog, inside and out, including on the skin, taking care to sprinkle the salt and pepper throughout the cavity and on the surface of any exposed meat. Gather up the foil you've laid the hog on and use it to loosely wrap the entire hog. Let the hog rest this way for 1 hour while you light the smoker and bring it to 250°F (120°C).** _**COOK:**_ **Place the hog in the smoker. Close the smoker and let the hog smoke for about 20 hours, or until the internal temperature of the meatiest part of the shoulder is 205°F (95°C). (I often set my hog on the smoker at noon the day before I want to eat it; then I remove it at 8:00 the next morning and let it rest until I'm ready to eat.) Unwrap the foil and, using a brush, apply a mop you've made of white vinegar, salt, and red pepper flakes throughout the inside of the cavity. Rewrap the hog loosely in the foil. Leaving the hog in the smoker, let the temperature fall from 250°F (120°C) until the hog is cool enough to handle and remove from the pit (no more wood is needed at this point). This resting allows the hog meat to redistribute its juice and for the meat to cool down just enough so that folks can start pulling the meat without getting hurt.** _**EAT:**_ **Unless you need to present the whole hog, the hog is left in the smoker while it is picked and pulled and, best of all, eaten. In true southern tradition, the hog is never "carved," per se. 
Wearing clean heavy-duty gloves and using either large tongs or your hands, gently pull the meat out of the hog in chunks and pile it onto large trays or straight onto plates.** * * * A * * * **GROWING UP _with_ JACK MIXON** MY DADDY ALWAYS made my brother, Tracy, and me help him cook the food for his barbecue restaurant. It was a carryout place, open only on Thursdays, Fridays, and Saturdays—but only on Saturdays if there was any meat left over and he hadn't sold out before then. Most of the time he wore what he always wore: a white T-shirt with the sleeves cut off, a pair of damn suspenders holding up his jeans, his work boots, and his truck driver's cap. He'd sit on this five-gallon bucket he'd turn upside down and watch us shovel coals into the pits, and then every so often he'd get up and run his hand over the sheet of tin that covered the pits and check to make sure the temperature inside them was consistent. At that time in my life, I hated barbecue; it was hard work, especially turning on one of them gas grills. All I could think about in my younger days was when I was going to be able to get away from there, to get the hell out of that mess. Little did I know it, but I was learning the whole time. You can probably call my dad's style "tough love." Under that kind of love you either get stronger or you break down. I know that if he hadn't made us learn how to become pitmasters, we wouldn't have. Tracy didn't pick it up like I did; he was younger than me. But he also had it easier: As the oldest, I came under the harshest guns of Jack. My brother got to go hang out with his friends but I didn't, and if I bitched about it I got in serious trouble. When my mom passed away and Tracy started working with me, he learned the things he never learned from Daddy. 
I don't think that Jack didn't want Tracy to learn it, I just think I brought out the worst in my dad somehow; the bright side of that is the fact that I learned the most from him—probably because I was most like him. Sometimes to this day Tracy misspeaks and calls me "Daddy" when I'm arguing with him about something. In truth my dad had to have us work for him because we had to have the money; most of the time he couldn't afford to hire labor. I remember one time we had a sawmill where we custom-cut cypress that we logged out of the swamps, and we hired a cutting crew to turn it into lumber. We had a yard full of timber and we custom-cut it for decking and stuff like that. We probably had twelve people working with us; I was about fourteen or fifteen years old. We were squaring up the logs and it was so messy; cypress wood is full of water, and the sawdust comes off wet and sticks to you like cornmeal. One day I told Tracy, "We're working as hard as people in the world, and we ought to get paid." That night while we were sitting at the dinner table, I was full of vinegar and feeling like I was grown-up and ought to be treated like it. I told my dad I thought we ought to get paid. He said, "You sit your ass at my table, you eat the food I buy, you wear the clothes I buy you, you sleep under my roof, you are getting paid." And that was the end of that. My momma was sensitive, and Tracy is too. My momma always said they ought to saint her for putting up with Jack. With him, everything was either black or white; there wasn't any gray. Now, my daddy didn't sweat the big stuff, but something small would happen and he could get off on a tangent. You could do something that was a major screwup and he'd be OK, but if you did something little he could lose it. Nevertheless, he was beloved. In a county of 10,000 people, about 1,000 came to my daddy's funeral, including the local law enforcement who loved him. 
He knew a lot of people; he was a great athlete when he was in high school, and to the end he was charismatic. Daddy would not believe the damn notoriety of where barbecue is today. For southerners like him, barbecue was just a way of life. Right now in this country southern stuff is cool and everyone wants to get something that's supposedly southern, which he wouldn't understand, either. My dad thought "southern" meant working your ass off and getting by, and there was nothing wrong with that. * * * A **HOG PARTS** _Smoked Shoulder_ _Smoked Spareribs_ _Smoked Pig Tails_ _Smoked Trotters_ _Smoked Whole Ham_ A **HOG PARTS** * * * I'm not going to lie to you: The whole hog is easily the Mount Everest of barbecue, the biggest, baddest challenge out there. It is much easier to cook the hog parts separately simply because they're easier to handle when they're smaller, from managing their prep to monitoring their cook times. You might consider these "hog parts" the building blocks of whole-hog cooking, or simply the meats to cook for smaller occasions. I've thrown a couple of personal favorite recipes in here for cooking some cuts of hog that are a bit more unusual, too, such as the snout and the tail. Big-city chefs and restaurateurs may brag about "nose-to-tail" cooking, but southern barbecue guys like me have been cooking all the parts of the hog worth eating since before there was a borough called Brooklyn (by about fifty years, in fact). **There is** * * * **NO SUBSTITUTE** * * * **for the barbecue you get when you smoke meat over homemade coals in a masonry pit.** A **SMOKED** **SHOULDER** **It will generally take about the same amount of time to cook a couple of pork shoulders as it will to cook the whole hog. The reason for that: When you look at a whole hog to determine the length of time you'll have to cook it, you're looking at the size of the biggest pieces of meat on there, and that will be the shoulders (and hams). 
Because these shoulders are about the same size as the ones on the whole hog, you're looking at 10 to 12 hours here.** _Cooking time:_ **10 TO 12 HOURS** _Makes:_ **25 TO 30 SERVINGS** _ingredients_ **1 (15- to 20-pound / 6.8- to 9-kg) bone-in whole pork shoulder** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** Daddy didn't trim pork shoulders too much—maybe he cut away extra skin or large hunks of sinew hanging off a shoulder, but mostly he kept it simple and liked to take the shoulders straight from the butcher, lay them out on a table covered with butcher paper or another sanitary covering, and rub them down all over the inside and the outside with white vinegar. Then we'd season them all over with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Place the shoulder in the pit, meat side down, and then start shoveling your coals underneath. **3: KEEP THE TEMP CONSISTENT, FLIP AND MOP THE SHOULDER** We'll cook the shoulders for about 6 hours, all the while shoveling the coals, keeping that temperature around the 250°F (120°C) mark so you hear the meat sizzling and its juices dripping and hear the coals steaming and giving the meat that flavor we're looking for. After 6 hours we'll go in with our leather gloves and flip the shoulders. My dad would take his Case pocketknife and he'd poke about three holes in the skin on the shoulder and then we'd flip it skin side down. So use a sharp knife to poke three holes in the skin, flip the shoulder, place it back on the grate over the coals skin side down, and let the liquid run out and hit the coals and steam up on the meat. When we flip the shoulders at the 6-hour mark we're going to take that mop we made and dab that mop all over the shoulders, even on the skin. Now we put the lid back down. Now we keep firing again. 
About every hour and a half, we pull the tin back, we go in and do another mop, and we close it. **4: TEST FOR DONENESS** When we get to our 10-hour mark, we start checking the meat for doneness. My dad did this by grabbing the blade bone and pulling it. (You can wear your heavy-duty work gloves to do this if you like.) You want that meat almost falling-off-the-bone tender. If the bones are slipping easily, we're going to pull the shoulders out; if not, we're going on that 12-hour mark. Again we're mopping about every hour and a half. **5: PULL AND EAT THAT MEAT** My dad pulled all the meat from the shoulders by hand and with tongs, simply grabbing off pieces and collecting them in a large aluminum pan, and then we'd lightly sauce the meat with a vinegar-based sauce right there and make barbecue platters or sandwiches, or we'd process the meat for Brunswick Stew _(this page)_ or a catering job. You'll notice you've got a lot of skin left over, which is perfect for making cracklin's _(this page)_. A * * * * * * **IN THE SMOKER** * * * **_Here's a down-and-dirty easy method to smoke a shoulder on a smoker:_** _**PREP:**_ **Start with one 15- to 20-pound (6.8- to 9-kg) pork shoulder. Rub it down thoroughly with white vinegar and season it liberally with salt and pepper, taking care to cover all surfaces. Prepare a smoker with soaked wood chips** _(seethis page)_ **and heat it to 250°F (120°C).** _**COOK:**_ **Place the shoulder directly on the smoker rack. Cook for 3 hours, then transfer it into a clean aluminum pan and cover with foil. Place the pan back on the smoker and finish cooking the shoulder until the internal temperature reaches 205°F (95°C). Pull the pan from the smoker and let the shoulder rest while still covered for at least 2 hours.** _**EAT:**_ **Put on clean heavy-duty gloves and pull the meat apart in chunks. Discard the gloves and, using tongs, toss the meat with a vinegar-based barbecue sauce, if you like. 
Pile the pulled pork on guests' plates or sandwich buns.** * * * A * * * * * * **Keeping Meat Moist the Jack Mixon Way** * * * I was always reluctant to ask my daddy why he was doing something, because he might tear my ass up if I did. The two words he hated to hear out of his young'uns were "Why?" and "What?" He'd rather you watch him do something and figure it out on your own. By the time I had the courage to ask him why he poked those three holes in the pork shoulder, I was in my late twenties. He said, "I poke them holes in that meat for two reasons: First reason is that when I flip it over on the skin, all that liquid that's in there—the grease and water and everything that's been rendering out while it's been cooking with the meat side down—is going to run out and hit those coals and have more of a steaming effect, steaming back up on that meat, which is a big part of the flavor. Second thing is, when I get that liquid away from it, it's going to cook faster and get done sooner." When I asked if doing it that way would dry the shoulders out, he looked at me and said, "Have you ever eaten any of my barbecue that was dried out?" And I said, "No, sir." And that ended that conversation. * * * A **SMOKED** **SPARERIBS** **Spareribs, or St. Louis–style ribs as they're known on the professional barbecue circuit, are the ribs of choice down here in the Deep South; people raised in our area want the biggest bang for their buck, so we like spareribs, which are surrounded by more fat to flavor the meat than, say, a baby back or a trimmed-up Kansas City–style rib (the latter is the St. Louis rib minus the top part of the bone so that the rack has ribs uniformly square in size). These are what we cooked when I was growing up. 
We cooked big spares, and my dad never even removed the membrane or trimmed them up too much, either.** _Cooking time:_ **4 TO 6 HOURS** _Serves:_ **8 TO 12** _ingredients_ **4 racks spareribs** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** We're going to use four racks of whole large spareribs, not separated into individual ribs. We're going to place each rack one at a time on a cutting board, bone side down, and use a sharp knife to trim off any excess fat from the first three ribs. We're going to turn the slab over, make a small incision just below the length of the breastbone, work our fingers underneath the thick membrane (called "silver"), and firmly peel off that thick membrane that covers the ribs. Sometimes this exposes more fat that needs trimming, so trim that away if it's there. Now we're going to go in with white vinegar and dab the ribs all over with it. Then we're going to season the ribs well on both sides with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). Place the racks of ribs in the pit, meat side down, and then start shoveling your coals underneath. **3: KEEP THE TEMP CONSISTENT, FLIP AND MOP THE RIBS** After 2 hours, we're coming into the pit with long, heavy tongs to flip the racks so the bones go down onto the bricks. We're going to mop all over both sides of our ribs. We're going to mop every 30 minutes until we get to the 4-hour mark and then check for doneness. With ribs, you need to be very careful about mopping because the more time you have that tin pulled back, the more oxygen is getting down in there to those smoldering coals, and the more apt you are to have flame-ups. You get the tin up, you mop all the meat, you pull the tin back down, and be done with it. It takes me about 30 seconds. 
**4: TEST FOR DONENESS** When we get to our 4-hour mark, we start checking the meat for doneness. My dad did this by grabbing the rib bones and pulling at them. You can do this with heavy-duty gloves on if you like, but it's easier to pull the rib without them. You want that meat almost falling-off-the-bone tender. If the bones slip easily, we're going to pull the ribs out; if not, we're going on that 6-hour mark and will keep mopping them every 30 minutes. **5: PULL AND EAT THAT MEAT** We pull the ribs off and lightly sauce the racks by brushing all over on both sides with our vinegar sauce. The crispy "bark" on the outside of the ribs, where the fat has been rendering out perfectly, is so damn good. Using heavy kitchen shears or a sharp knife, cut the ribs to separate them, then serve. A * * * * * * **IN THE SMOKER** * * * **_Here's a down-and-dirty easy method to smoke ribs in a smoker:_** _**PREP:**_ **Start with 4 racks of spareribs. Rub them down thoroughly with white vinegar, then season them all over with salt and pepper. Set the racks in an aluminum baking pan and let sit uncovered at room temperature for about 30 minutes.** _**COOK:**_ **Prepare a smoker with soaked wood chips and heat it to 275°F (135°C). Remove the ribs from the pan and place them directly on the smoker grate. After 2 hours, transfer the ribs to a clean large aluminum pan, bone side down. Pour 1 cup of water into the pan with the ribs and then cover the pan with foil. Place the pan back on the smoker and finish cooking the ribs for 2 more hours, or until tender. When the ribs are tender, remove the pan from the smoker, brush the ribs with vinegar-based barbecue sauce** _(this page)_, **and transfer them back to the smoker (no pan) for 10 minutes for the sauce to set.** _**EAT:**_ **Remove the ribs from the smoker and let them rest for 10 minutes, uncovered, on a wooden cutting board. 
Cut the ribs to separate them and serve.** * * * A * * * * * * **Smoked Pig Tails and Trotters** * * * These are smoked in a pit that's already fired up to 250 to 275°F (120 to 135°C)—when you're already cooking something else, most likely a whole hog, or hams or chickens. If you opt to cook a whole hog, you'll already have a tail, four trotters, and a snout to work with; if you don't, any butcher can get you trotters, tails, and snouts (or "SNOOT," as we pronounce it down here in south Georgia; see _this page_ for my Smoked Snout Sandwich recipe). These are very inexpensive parts that tend to make some folks squeamish, but I'm here to tell you that they're well worth cooking, because when they're done right, they make for very, very good eating indeed. **SMOKED PIG TAILS** Pig tails should come from a large hog because they've got more meat and fat on them, making for better cooking and eating. To prepare the pig tails, rinse them thoroughly in cold water and pat them dry completely with a clean kitchen towel or paper towels. Spritz or brush the tails with distilled white vinegar and season them all over with salt and pepper. Then put them in the pit right there along with the rest of the meats you're cooking. Cook the pig tails for 1½ hours. Then roll them over onto the uncooked side and cook them for another 1½ hours. You're looking for a crispy exterior here, and 3 hours of cooking time is going to do that for you. Now transfer the tails to a heavy cast-iron skillet or another heavy-duty pan. Pour a mop made of distilled white vinegar and crushed red pepper into the pan so that the mop comes about three-quarters of the way up the sides of the tails. Put the skillet in the pit and cook until the tails are tender and falling off the bone, about 2 more hours. Serve with plenty of sauce _(this page)_ on the side for dipping and homemade potato salad. **SMOKED TROTTERS** The process for trotters is very similar to the process for pig tails. 
To prep the trotters, split each one lengthwise straight down the middle, so that you have two halves (you can have your butcher split them for you). Clean them very well, rinsing thoroughly in cold water and patting them dry completely with a clean kitchen towel or paper towels. Apply distilled white vinegar to both sides of each piece, which you can do with a spritz bottle or a brush. Season the pieces well on both sides with salt and pepper. Meanwhile, you've got your pit at 250 to 275°F (120 to 135°C) and you're shoveling and checking the temperature and taking care to maintain it. Put the trotters in the pit, cut side down, on the grate. Cook them like this for 2 hours. Flip them skin side down and cook for another 2 hours. Now get a good heavy cast-iron skillet or other heavy-duty pan and transfer your trotters into it. Pour a mop made of distilled white vinegar and crushed red pepper into the pan so that the mop comes about three-quarters of the way up the sides of the trotters. Put the skillet into the pit and cook the trotters for about 2 more hours in the pan, just leaving them in the pan while you're still firing the pit. What this is going to do is tenderize that meat to where it's going to be falling-off-the-bone tender, where you can pick up a trotter and you can gnaw down on it and you can get between every bone in there. I don't care if this sounds a little primal; that's some good eating. * * * A **SMOKED** **WHOLE HAM** **If you've followed my method for the whole hog and the hog shoulders, you should see a pattern: same technique, different cut. A fresh ham is my favorite and was my daddy's favorite meat to cook. He liked white meat and he liked the fact that with hams you don't have a lot of fat and so you get a high yield—good for his barbecue business. (The shoulder, on the other hand, is full of marbling and fat, so when you cook it you end up with significantly less volume than you started with.) 
In his signature barbecue, my dad liked to combine pulled pork shoulder meat with pulled ham in a ratio of 1:3, shoulder to ham. He'd take one cooked shoulder and three cooked hams, blend all of that meat together, lightly sauce it with that vinegar-based sauce, and serve it. That meat he sold for take-out was a big part of his business, and it was a winning formula. Or you can just enjoy your smoked hams and the best chopped or sliced ham sandwiches you'll ever eat.** _Cooking time:_ **10 TO 12 HOURS** _Makes:_ **ABOUT 25 SERVINGS** _ingredients_ **1 (18- to 22-pound / 8.2- to 10-kg) fresh ham with the skin on** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** We don't trim the ham, but we prep it by rubbing it down, skin and all, with white vinegar, then applying salt and pepper all over the skin and the meat. This is very easy. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). Place the ham in the pit, meat side down, and then start shoveling your coals underneath. **3: KEEP THE TEMP CONSISTENT, FLIP AND MOP THE HAM** At the 6-hour mark, poke three holes in the back of the ham, flip it, and let the accumulated juices run out onto the coals so the liquid steams back up onto the meat. Then we're going to take that pit mop and dab mop all over the ham, even the skin. We're going to mop the ham thoroughly every hour after the 6-hour mark until the 9-hour mark, when we'll test for doneness. **4: TEST FOR DONENESS** Now that we're getting to our 9-hour mark, we start checking this meat for doneness. My dad did this by grabbing the leg bone and pulling it. You want that meat almost falling-off-the-bone tender. If the bone is slipping easily, we're going to pull the ham out; if not, we're going on that 10-hour mark. 
**5: PULL AND EAT THAT MEAT** Transfer the cooked ham to a cutting board. Cover loosely with foil and let rest for 20 minutes. Pull off any crisp skin and, if you're so inclined, finely chop it with a cleaver. Wearing clean heavy-duty gloves, pull the meat off the bones in large pieces; discard any bones or lumps of fat. Using your fingers or a couple of forks, pull each piece of pork into thin shreds, or finely chop with a cleaver. Transfer the pork to a large, heatproof pan and stir in about 2½ cups (600 ml) of the barbecue sauce, enough to keep the meat moist; add more as needed. Season the pork with salt and pepper. Then pile it up on buns. A * * * * * * **IN THE SMOKER** * * * **_Here's a down-and-dirty easy method to smoke a ham in a smoker:_** _**PREP:**_ **Start with 1 (15-pound / 6.8-kg) precooked smoked ham on the bone. Prepare a smoker with soaked wood chips and heat it to 225°F (110°C). Trim the tough outer skin and excess fat from the ham.** _**COOK:**_ **Place the ham meat side down on the rack in the smoker and smoke the ham for 3 hours. Transfer the ham from the smoker into a large clean aluminum pan, cover the pan with foil, and put the pan back on the smoker. Finish cooking the ham for 2 more hours, covered, in the pan. Remove the pan from the smoker and let the ham rest, covered, for at least 2 hours.** _**MOP:**_ **Take the ham off the smoker. Apply a favorite glaze or barbecue sauce to the ham with a brush, covering it all over. Put the ham back in the aluminum pan, cover it with foil, and return it to the smoker. Smoke the ham for about 1 hour more, or until the internal temperature is right around 145°F (65°C). Unwrap the foil and baste the ham again with your favorite glaze or barbecue sauce. Cover the ham again, return it to the smoker, and smoke for about 1 hour more. 
At this point the ham will be at least 145°F (65°C), possibly a little higher.** _**EAT:**_ **Remove the ham from the smoker, loosely tent it with fresh foil, and allow it to rest for 30 minutes. Carve and serve warm or at room temperature, or follow the directions in step 5 on** _this page_ **for pulled pork.** * * * **If you ride through** * * * **SOUTH GEORGIA** * * * **right now, in 90 percent of the yards you drive past there will be some sort of smoker or grill or pit—that's the way we were raised down here.** A **HOG EXTRAS** _Smoked Snout Sandwich_ _Pork Cracklin's_ _Brunswick Stew_ _Hog-Skin Collards_ A **HOG EXTRAS** * * * I don't like the term "leftovers" for a whole bunch of reasons, not least because it sounds like an insult. What I do like is the concept of saving what you don't eat and repurposing it for future use, and also the concept of using what you've cooked on the pit or in the smoker in unexpected ways—both of those techniques keep life interesting, don't they? My "hog extras," as I like to call them, give you a good idea of how I might use a couple of pounds of pulled pork after I'm sick of sandwiches, or what I do with all the hog skin after a pig pickin', or even, yes, how I might enjoy the nose of the beast (that's one delicious sandwich—try it). A **SMOKED** **SNOUT** **SANDWICH** **In South Georgia we call a pig's snout a "snoot" or a "rooter" (because it's what a hog uses to root around for acorns). If you buy these from the butcher, the snout will include the top part of the hog's nose, with the skin on it and the nostril holes in it. The goal in cooking these is to make them very crispy, so you don't want to cook them the same way as the trotters and the pig tails. 
It's also important to note that you eat them in sandwich form, and to have in mind that you're making them almost like you would make cracklin's _(this page)_.** _Makes:_ **1 SANDWICH** _ingredients_ **1 pig snout** **Pit Mop** _(this page)_ **Kosher salt and freshly ground black pepper** **Bread of choice** **Vinegar-Based Barbecue Sauce** _(this page; optional)_ **Dill pickles, sliced** _(optional)_ **Onion, sliced** _(optional)_ The first thing you're going to do is split the nostrils, making a cut all the way through, so that the snout lays out flat. Then you're going to flip it over to where you can see the fat and the tendons inside the snout; cut out the tendons you see—there's generally one on each side (those tendons work the hog's nose up and down, like when you see him moving his nose like a big-ass rabbit, and they help him snort). The next thing you're going to do is go in with a sharp knife and score the snout, making a cut about ⅜ inch (1 cm) deep, making cuts all the way across, then in the other direction to make a checkerboard. The scoring's going to help keep that snoot from curling up when you cook it: You want it to lay flat. Flip the snout over and score it on the other side, taking care not to cut all the way through—you just want to score it. Now we take our mop and we're going to spritz it on both sides of the snout and season both sides with salt and pepper. Then we put the snout meat side down on the grate of our pit. We're going to shovel the coals underneath our pit, and we're going to cook the snout for 1½ to 2 hours so it gets good and crispy. Then we're going to come in and flip it upside down so it's skin side down now, and we're going to get it good and crispy on the bottom, so when you bite into the skin it'll be crunchy. That's going to take you about another 2 hours. 
The snout is already going to be about sandwich size, so lay it on some fresh bread, put a dab of barbecue sauce on the fat side, add maybe a dill pickle or two and some sliced onions on there, and you've got yourself a crunchy rooter sandwich. A **PORK** **CRACKLIN'S** **The pork cracklings, or pork skins, are better than any damn skins you'll ever have because you can't buy these out of a bag—that crap you buy out of a bag is just puffed air and fat. This right here made with the skin of a hog is the real deal. We get the skin that cracklings are made from after we get through barbecuing our shoulders, our hams, and even our whole hog.** _Makes:_ **4 TO 8 SERVINGS** _ingredients_ **Vinegar-Based Barbecue Sauce** _(this page)_ **Pork skin (1 pound / 0.5 kg pork skin yields about 8 cups / 1.9 L cracklin's, enough to serve 4 for snacks; 2 pounds / 0.9 kg pork fat yields enough to serve 8 for snacks)** What you have after you make a whole hog is skin that's crispy but not so much that you can bite through it. Once you get your meat ready to eat, you've got skin left over. Bring the temperature to 265°F (130°C). Take that skin, lay it out flat, then take a thin-edged spoon and scrape all that fat out and render it down into lard (which you can save for Smoked Blackberry Cobbler, _this page_ ). Apply a thin coating of salt and pepper onto that skin and put it into the pit, situating it on the grate with the scraped-out side down. And start shoveling them coals again. After an hour, you should see the edges starting to curl; flip the skin. Continue to cook the skin, back skin side down, for another hour. To check for doneness, bend the skin; it's ready when it begins to break. Then come in and lightly brush a little bit of your vinegar-based sauce onto the inside part of the skins, the side where you scraped the fat off. Cook for 5 or 10 minutes more, and you're good to go. 
A **BRUNSWICK STEW** **This is my family's version of the classic barbecue side dish, which was born in the town of Brunswick, Georgia (don't believe the folks who say it was born in Brunswick County, Virginia). This is an ideal dish for a crowd because it's hearty and delicious and simple; feel free to double the recipe and store a batch in the freezer or just have a giant shindig if you feel like it. It's best served with cornbread _(this page)_ alongside it.** _Makes:_ **20 TO 25 SERVINGS** _ingredients_ **4 pounds (1.8 kg) russet potatoes, peeled and quartered or coarsely chopped, depending on your preference** **1½ pounds (0.7 kg) yellow onions, quartered** **3 pounds (1.4 kg) smoked pulled pork** _(this page)_, **finely shredded** **½ cup (100 g) sugar** **3 pounds (1.4 kg) canned crushed tomatoes** **3 pounds (1.4 kg) canned creamed corn** **3 pounds (1.4 kg) canned tomato sauce** **2 cups (480 ml) Vinegar-Based Barbecue Sauce** _(this page)_ **or other favorite barbecue sauce** Preheat the oven to 300°F (150°C). Combine all the ingredients and divide them evenly between two deep 13 × 9-inch (33 × 23-cm) roasting pans. Cover the pans with foil, put them in the oven, and bake, stirring frequently and making sure no meat sticks to the bottom of the pans, for 2½ to 3 hours, until the onions are tender. Serve warm in bowls. A **HOG-SKIN** **COLLARDS** **This is a great side dish for any type of barbecue: classic collards. 
When I'm cooking hogs in my masonry pit I like to add hog skin to the collards to give them smoky flavor and also because that hog fat is going to make them shiny and bright to look at and extra tender to eat.** _Makes:_ **10 TO 12 SERVINGS** _ingredients_ **1 pound (0.5 kg) hog skin, taken from the shoulder or the ham of a fully smoked whole hog** _(this page)_ **1 pound (0.5 kg) salt-cured pork, such as hog tails or jowls or thick-cut bacon, coarsely chopped** **6 pounds (2.7 kg) collard greens, stems discarded, leaves cut into ½-inch to 1-inch (12-mm to 2.5-cm) strips** **1 head young green cabbage, coarsely chopped** While the smoked whole hog is resting, find about a pound of skin from its shoulder or ham—there is a lot of extra skin on the hog, so this shouldn't be difficult. Cut the skin into 1-inch (2.5-cm) pieces. In a large stockpot, cook the skin and the salt-cured pork together over moderately high heat, turning, until both are golden, about 4 minutes. Add about 7 quarts (6.6 L) water and bring to a boil. Simmer over low heat until the skin is tender, about 45 minutes. Bring the broth to a vigorous boil. Add large handfuls of the collards at a time, alternating with the cabbage, allowing each batch to wilt slightly before adding more. Return the broth to a boil. Reduce the heat and simmer the collards over medium heat, stirring occasionally, until they are tender, about 30 minutes. Taste the collards; they should be salty enough from the salt-cured meat, but if they're not, adjust for seasoning. Then transfer the collards to a large serving bowl, spoon some of the broth over the greens, and serve piping hot. Serve with Smoked Cornbread _(this page)_. 
A **WHOLE BIRDS** _Smoked Butterflied Chicken_ _Smoked Whole Turkey_ _Smoked Butterflied Turkey_ A **WHOLE BIRDS** * * * I felt like the whole world's love affair with chicken kind of passed us by down here in the Deep South—yes, we barbecued chicken for our church suppers and we sure fried it, but if there was pork, we ate pork. I didn't pay very much mind to how to cook chicken until I got serious about cooking barbecue professionally and I had to learn how to cook it better than everybody else so I could make some money. My dad loved to make half chickens _(this page)_, so I'm going to give you his method for that—that's what he sold for take-out in his barbecue business and what I saw him cooking when I was coming up. The rest of these recipes for birds are ones that I figured out how to make on my own. **Right now everything that's popular revolves around** * * * **THE SOUTH.** * * * **A big part of that is barbecue. I learned from my dad, who learned from his dad, who learned from his dad.** A **SMOKED** **BUTTERFLIED** **CHICKEN** **The fastest and most efficient way to cook a chicken in a smoker is by spatchcocking it, which is a cool old-fashioned word for butterflying it. This lets you flatten the bird out for more even, consistent cooking of the breast and legs together, and shaves hours off your smoking time.** _Cooking time:_ **3 TO 4 HOURS** _Makes:_ **2 TO 4 SERVINGS** _ingredients_ **1 (3½-pound / 1⅔-kg) whole chicken with the skin on** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** I like a whole chicken that's on the big side, so at least 3½ pounds (1⅔ kg). Local birds that didn't have to travel too far to get to your plate are best. First, remove the neck and giblets. Rinse the chicken thoroughly in cold water and pat it dry with paper towels or a clean kitchen towel. 
To butterfly it: Place the chicken on a cutting board, breast side down. Using a very sharp kitchen knife or cleaver or sharp kitchen shears and working from the cavity opening to the neck, cut down the backbone of each side of the chicken; discard the backbone. Next, cut a 2-inch (5-cm) slit through the membrane and cartilage at the "V" of the neck end. Grab a breast in each hand and gently bend both sides backward, as if you were opening a book, to snap the breastbone. Use your fingers to work along both sides of the breastbone to loosen the triangular keel bone; pull out the bone. With the tip of a sharp knife, cut along both sides of the cartilage at the end of the breastbone; remove the cartilage. Now your chicken is good to go. Just rub it down inside and out with white vinegar, and then season it all over, the skin and the cavity, with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Place the bird in the pit, breast side up, and then start shoveling your coals underneath. **3: KEEP THE TEMP CONSISTENT, MOP THE BIRD** Every hour, mop the chicken thoroughly with the mop. **4: TEST FOR DONENESS** After 3 hours, check the Jack Mixon way: Pull on the leg and see if it easily twists away from the thigh; if it resists and doesn't turn, you're not done yet. Or use your instant-read thermometer: The breast should be 165°F (75°C), and the dark meat should reach 180°F (82°C). **5: PULL AND EAT THAT MEAT** Carefully and gently transfer the chicken from the pit to rest on its back on a cutting board or platter, uncovered, for 15 minutes. Then you can carve it and enjoy it. 
A * * * * * * **IN THE SMOKER** * * * **_Here's a down-and-dirty easy method to smoke a whole chicken in any type of smoker, including the one you may already have in your backyard:_** _**PREP:**_ **You do not need to butterfly the chicken to cook it in an offset smoker—it's less essential to prepare it that way because you can cook it for less time in the offset smoker than in the pit. You need a small whole chicken, about 3 pounds (1.4 kg). Apply white vinegar, salt, and pepper all over the chicken, inside and out, including on the skin, taking care to sprinkle the salt and pepper throughout the cavity. Let the chicken rest this way for 1 hour while you light the coals in the smoker and heat it to 250°F (120°C). Place the chicken on the smoker breast side up.** _**COOK:**_ **Let the chicken smoke for about 3 hours, or until the internal temperature of the leg and thigh reaches 180°F (82°C). Remove the chicken from the smoker and allow it to rest for 15 minutes.** _**EAT:**_ **To serve, carve the chicken into individual pieces, either 8 or 10 (2 wings, 2 breasts, 2 thighs, and 2 drumsticks; or divide the breast into two halves to make 10 pieces total).** * * * A **SMOKED** **WHOLE TURKEY** **Thanksgiving is the one holiday I don't cook. We go to the mountains on Thanksgiving. That said, I love smoked turkey and I don't believe you have to wait until Thanksgiving to enjoy it. I love a pulled turkey sandwich the next day, too.** _Cooking time:_ **ABOUT 6 HOURS** _Makes:_ **10 TO 12 SERVINGS** _ingredients_ **1 (14- to 17-pound / 6.3- to 7.7-kg) turkey with skin on, neck and giblets removed** **Pit Brine** _(this page)_ **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** I like a turkey that is 14 to 17 pounds (6.3 to 7.7 kg). Remove the neck and giblets and use them to make your gravy, if you like. 
One thing I do that my daddy didn't is brine the bird. The white meat really benefits from some additional flavor. Whip up a batch of Pit Brine, then submerge the turkey in the brine in a container large enough to hold it and let it sit overnight in the refrigerator. (You can also put it in an ice-packed cooler). When you're ready to cook it, bring the turkey out of the brine (discard the brine), then pat it dry thoroughly inside and out. Rub that turkey down inside and out with white vinegar. Then season it liberally with salt and pepper. Note that a turkey has got a lot of thick skin, and you need a heavy coating of salt and pepper on there. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). We put the turkey on the grate in the pit breast side up. **3: KEEP THE TEMP CONSISTENT, MOP THE BIRD** We don't need to flip a whole turkey; not only is it too large, it's not flat, either, and you have to worry about keeping that meat moist inside the skin. So we're not going to touch it—we're going to let it sit and accumulate its juices without knocking it around too much. It takes longer to cook a turkey this way than if we butterfly it, but we keep our beautiful presentation intact. We are going to mop it every hour with our mop. **4: TEST FOR DONENESS** At the 5-hour mark, take a look at the turkey skin on the thigh. Take your finger and push down on the skin and see if the push leaves a dimple. If that happens, it's on the way to being done. If you want to use a meat thermometer, check that the thigh meat and leg meat are around 180 to 185°F (82 to 85°C). You can also check by grabbing that drumstick and twisting it until it breaks away easily from that bird; when it does, you're ready to rock and roll. The breast meat's going to be somewhere around 165°F (75°C) when it's ready to pull off the pit. 
**5: PULL AND EAT THAT MEAT** Carefully and gently transfer the turkey from the pit to a wooden cutting board or platter. Allow the turkey to rest, loosely covered with foil, for 30 minutes. Then carve the turkey and serve immediately. * * * **A NOTE ABOUT CARVING THE TURKEY** A lot of times you see people carving turkeys at Christmas and Thanksgiving, and they carve it all wrong: They go up in there and they just carve the breast lengthwise, which is the quickest way to dry out the meat on the bone. Instead, try it my way: Take your knife and go on and bone out that breast—remove the whole breast from the bird—then cut across it horizontally, using short strokes instead of long strokes, so you're cutting across the grain. Your turkey meat will be so much more moist, tender, and juicy this way. You can thank me later. * * * A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **I make a slightly smaller turkey in my smoker than in my pit, simply because I have less room. So here I'm talking about a 12- to 15-pound (5.4- to 6.8-kg) turkey, neck and giblets removed. You're going to brine that turkey with our Pit Brine** _(this page)_ **overnight. When you're ready to cook the turkey, discard the brine, pat it dry thoroughly, and place it in a large, deep aluminum pan to rest while you heat your smoker to 250°F (120°C).** _**COOK:**_ **Transfer the turkey from the pan to the rack in the smoker breast side up and cook for 5 hours, or until the leg and thigh meat reaches an internal temperature of 180°F (82°C).** _**EAT:**_ **Remove the turkey from the smoker and transfer it to a large, heavy cutting board or platter. Allow the turkey to rest, loosely covered with foil, for 30 minutes. 
Then carve the turkey and serve immediately.** * * * A * * * * * * **Smoked Butterflied Turkey** * * * If coming to the table with the big whole bird is not your priority, consider butterflying the turkey; it makes it quicker to cook and easier to handle since it's basically broken down into pieces. Butterflying your turkey is very similar to butterflying a whole chicken _(see this page):_ Set the turkey breast side down on a large cutting board with the tail closest to you. Use an electric knife or heavy-duty kitchen shears to cut up one side of the backbone. Turn the bird around and cut back down the other side of the spine. Reserve the backbone for Smoked Turkey Stock _(this page)_, if you like. Discard any fat pockets or excess skin found inside the turkey. Turn the turkey breast side up and use the heels of your hands to press down on both breasts until you hear a cracking sound and the bird has flattened slightly. Turn the turkey over onto what once was its back, splaying its legs out. Press hard on the ridge of the breastbone; you will hear a few cracks, and the turkey should now rest flatter—better for even cooking and crisper skin. Tuck the wing tips behind the breasts. Then proceed with the recipe as described on _this page_ : Use the Pit Brine _(this page)_ to brine the turkey overnight. Bring the smoker temperature to 265°F (130°C). Then remove the turkey from the brine and discard the brine and pat the turkey dry with clean kitchen towels or paper towels. Spritz the turkey all over with white vinegar. Season the turkey all over with salt and pepper. Then put it on your smoker breast side up, butterflied side down. We're looking at a 4- to 5-hour cook time, mopping the bird with our Pit Mop _(this page)_ every hour. Start checking for doneness at the 5-hour mark. 
Do it the Jack Mixon way: Push down with your finger on the outside of the skin of the thigh; when you see that skin start to dimple, try to twist the leg away from the side of the breast meat where it connects; if it pulls easily, the turkey is ready to come off the pit. (You can use your instant-read thermometer, too: Look for the thighs and legs to hit 180°F / 82°C just as the breasts hit 165°F / 75°C.) Then you can rest and carve the turkey as described on _this page_. * * * A **BIRD PARTS** _Smoked Half Chickens_ _Smoked Chicken Wings_ _Smoked Turkey Legs_ _Smoked Turkey Wings_ A **BIRD PARTS** * * * If you've ever seen me on television talking about food that people like to eat, you've probably heard my theory about food with handles. It is my fervent belief that people like to eat food that's easy to pick up and hold—it solves a whole lot of problems when you're ready to chow down. Chicken "parts"—or cut-up pieces, if you will—have the kind of handles I'm always talking about; they make it easier for you to socialize while you're eating, and thus are great party foods. Speaking for myself, there's very little in life I like more than a chicken or turkey wing, let me tell you. A **SMOKED** **HALF CHICKENS** **In my daddy's barbecue business, he preferred to cook chicken halves—and he cooked a whole lot of them. One of the main things you see when you're down in the South is half-chicken dinner fund-raisers. When you're raising money for the local Little League, you bet there's going to be half-chicken plates. We never cooked whole chickens, and we never cooked chicken pieces; although I cook both of those things now, for me the "original" way of making chicken is a half chicken. 
The chicken half is made up of a breast, a leg, a thigh, and a wing, and that will be enough meat right there to fill up anybody around.** _Cooking time:_ **3 TO 5 HOURS** _Makes:_ **AT LEAST 2 SERVINGS** _ingredients_ **1 (3½-pound / 1.6-kg) whole chicken with the skin on, split in half** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** We buy whole chickens, local ones, and then we split them ourselves. First, remove the neck and giblets. Place the chicken on a cutting board, breast side down. Using a very sharp kitchen knife or cleaver or sharp kitchen shears and working from the cavity opening to the neck, cut down the backbone of each side of the chicken; discard the backbone. Next, cut a 2-inch (5-cm) slit through the membrane and cartilage at the "V" of the neck end. Grab a breast in each hand and gently bend both sides backward, as if you were opening a book, to snap the breastbone. Use your fingers to work along both sides of the breastbone to loosen the triangular keel bone; pull out the bone. With the tip of a sharp knife, cut along both sides of the cartilage at the end of the breastbone; remove the cartilage. Turn the chicken breast side up. Cut lengthwise down the center of the chicken to split it into two halves. Rub each chicken half inside and out, skin and cavity, with white vinegar. Then season them thoroughly all over with salt and pepper—take your time to season the chicken. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). Place the half chickens in the pit meat side down, bone side up. **3: KEEP THE TEMP CONSISTENT, MOP THE BIRD** Chickens do not have to be flipped, but they do have to be mopped. 
So while you're carefully keeping your pit temp consistent, you're going to mop them every hour all over—3 hours of cooking time means you'll mop three times. **4: TEST FOR DONENESS** At the 3-hour mark, take a look at the chicken skin. Take your finger and push down on the skin and see if the push leaves a dimple. If that happens, it's on the way to being done. The final check for doneness is to grab that chicken leg and gently twist it; if it pulls away easily, it is ready to go. The internal temperature should be 180°F (82°C). **5: PULL AND EAT THAT MEAT** Remove the chicken halves from the pit, carefully transfer them to a wooden cutting board or platter, and let them rest, uncovered, for 15 minutes. Then serve your birds—I give a half chicken per person. A * * * * * * **IN THE SMOKER** * * * **_You can make smoked half chickens in the offset smoker exactly in the way that you'd make a whole chicken (this page). The cooking method is identical_.** _**PREP:**_ **You need a small whole chicken, about 3 pounds (1.4 kg) with the skin on, split in half. Apply white vinegar, salt, and pepper all over the chicken halves, inside and out, including on the skin, taking care to sprinkle the salt and pepper throughout the cavity. Let the chicken halves rest this way for 1 hour while you light the coals in the smoker and heat it to 250°F (120°C). Place the chicken halves on the smoker breast side up.** _**COOK:**_ **Let the chicken smoke for about 3 hours, or until the internal temperature of the leg and thigh reaches 180°F (82°C). Remove the chicken from the smoker and allow it to rest on the rack in its pan for 15 minutes.** _**EAT:**_ **Serve half a chicken to each person.** * * * A **SMOKED** **CHICKEN** **WINGS** **Breast man, leg man—whatever. I'm a wing man myself. By that I mean I could eat chicken wings all day. I just like the fact that you get a two-in-one eating experience: the drumette and the flat. 
Chicken wings are considered bar food, but I maintain that they're some of the best eating you can enjoy. Here I'm going to tell you how to make those wings get that one-of-a-kind pit-smoked flavor.** _Cooking time:_ **2 HOURS** _Makes:_ **12 WINGS** _ingredients_ **12 chicken wings** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page; optional)_ **1: SELECT AND PREP THE MEAT** You need at least a dozen high-quality chicken wings, nice and meaty—the meatier they are, the better they'll do in the pit. Rinse the wings thoroughly in cold water and pat them dry. Spritz thoroughly with white vinegar, then season them liberally all over on both sides with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). You're going to put the chicken wings on the grate over the coals in the smoker. Take care not to overlap them. **3: KEEP THE TEMP CONSISTENT, FLIP THE WINGS, MOP THE WINGS** These wings have a 2-hour cook time. Every 30 minutes, you're going to flip them, then mop them with the mop. **4: TEST FOR DONENESS** When the wings are close to the end of the 2-hour cook time, take the largest wing and pull the "drummie" from the flat. If it pulls easily, they're done. (Alternatively: When the drummies reach an internal temperature of 165°F / 75°C on a meat probe, they're done.) **5: PULL AND EAT THAT MEAT** Transfer the wings to a wooden cutting board or platter, cover them loosely with foil, and let them rest for 15 minutes. Serve them with some barbecue sauce on the side, if you like. A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **Using a very sharp knife, cut each wing in half to separate the flat from the drumette. Rinse the pieces well and pat them dry. Spritz each all over with white vinegar. Liberally season each piece with salt and pepper. 
Place the chicken wings in an aluminum baking pan and let them rest while you heat your smoker to 250°F (120°C).** _**COOK:**_ **Transfer the wings from the pan directly onto the grate in the smoker and cook the wings for 2 hours.** _**EAT:**_ **Remove the wings from the smoker, brush them all over with Vinegar-Based Barbecue Sauce** _(this page)_, **and return the pan to the smoker. Cook for 15 minutes. Transfer the wings to a cutting board, cover loosely with foil, and let rest for about 15 minutes. Uncover and enjoy with a little dipping sauce** _(this page)_ **on the side, if you like.** * * * A **SMOKED** **TURKEY** **LEGS** **We all love to eat turkey drumsticks because they've got a handle on them; you can hold them and gnaw down until your heart is content. This book ain't about a damn barbecue contest; if anything it's about a barbecue _eating_ contest, and that's what turkey legs are good for: They're fun to eat.** _Cooking time:_ **ABOUT 4 HOURS** _Makes:_ **4 SERVINGS** _ingredients_ **4 legs from 15- to 17-pound (6.8- to 7.7-kg) turkeys, skin on** **Pit Brine** _(this page)_ **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** The best turkey legs are coming off a large bird, a 15- to 17-pounder (6.8 to 7.7 kg). You'll likely have to get these from your butcher, but I see them sometimes in my supermarket, too. These guys need to be brined overnight in our pit brine in the refrigerator. When you're ready to cook them, take them out of the brine, discard the brine, and pat the legs dry thoroughly with a clean kitchen towel or paper towels. Spritz them all over with white vinegar, then season them liberally on all sides with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). Then just put the legs in the pit. 
**3: KEEP THE TEMP CONSISTENT, FLIP THE LEGS, MOP THE LEGS** Every 30 minutes, turn the legs over and mop them with the mop. **4: TEST FOR DONENESS** At the 3½-hour mark, take a look at the turkey skin. Take your finger and push down on the skin and see if that push leaves a dimple. If that happens, it's on the way to being done. If you want to use a meat thermometer, check that the meat is around 180 to 185°F (82 to 85°C). **5: PULL AND EAT THAT MEAT** Pull the legs off the pit and transfer them to a wooden cutting board or platter. Let them rest, loosely covered with foil, for 15 minutes. Then chow down. A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **The turkey legs need to be brined overnight in the Pit Brine** _(this page)_ **in the refrigerator. Remove the turkey legs from the brine and pat them dry thoroughly with paper towels. Spritz the legs all over with white vinegar, and then season them liberally all over with salt and pepper. Transfer them to a deep aluminum pan and let them rest while you heat your smoker to 250°F (120°C).** _**COOK:**_ **Transfer the legs from the pan directly onto the grate of the smoker. Smoke the legs for 3 hours, or until the temperature of the legs in the meatiest part reaches 180°F (82°C) on an instant-read thermometer. Pull the legs from the smoker, wrap each individually in foil, and let them rest for 30 minutes.** _**EAT:**_ **Remove the legs from the foil and serve with Vinegar-Based Barbecue Sauce** _(this page)_ **on the side for dipping.** * * * **I was raised** * * * **SHOVELING COALS** * * * **under masonry pits. That was the only way barbecue was done. 
That was the way our forefathers did it.** A **SMOKED** **TURKEY** **WINGS** **Gobbler wings are the best damn wings on the planet, and I like ones that come from larger birds, 15 to 18 pounds (6.8 to 8.2 kg), and I don't take the tips off them because I'm going to gnaw down on those—I suggest you try them, too.** _Cooking time:_ **ABOUT 4 HOURS** _Makes:_ **6 TO 8 SERVINGS** _ingredients_ **5 lbs (2.27 kg) turkey wings, skin on** **Pit Brine** _(this page)_ **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_, **some kept aside for serving, if desired** **Vinegar-Based Barbecue Sauce** _(this page; optional)_ **1: SELECT AND PREP THE MEAT** We need to brine these turkey wings in our Pit Brine in the refrigerator overnight. When you're ready to cook them, remove them from the brine, discard the brine, and pat them dry thoroughly with a clean kitchen towel. Spritz them all over with white vinegar and season with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). I like to place these on the grate over the coals in the pit so that the wing tips are pointing away from the flame; otherwise they may burn at the tips. **3: KEEP THE TEMP CONSISTENT, MOP THE WINGS** We're going to mop the wings every hour with our mop. You don't usually have to flip these, but they do need checking: I always check them to see if the bottoms (tips) are getting too hot, and if they are, you should flip them. **4: TEST FOR DONENESS** At the 3½-hour mark, take a look at the turkey skin. Take your finger and push down on the skin and see if that push leaves a dimple. If that happens, they're on the way to being done. If you want to use a meat thermometer, check that the meat is around 180 to 185°F (82 to 85°C). 
**5: PULL AND EAT THAT MEAT** Bring the wings out of the pit and transfer them to a wooden cutting board or platter and let them rest, loosely covered with foil, for 15 minutes. You can serve these with some fresh mop or some barbecue sauce on the side for dipping, if you like. When the wings are cool enough to handle, use a sharp knife to cut the wings into three sections—drumette, wingette, and tip. Serve with Vinegar-Based Barbecue Sauce (this page). A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **Brine the wings overnight in the Pit Brine** _(this page)_ **in the refrigerator. When ready to cook, pull them from the brine, discard the brine, and pat them dry thoroughly with paper towels. Spritz the wings all over with vinegar and season them liberally with salt and pepper on all sides. Place them in a deep aluminum pan and let them rest while you heat your smoker to 250°F (120°C).** _**COOK:**_ **Transfer the wings from the pan to the grate of the smoker. Smoke the wings for 3 hours, or until the temperature in the meatiest part reaches 180°F (82°C) on an instant-read thermometer. Pull the wings and let them rest on a cutting board or platter, loosely covered with foil, for 30 minutes.** _**EAT:**_ **Uncover the wings and serve with Vinegar-Based Barbecue Sauce** _(this page)_ **on the side for dipping.** * * * A **BIRD EXTRAS** _The Pitmaster's Turkey Sandwich_ _Smoked Chicken or Turkey Pitmaster Stock_ _The Pitmaster's Smoked Chicken Salad Sandwiches_ A **BIRD EXTRAS** * * * I may not have always understood America's love affair with chicken, but no one had to tell me twice when it comes to what's great about sandwiches. My sandwiches are not complicated, big, stacked-up layered things that cost you $14 apiece; they are simple homemade sandwiches that are all about that flavor that the pit gives the bird.
The turkey sandwich is a sneaky treat I give myself, a reward for making the turkey for everybody else to enjoy; the chicken salad is my light weekday work lunch (enhanced with Georgia pecans, of course), and the stock is something useful for all cooks to have around—and this version made from smoky bones is a damn sight better than anything you can get in a can at the supermarket. A **THE** **PITMASTER'S** **TURKEY SANDWICH** **Some of the best barbecue you'll ever have is during your private pitmaster time: When you're cooking a turkey on the pit, before you serve it you pull some meat off for yourself, right then and there on the spot, and make yourself a little secret treat. Just pile up the turkey meat (make sure you get some of that crispy skin on it) and some dill pickles—I love dill pickles with my barbecue—on white bread. I think white bread is underrated—and I also think it's the best stuff for this sandwich.** _Makes:_ **1 BIG BAD SANDWICH** _ingredients_ **2 slices white bread, homemade or store-bought** **2 tablespoons mayonnaise, homemade or your favorite brand** **Kosher salt and freshly ground black pepper** **1 cup (195 g) freshly pulled smoked turkey meat** _(this page)_ **from the underside of the turkey (so folks can still see the bird looking pretty when you serve it)** **Dill pickles, sliced** Lay out the slices of bread and spread mayonnaise on one side of each slice. Season liberally with salt and pepper. On one piece of bread, pile on your turkey meat, taking care to evenly distribute it all over the slice. Lay pickle slices on top of the turkey meat. Close the sandwich with the other prepared piece of bread. And don't tell anyone you did it—it's that good. A **SMOKED** **CHICKEN or TURKEY** **PITMASTER STOCK** **This recipe is going to be the same whether you're using chicken or turkey. You're going to save the bones of the smoked meat and turn them into this delicious stock. 
Because the birds have been smoked, your stock will have a distinct rich flavor; you can make chicken noodle soup out of it or use it for sauces and gravies just like you would any other kind of stock. But it will be better, because this is Pitmaster Stock.** _Makes:_ **ABOUT 3 QUARTS (2.8 L)** _ingredients_ **1 large smoked chicken or turkey carcass, legs, or wings** _(this page to this page)_, **chopped into manageable pieces to fit into a pot** **8 cloves garlic, unpeeled and smashed** **2 stalks celery, coarsely chopped** **1 medium onion, quartered** **2 large carrots, peeled and coarsely chopped** **1 bay leaf** **Kosher salt and freshly ground black pepper** In a large heavy Dutch oven or stockpot, combine all the ingredients. Add enough cold water to submerge all the ingredients, about 3 quarts (2.8 L). Bring to a boil over high heat, use a slotted spoon to skim any scum from the surface, then turn the heat down to low to simmer. Simmer the stock gently for 3 to 4 hours, using a slotted spoon to skim occasionally. Remove from the heat and let the stock cool for about 30 minutes. Strain the stock through a fine sieve. Let cool completely, about another 30 minutes, then refrigerate. Once the stock is cold it should be amber in color and relatively clear. Divide it among plastic containers, leaving some space at the top for expansion if you plan to freeze it. The stock will keep in the refrigerator for about 5 days and in the freezer for about 3 months. A **THE PITMASTER'S SMOKED** **CHICKEN SALAD** **SANDWICHES** **I'm always thinking ahead when I'm cooking in my pit about what I'm going to eat for the week, and I'll throw on an extra half chicken _(this page)_ just so I can have chicken salad sandwiches. This is not a so-called tea sandwich: I like a chicken salad sandwich to be filling and full of different flavors and textures. 
I also like to mix in both white and dark meat; I don't get it when people ask me if I prefer white or dark meat—there's nothing wrong with either, in my book. And remember: I was raised on white bread (or "light bread," as we call it in the South), so that's what I like for this sandwich, but you should have it on any bread you like, or simply pile it up in a bowl and get yourself some saltines—that's a pitmaster's lunch for you right there.** _Makes:_ **4 SANDWICHES** _ingredients_ **About 4 cups (780 g) leftover pulled smoked chicken** _(this page)_, **white and dark meat** **1 cup (240 ml) mayonnaise, homemade or your favorite brand** **1 cup (180 g) chopped Granny Smith or other tart apple** **½ cup (60 g) chopped pecans** **4 hard-boiled eggs, chopped** **1 cup (150 g) halved seedless red grapes** **Kosher salt and freshly ground black pepper** **8 slices white bread** **Garnishes (optional): iceberg lettuce, tomato slices, and red onion slices** In a large bowl, combine the chicken, mayonnaise, apple, pecans, hard-boiled eggs, and grapes. Season generously with salt and pepper. Using a wooden spoon, gently mix the chicken salad together, taking care to thoroughly combine all the ingredients. In the center of each of 4 slices of bread, scoop about one-quarter of the chicken salad and then use a spoon to spread it around gently to cover the whole slice of bread. Top with whichever garnishes you like—all of them or none of them—and place the other slice of bread on top of the sandwich. A **BEEF CUTS** _Smoked Beef Ribs_ _Smoked Beef Short Ribs_ _Smoked Beef Tenderloin_ _Smoked Brisket_ _Smoked Oxtails_ _Smoked Prime Rib_ A **BEEF CUTS** * * * It's hard for me to think of these beef recipes without remembering all the competitions that it took for me to learn how to make them right—competition barbecue prizes sweetness, which requires complicated methods of injections, sauces, and rubs to achieve. 
Pit smoking for me marks a return to the special quality of beef's natural flavor, as it's enhanced only by the process of barbecuing. I think of beef as a luxury; we had it once a week when I was growing up and everybody looked forward to it. It still means a special occasion to me to this day; in fact, if I'm eating beef, I'm probably celebrating something or giving myself a hard-earned treat. The luxuriousness of velvety beef is not compromised in the pit, but rather enhanced—the combination of rich flavor kissed with the deep smoky essence of the pit is among the most mouthwatering sensations for pitmasters like me. A **SMOKED** **BEEF RIBS** **Most folks don't realize that when it comes to the cow there are a few different things a person could mean when you say "ribs." You could mean the short ribs _(seethis page)_ or you could mean the back ribs, the big guys, and that's what most barbecue guys are interested in because they're fun to cook—I call them "tenderlicious" because of how succulent they are when you cook them right.** _Cooking time:_ **ABOUT 4 HOURS AND 15 MINUTES** _Makes:_ **4 SERVINGS** _ingredients_ **8 beef ribs** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** You can usually find beef ribs in most grocery meat departments sold in mini slabs of 3 or 4. If you want to make sure you're getting the best-quality beef available, I'd recommend getting them from a butcher. To feed 4 people, you need 8 whole beef ribs. Serving more people? Count on 2 for most folks, maybe 3 for your biggest eaters. (I often don't even eat 3 of these suckers, so keep that in mind.) 
I suggest a small amount of trimming here: Peel off the thick membrane (or "silver skin," as it's called in the barbecue world) that covers the back side of each rib: It's easy to do if you work your fingers underneath the membrane until you have 2 to 3 inches (5 to 7.5 cm) cleared, then grab the membrane with a kitchen towel and pull it away from the rib and discard it. This will expose some loose fat if the rib has any, so if you see any excess fat, just use a paring knife and cut it out. Now spritz the ribs all over with white vinegar and season them all over, liberally, with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(see this page to this page for details)_ Bring the temperature to 265°F (130°C). Set the ribs on the grate in the pit meat side down, bone side up. **3: KEEP THE TEMP CONSISTENT, FLIP THE RIBS, MOP THE RIBS** We'll cook these ribs on the pit for 2 hours, then flip them over, mop them with the mop, and cook for 2 more hours, mopping every 20 minutes. After 4 hours pull the rib gently and feel for the bone to begin to slip from the meat. Then you can be sure you're done. **4: PULL AND EAT THAT MEAT** Use long, heavy-duty tongs to remove the ribs from the smoker. Transfer them to a platter or cutting board and let them rest, loosely covered, for 10 minutes. Then they're good to go. Serve alongside plenty of sauce for dipping. A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **Peel off the thick membrane (or "silver skin," as it's called in the barbecue world) that covers the back side of each rib: It's easy to do if you work your fingers underneath the membrane until you have 2 to 3 inches (5 to 7.5 cm) cleared, then grab the membrane with a kitchen towel and pull it away from the rib and discard it. This will expose some loose fat if the rib has any, so if you see any excess fat, just use a paring knife and cut it out. Now spritz the ribs all over with white vinegar and season them all over, liberally, with salt and pepper.
Put the ribs in a deep aluminum pan and let them rest while you heat your smoker to 250°F (120°C).** _**COOK:**_ **Transfer the ribs from the pan onto the grate of the smoker and cook for 2 hours. Remove the ribs from the smoker and transfer them to a large clean aluminum pan, meat side up. Pour 2 cups (480 ml) water into the pan, cover the pan with aluminum foil, and return it to the smoker for 2 more hours. At the 4-hour mark, remove the pan from the smoker and glaze the top of the ribs (only the top) with the Vinegar-Based Barbecue Sauce** _(this page)_ **—use just enough to coat the ribs, taking care not to over-sauce. Put the pan back in the smoker, uncovered, and cook for 15 minutes.** _**EAT:**_ **Remove the pan from the smoker and let the ribs rest, loosely covered, for 10 minutes. Serve with more sauce on the side, if you like.** * * * A * * * **_The_ EARLY DAYS _of_ COMPETITION BARBECUE** I STARTED COMPETING in barbecue contests in June 1996. My dad, Jack, had died unexpectedly from a stroke six months before, when he was only fifty-six years old. I was thirty-two, I was divorced, I had to pay child support, I had to pay the bills, and I had two boys to feed. I got into competition barbecue because my mother's family made a good barbecue sauce that my parents were selling, and I wanted to promote it by going out and trying to win some trophies and gather some notoriety. Well, I did much more than that. The first contest I went to was the Lock-&-Dam BBQ Contest in Augusta, Georgia. I went by myself with no crew, and I took a first place in the whole-hog category. I said to myself, "This right here, I can do this." At the time Pat Burke, a legendary pitmaster from Murphysboro, Illinois, was the man to beat. After my dad, Pat is my barbecue hero; he's eighty years old now, and we're still friends. 
When I was first getting into barbecue and doing local contests in Georgia, I knew I was never going to get any better if I kept going up against people who I knew I could beat. Pat was winning all the contests then, and I think even early on he respected and liked me, while he wanted to beat the brakes off everyone else out there. I was probably one of the first people who made a job out of competing on the barbecue contest circuit—I didn't have another job on the side like most folks did then and some still do now, and that gave me an edge. Most people were doing barbecue contests as a hobby and they didn't depend on competition money to pay their bills, but I figured out I could do it for a living because I got good at it. Jack taught me the hardest way to cook pit barbecue there is, so when I started competing it was a piece of cake compared to what I had been doing for my dad. I understood the flavors of the meat and what complemented it, and I made a science of the barbecue cooking contests because I wanted to get better at it. At the early contests the people competing weren't necessarily the people who had a background in barbecue; contests were more like social gatherings, like competitions for something folks were doing in their backyards. Today barbecue has been brought to the forefront by television shows and people writing cookbooks, and the game is a whole lot more competitive. People are buying expensive equipment and going to cooking schools to speed up their learning curves. My education came from the Jack Mixon School of Barbecue. It's hard for me to win contests these days because I'm the Big Guy now. I'm on TV, I've won more contests than anyone else, and there's a target on my back. Others like me bring a notoriety to barbecue that wouldn't have happened if we weren't out there, but like any "hot" profession, there's always a desire for new faces and for a future generation of pitmasters to come along.
Competitions want me to be there right now, but they don't want me to win. * * * A **SMOKED** **BEEF SHORT RIBS** **Hey, as long as you've got your pit up and smoking, you can throw some short ribs or oxtails on there, too, but I don't usually cook these in my other smokers because they're small and somewhat unusual cuts.** **Short ribs are the first through the fifth ribs that sit right under the neck of the animal and are usually 3 to 4 inches (7.5 to 10 cm) long. There's a nice long rectangle of well-marbled meat that sits on top of these short, wide bones; the fat in the marbling melts as these ribs cook, which keeps them moist, tender, and juicy. I like to eat these by dipping them in either some extra vinegar mop _(this page)_ or some good barbecue sauce _(this page)_. The advantage they have is that compared to other beef ribs and pork ribs, they cook up relatively fast in the smoker.** _Cooking time:_ **2½ TO 3 HOURS** _Makes:_ **4 TO 6 SERVINGS** _ingredients_ **3 to 4 pounds (1.4 to 1.8 kg) bone-in beef short ribs, each 2 to 4 inches (5 to 10 cm) long (6 to 8 pieces)** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** I often see short ribs in grocery store butcher counters, but if you want to make sure you're getting the best-quality beef available in your area, I'd recommend getting them from a butcher. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Spritz the ribs all over on both sides with white vinegar. Then season them liberally all over with salt and pepper. **3: KEEP THE TEMP CONSISTENT, MOP THE RIBS** We need a steady 250°F (120°C) in the pit for these ribs. After the first hour, we're going to flip them and mop them with our mop, and repeat that process every 20 minutes for about 2 hours. **4: TEST FOR DONENESS** Start checking for doneness at the 2-hour mark. 
The short ribs are done when they are very dark brown and tender enough to pull apart. You will see that the meat has shrunk back from the ends of the bones by ¼ inch (6 mm) or so. **5: PULL AND EAT THAT MEAT** Transfer the short ribs to a wooden cutting board, cover with foil, and let rest for at least 5 minutes. Serve with barbecue sauce on the side. A **SMOKED** **BEEF TENDERLOIN** **Beef tenderloin is pretty much synonymous with Christmas for some folks, or with an anniversary party or some other occasion when you're willing to splurge on a fancy, delicate cut of meat. I think if you're celebrating, beef tenderloin is worth it: The cut happens to contain a whole lot of natural flavor already, and it's meltingly tender to start with, so the only two ways you can screw it up are by overseasoning it or, worse, overcooking it. This meat should be cooked and enjoyed at medium-rare doneness and no more, so keep that in mind when you cook it—this is not the time to fall asleep at the pit.** _Cooking time:_ **ABOUT 1½ HOURS** _Makes:_ **6 SERVINGS** _ingredients_ **1 (3-pound / 1.4-kg) beef tenderloin roast** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **1: SELECT AND PREP THE MEAT** If you're going all out, go all out and get the best one you can. Overhandling tenderloin is the surest way to ruin it, so treat it with special care. With a cut this tender, there's not going to be anything to trim. Wipe the tenderloin down and pat it dry. Wipe it all over with white vinegar and season with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Transfer the tenderloin to a large heavy wide-bottomed aluminum pan. Bring the smoker temperature to 265°F (130°C), then put the pan in the smoker. **3: KEEP THE TEMP CONSISTENT, MOP THE TENDERLOIN** Cook for about 30 minutes, then go in and mop the tenderloin. 
At the 1-hour mark, hit it again all over with the mop and put the pan back in the smoker. Cook for 15 minutes more and use your instant-read thermometer to begin testing for doneness. It may take another 15 minutes. **4: TEST FOR DONENESS** Because we're taking care not to overcook this sucker, we're going to use an instant-read thermometer and remove it the minute we get to 125°F (52°C) for rare, 130 to 135°F (54 to 57°C) for medium rare—at that point we know we're done. **5: PULL AND EAT THAT MEAT** Transfer the tenderloin to a wooden cutting board and let it rest for 10 minutes. Cut the tenderloin crosswise against the grain into ½-inch-thick (12-mm-thick) slices and serve immediately. A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **Use a clean kitchen towel or paper towels to pat the tenderloin dry. Spritz it all over with vinegar, then season it liberally on all sides with salt and pepper. Let the tenderloin rest in an aluminum roasting pan while you heat your smoker to 275°F (135°C).** _**COOK:**_ **Transfer the tenderloin from the pan onto the grate in the smoker and cook for about 1½ hours, until the internal temperature at the center of the tenderloin reaches 125°F (52°C) for rare, 130 to 135°F (54 to 57°C) for medium rare. Transfer the tenderloin to a cutting board and let it rest for 10 minutes.** _**EAT:**_ **Cut the tenderloin crosswise against the grain into ½-inch-thick (12-mm-thick) slices and serve immediately.** * * * A **SMOKED** **BRISKET** **Let me tell you something humorous: I have so many prizes for cooking brisket, but I learned to cook a brisket backward: My dad never cooked it in his life. When we were growing up, I didn't know what a damn beef brisket was—we didn't eat or cook brisket, you didn't find them in the Deep South in supermarkets very often, and if we ever ate brisket it was ground up into hamburger meat at a restaurant.
I taught myself how to cook brisket when I started competing seriously on the barbecue circuit by studying the meat and paying close attention to how other people I respected were handling it. Here's how I learned how to do it on masonry pits, which is by far the easiest way: We don't need brining, we don't need injecting—we keep it easy, and it's delicious.** _Cooking time:_ **ABOUT 8 HOURS** _Makes:_ **20 TO 25 SERVINGS** _ingredients_ **1 (12- to 15-pound / 5.4- to 6.8-kg) untrimmed beef brisket** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **1: SELECT AND PREP THE MEAT** I like a big beef brisket that hasn't been cut up or trimmed—a 12-to 15-pounder (5.4 to 6.8 kg) if you can find one. This just means I can feed a lot of people or have some excellent leftovers. Butcher shops and groceries usually cut briskets up into two pieces: the first or "flat" cut from the cow's belly and the second or "point" cut, which is near the foreshank. Both have pros and cons: The first cut is evenly shaped and lean; the second cut is fattier and tougher but has more flavor. I am giving you a recipe for a whole untrimmed brisket, which includes both the flat and the point; you may have to order directly from a butcher, but trust me, it's worth it: This is what I cook in my pits at home. And after buying meat from mass-market retailers and special-ordering it from top-notch purveyors, I have done enough homework that I can tell you that to make a great brisket, you have to start with great-quality meat. I like brisket from the Wagyu cattle, a breed first cultivated in Japan and prized for its fine marbling and rich flavor that you can now get in the United States. Get it if you can. Before you can smoke your brisket you need to trim the membrane, which is the fine silvery-white weblike coating on the meat. Use a sharp paring knife and slowly separate and cut away the layers of sticky white matter that surround the meat. 
It may take a while, but if you don't do it—and keep in mind that I'm not the kind of guy who works hard when he doesn't have to—your meat will be tough, so do a good solid job and take your time. When you're finished, use a kitchen towel or paper towel to rub the meat all over with white vinegar. Then season it liberally all over with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Once our pit is stable at 275°F (135°C), gently place the brisket, meat side down, fat side up, on the grate in the pit. **3: KEEP THE TEMP CONSISTENT, FLIP THE BRISKET, MOP THE BRISKET** Smoke the brisket for 3 hours to get a good crust on it. Then flip the brisket, which means the fat side will now be down and the meat side up. Mop the brisket all over with the mop. We're going to smoke the brisket like this for 5 more hours, mopping every 30 minutes. **4: TEST FOR DONENESS** Use an instant-read thermometer inserted in the point end to check the temperature: We're looking for 205°F (95°C). It may take as little as a few more minutes to get there or up to an hour more, so be sure to check it every 15 minutes after the 5-hour mark. * * * _* Note: Guess what, folks? Because my dad never cooked a brisket, I don't have any tricks to check for doneness; even I use a meat thermometer on this one!_ * * * **5: PULL AND EAT THAT MEAT** We pull the brisket off the smoker when the point end registers 205°F (95°C), and then we wrap it in foil and let it rest for 2 hours on a wooden cutting board. Then we slice the brisket against the grain, drizzle a little sauce on it if we feel like it, and enjoy something that's out of this world—it's so good I sometimes wish I could haul around a masonry pit to my competitions because I'd win every damn time. 
A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **Trim the brisket: Using a sharp paring knife, slowly separate and cut away the layers of sticky white matter that surround the meat; take your time. When you're finished, use a kitchen towel or paper towel to rub the meat all over with white vinegar. This adds a layer of flavor, moistens the meat, and kills bacteria in the process. Then season it liberally all over with salt and pepper. Place the brisket in a deep aluminum baking pan and let it rest while you heat your smoker to 350°F (175°C).** _**COOK:**_ **Transfer the brisket from the pan to the grate of the smoker and cook the brisket for 2½ hours. Remove the brisket from the smoker, cover it with foil, and place in a deep aluminum baking pan. Put the pan in the smoker and cook for another 1½ hours, or until the temperature in the point end of the meat reaches 205°F (95°C) on an instant-read thermometer. Remove the brisket from the smoker and pan and wrap the brisket, still covered with foil, in a thick blanket. (The blanket is my own personal technique that I discovered on the professional barbecue circuit for making sure that brisket gets nice and tender without drying out, and I use it in competition to this day.) Let it rest this way at room temperature for at least 2 hours.** _**EAT:**_ **Remove the blanket, discard the foil, and set the brisket aside on a cutting board, taking care to save the accumulated juices. Pour off the clear fat that rests on top of the pan, taking care to retain the juices, then pour the juices into a medium saucepan. Warm the juices over medium heat and allow them to come to a simmer. Meanwhile, slice the brisket against the grain; try to make the slices as consistently sized as possible. Place the slices on a warmed platter and pour the juices over them. Serve immediately.** * * * A **SMOKED** **BEEF OXTAILS** **If you're uninitiated, oxtail is just what it sounds like: the tail of the cattle. 
Butchers usually cut it into segments. Oxtails are enjoyed a lot in Europe, where it's something like a delicacy, and in the rural South, where we pride ourselves on eating all parts of an animal; mostly it's braised in soups and stews, but when you smoke it in the pit it takes on a delicious, gamy flavor.** _Cooking time:_ **ABOUT 5 HOURS** _Makes:_ **6 TO 8 SERVINGS** _ingredients_ **6 pounds (2.7 kg) oxtails, cut into 2- to 3-inch (5- to 7.5-cm) pieces** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** You can get oxtails from any butcher, even the ones in the grocery store, but you will most likely have to ask. Spritz your oxtails all over with white vinegar. Season them liberally all over with salt and pepper. Do any additional trimming to make sure the oxtail pieces are all about the same size and thus will cook evenly. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(seethis page to this page for details)_ Bring the temperature to 265°F (130°C). Put the oxtails on the grate in the pit, cut side down. **3: KEEP THE TEMP CONSISTENT, FLIP THE OXTAILS, MOP THE OXTAILS** Smoke them for 3 hours. After 3 hours, flip them over to the other side and smoke them for 1½ hours. Transfer your oxtails to an aluminum baking pan. Pour in your mop until it reaches a little way up the side of the pan, about 1½ cups (360 ml) or so, depending on the size of your pan. Put the pan in the pit and smoke for another 1½ hours. **4: TEST FOR DONENESS** The oxtails should be crisp on the outside and tender when you push down on them with your index finger (if they're hard, they need a little more time). **5: PULL AND EAT THAT MEAT** Transfer the oxtails to a wooden cutting board, cover them loosely with foil, and let them rest for about 10 minutes before you gnaw down on them. There's not a ton of meat on oxtails, but they're fun to eat. 
Some chefs pull the meat off the bones and separate out the cartilage and use the oxtails in tacos or fold the meat into a rich gravy that you can serve atop grits, but I like eating them fresh out of the pit and straight off the bone myself. A **SMOKED** **PRIME RIB** **I think that the best way to cook rib-eye steaks on our masonry pits is to prepare the whole rib-eye loin, or the prime rib. This meat has just about the most marbling you're going to get on a cow: This marbling means it has good fat content that makes for a good crust on the outside of the meat. Prime rib is an expensive, luxurious cut, so when you splurge on it and cook it, make it your showpiece for special occasions and holidays. Although your guests will think you worked very hard on it because of its fancy reputation, prime rib is very easy to do on a pit. With this cut, you don't need to do any injections or marinating—you need no damn pretending when you're cooking the real deal.** _Cooking time:_ **ABOUT 3 HOURS** _Makes:_ **10 TO 12 SERVINGS** _ingredients_ **1 (5-rib) prime rib (10 to 12 pounds / 4.5 to 5.4 kg)** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **Vinegar-Based Barbecue Sauce** _(this page)_ **1: SELECT AND PREP THE MEAT** If you're going to invest in an expensive cut like prime rib, I suggest getting it from a butcher if you can. The most important thing to know is that there are actually two separate cuts that may be called "prime rib," so you need to make sure you're getting the right one. The first cut (ribs 1 through 3) is closer to the loin, so it's the most tender. The second cut (ribs 4 through 7) is closer to the chuck end and is denser and more fatty. Ask the butcher for the first cut, and buy the best quality you can afford. 
Note that I'm giving you a recipe here for what I consider a small roast—sometimes I cook a prime rib that's 20 pounds (9 kg) to feed my folk—so if you want to go big and scale this recipe up, that's easy to do if you know the rule of thumb: Allow 30 minutes per pound, which can be applied to a prime rib of any size. All we need to do to prep the meat is use a sharp knife to trim any excess fat, but don't go crazy, because we want enough fat on the outside to give us that bubbling crust—so leave a good ½-inch (12-mm) border around the meat if you need to do any trimming. Then spritz the meat down with white vinegar, which gives it a base layer of flavor, makes it a little wet, and kills bacteria in the process. Now come in with salt and pepper and season the meat liberally all over. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(see this page to this page for details)_ Bring the temperature to 265°F (130°C). Put the prime rib on the grate in the pit with the fat side down and the rib side up. **3: KEEP THE TEMP CONSISTENT, FLIP THE ROAST, MOP THE ROAST** Smoke it for 2 hours. At the 2-hour mark, we're going to go into our pit and flip the roast because we want to get a good crust all over it. At that point when we go in to flip, we're going to use our mop and mop the roast. We're going to continue mopping the roast every 30 minutes, taking care to hit the roast all over in order to impart that vinegar tang flavor in there with that beef. **4: TEST FOR DONENESS** At the 2½-hour mark, we're going to start testing for doneness. We're going to take the internal temperature right in the center of the roast and look for 165°F (75°C). The edges are going to be for someone like my dad who wants it dead and burnt, and at the center it's going to be more like a medium rare—good to eat. **5: PULL AND EAT THAT MEAT** You ain't gonna need a sauce for this; you ain't gonna need anything else, it's so delicious. 
A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **All we need to do here is use a sharp knife to trim any excess fat, taking care to leave a good ½-inch (12-mm) layer over the meat to provide some flavor. Then spritz the meat down with white vinegar and come in with salt and pepper and season the meat thoroughly all over. Place the meat in a large aluminum roasting pan and let it rest while you heat the smoker to 250°F (120°C).** _**COOK:**_ **Transfer the meat from the pan onto the grate of the smoker. Cook for 5 hours, or until the internal temperature reaches 130°F (55°C) on an instant-read thermometer when measured at the center of the roast, for medium rare. Transfer the meat to a platter or cutting board, cover it loosely with foil, and let it rest at room temperature for at least 1 hour.** _**EAT:**_ **Unwrap the roast, reserving any accumulated juices. In a medium saucepan over medium heat, allow the drippings to simmer for a couple of minutes. Pour the heated drippings over the roast. Carve the roast and serve it immediately.** * * * A * * * **WHEN _the_ WINNING IS DONE** BEING AT THE JACK DANIEL'S World Championship Invitational Barbecue Competition is a big deal; you've got the best teams from around the world there, including teams from Europe and Asia. There's one big party in Lynchburg, Tennessee, let me tell you. One year we had done really well in the competition and when the awards ceremony was over, we were partying down by the side of a creek on the competition site. We don't party when we're competing. But we had some celebrating to do. The creek water has a lot of iron in it, which is why they like to make whiskey with it; at that time of year there was only about three feet of water in the creek. Canada sent a lot of teams that year, and I was drinking my Crown Royal and cheering for them. Like everyone who drinks a lot of any liquid, I had to break water, as they say. 
There was a Porta-Potty about ten feet from where I was standing, but instead of going over to use it I decided I would just let go into the creek. So I'm standing on the edge of the creek and I got to wobbling . . . and before I knew it, I had rolled down the embankment about ten feet, right into the water. Everybody was looking down at me, and I'm on my side looking up, trying to figure out what in the hell happened. I tried to get out, but I knew I looked like one of them damn alligators trying to claw his way up a mud hill. On the bank folks were as drunk as I was, but one of them realized that there was a utility light pole near where I was, so he got the group to do a drunk human chain to try to reach me and get me out of that creek. Well, they about fell in trying. Finally, three sober people came around and dragged me out of there. By that point I had algae and mud all over me, and it was October and getting chilly. My team took me back to the hotel, where I put on a clean pair of shorts and—what the hell—kept drinking. When I woke up the next morning, my bed looked like a damn beach—sand was everywhere, I was bruised up on my side, and my wet clothes were piled in the corner of the room. All I could think about was how I did not want my wife, Faye, to see those dirty-ass clothes when I got home, so I put them in the back of my truck, thinking I'd take them out before she could see them. Well, I drove home and while I waited for Faye to get home from her job as the county tax commissioner, I got those muddy clothes and washed them, but I didn't have time to put them in the dryer. When Faye got home she went into the laundry room, then came back out and said, "When did you start washing clothes?" I said, "Honey, I know you work hard all day, and I didn't want you to have to work so hard at home, too," and I thought I'd just about get away with it. But then she said, "No, your muddy ass fell in the creek in Lynchburg!" 
Turns out someone emailed her a picture of the whole thing and she was just waiting for me to tell her about it—she already knew before I even got there. I'd like to tell you the whole mess happened because an anaconda crawled out of that creek and pulled me into it, but it's not true. I tumbled in like a wheel. I love to tell that story on myself and I love the part about my wife outsmarting me, but the truth is I only drink after a cooking contest is over. I learned a long time ago that when it comes to competition barbecue, you can't win both the party on Friday night and the barbecue contest on Sunday—it's got to be one or the other, and I'm there to make my living, so there's no choice. Even one beer makes you less clearheaded and more likely to miss a crucial detail while you're cooking. The competition on the professional barbecue circuit is so tough nowadays that if you drink before you cook, you'll wind up losing. You've still got teams that go to contests to party and throw down, and they don't ever win a thing. Now I'm not saying that if you quit drinking it's going to make you a better pitmaster, but it's going to make you pay a lot more attention to what you're doing. You work hard, you focus hard, and once you're done you can let your hair down. * * * A **STEAKS** _Smoked Rib-Eye Steaks_ _The Jack Mixon T-bone Steak_ A **STEAKS** * * * "Steak" ought to be synonymous with "Jack Mixon," because when my daddy wanted to do something nice for himself, he would eat a steak. He probably loved a T-bone more than he loved some members of his family. And he made sure he got one every weekend, too. Although a steak is a relatively small piece of tender meat compared to something like a hog shoulder, you can still smoke it in the pit with great success. 
It's true that grilling a steak over high heat can give you that seared-in crust on the outside with a moist, juicy center, but if you follow my method, you can produce the same result from pit cooking (it will take more time, of course, because you're cooking over lower heat) and your steak will taste different from any you've ever pulled off a grill. A **SMOKED** **RIB-EYE** **STEAKS** **I have been cooking steaks my whole life: The first meal my dad ever let me cook for my family was steak, so I know a thing or two about how it's done. There are two secrets to a great grilled steak. Number one: Buy the best quality of meat you can, because it really does make a difference when it comes to steaks. Number two: You need a golden, seared crust on that meat to lock in its juices and flavors. You get that crust by cooking the steak over dry heat on a very hot grill or in the pit by smoking it.** _Cooking time:_ **ABOUT 50 MINUTES** _Makes:_ **2 OR 3 SERVINGS PER STEAK** _ingredients_ **12- to 16-ounce (0.3- to 0.5-kg) bone-in rib-eye steaks (1½ inches / 4 cm thick)** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **1: SELECT AND PREP THE MEAT** You need steaks that are at least 1½ inches (4 cm) thick and preferably thicker, and close to a pound (0.5 kg) each (remember you're serving them to more than one person), because steaks of this size and thickness do best in smokers. Trim any excess fat from the steaks, taking care to leave a good ½ inch (12 mm) of fat around them to ensure you get a good crust. Spritz the steaks all over with white vinegar. Season them liberally with salt and pepper all over the fat and the meat. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(see this page to this page for details)_ Gently set the steaks on the grate in the pit. 
**3: KEEP THE TEMP CONSISTENT, FLIP THE STEAKS, MOP THE STEAKS** Remember that we don't get a searing fire with these coals like you do on a grill, so it's going to take more time to cook the steaks. You want to maintain a temperature of about 275°F (135°C) in order to get a crispy bark on the outside of the meat. Cook the steaks for 15 minutes, then flip the steaks. We're going to go 15 minutes on the second side and then flip them again. Now we're going to cook for 10 minutes on the original side. Then we're going to flip them and go for 10 minutes more on the second side and then hit the steaks with our mop. **4: TEST FOR DONENESS** To test for doneness, press the top of the steak with your index finger: Rare will be soft and yielding; medium will be firmer; well-done steak will be quite firm. You can also use an instant-read thermometer inserted from the side. For medium rare, cook to 130 to 140°F (55 to 60°C); for medium, cook to 150°F (65°C); anything over 165°F (75°C) is well done, Jack Mixon–style. **5: PULL AND EAT THAT MEAT** Now we're going to bring the steaks out of the pit and transfer them to a wooden cutting board, cover with foil, and let them rest for 5 minutes. They're ready to eat. Perfect. You could almost cut them with a fork. But instead you're going to make thick slices cut against the grain and eat these with a big baked potato on the side. A * * * * * * **ON THE GRILL OR SMOKER** * * * **Prepare the steaks as described on** _this page_. **About 30 minutes before you're ready to cook them, prepare a charcoal or gas grill or a smoker with a grate to 500°F (260°C). Place the steak on the grate or grill and sear it over direct heat for about 3 minutes per side. For rare, cook to 125°F (52°C); for medium-rare, cook to 145°F (63°C); for well done, look for a thermometer reading of "UGH!", which translates to anything over 165°F (75°C). Transfer the steaks to a platter and cover with foil. Let rest for 5 minutes. Uncover the steaks. 
The best way to carve the steaks is to use a sharp knife to cut the bone out completely (save it someplace for yourself for gnawing on later on), and then cut the meat across the grain in thick diagonal slices. Then knock yourself out.** * * * A **JACK MIXON'S** **T-BONE STEAK** **As I said in the introduction _(see this page)_, a T-bone steak was the first thing my dad ever let me cook and that I remember barbecuing. He loved a T-bone above anything out there. I'm not that way: I like the T-bone mostly because it reminds me of him—but to me, the rib-eye is the better steak. But my dad loved a damn T-bone, and this recipe is for him.** _Cooking time:_ **ABOUT 20 MINUTES** _Makes:_ **ABOUT 2 SERVINGS PER STEAK** _ingredients_ **12- to 16-ounce (0.3- to 0.5-kg) T-bone steaks (1½ inches / 4 cm thick)** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ **1: SELECT AND PREP THE MEAT** Spritz the steaks all over with white vinegar. Then season them liberally all over with salt and pepper. **2: PREPARE THE PIT OR SMOKER AND PUT THE MEAT ON** _(see this page to this page for details)_ Bring the temperature to 265°F (130°C). Gently set the steaks on the grate in the pit. **3: KEEP THE TEMP CONSISTENT, FLIP THE STEAKS, MOP THE STEAKS** You're looking at about a 20-minute total cook time for these. You're going to come in after about 10 minutes, mop them down with that mop, flip them, and mop again. Cook for another 10 minutes. **4: TEST FOR DONENESS** To test for doneness, press the top of the steak with your index finger: Rare will be soft and yielding; medium will be firmer; well-done steak will be quite firm. You can also use an instant-read thermometer inserted from the side. For medium rare, cook to 130 to 140°F (55 to 60°C); for medium, cook to 150°F (65°C); anything over 165°F (75°C) is well done, Jack Mixon–style. 
**5: PULL AND EAT THOSE STEAKS** Pull the steaks off the pit, transfer to a wooden cutting board, cover loosely with foil, and let them rest for 10 minutes. Then get your damn baked potato and your momma's garlic bread ready to rock and roll with a little salad with Thousand Island on it, some sweet tea, you're done. Maybe some Smoked Blackberry Cobbler _(this page)_ later. A * * * * * * **ON THE GRILL OR SMOKER** * * * **Prepare the steaks as described on** _this page_. **About 30 minutes before you're ready to cook them, prepare a charcoal or gas grill or a smoker with a grate to 500°F (260°C). Place the steak on the grate or grill and sear it over direct heat for about 3 minutes per side. Transfer the steaks to a platter and cover with foil. Let rest for 5 minutes. Uncover the steaks. The best way to carve the steaks is to use a sharp knife to cut the bone out completely (save it someplace for yourself for gnawing on later on), and then cut the meat across the grain in thick diagonal slices. Then knock yourself out.** * * * A * * * **A HELPING HAND** MY DADDY WAS a hard and tough man, but he did have a "helping hand" side. When we barbecued for our family events, he would be sure to give additional food to the old folks in our community. He loved to go fishing, and when he'd catch fish he would clean 'em and give 'em to his friends to enjoy. One Christmas he loaned—and by "loaned" I knew that meant "gave"—money to a friend of his to help his family, even though the truth of the matter was that old Jack didn't have cash to spare because those were tough times for us, too. My mama was fire-hot mad with him for doing it, but that's who he was. Odd as it sounds, my dad was in a lot of ways nicer to friends than he was to family. Jack would overlook mess-ups from others that he would never tolerate from family. I guess it was that he expected more from blood than he did from others. 
He instilled the idea of doing things the "right way" in his family, and to this day I expect more out of my family than anyone else. I just also make sure that I do what I can to help my family folk succeed in everything they do. My daddy believed the right way was usually the old-fashioned way of doing things. He loved everything old, and he believed the old way of doing things was always best. He was taught at an early age to barbecue on brick pits just like his dad and granddad had. He could have changed his ways and adapted to modern devices as he got older and plenty of other easier options became available, but he didn't. He yearned for the ways of the rural southern lifestyle he grew up with. It was a tough and primitive life, but he loved it. For example, we had fifteen acres of sugar cane and a three-roller power cane mill that was more than a hundred years old. We ground the cane into juice and boiled the juice down into syrup. This all took place in the fall and winter, while we were still barbecuing in pits every week. On top of that, Jack had a running sawmill that was also more than a hundred years old, which we used to custom-cut cypress lumber and shingles that we used and sold. So Jack was always busy and he also didn't drink. I remember I came home late one Friday night when I was nineteen and I was a "little bit" drunk. Mama was up and rushed me off to bed so as not to wake Daddy. The next morning he got me up, took me down to the mill, and worked me all day long nonstop. He never mentioned drinking but he damn near killed my hungover ass. I never came home drunk again. (Not that I didn't drink, but when I did I knew to spend the night somewhere else.) The deal with my dad is that he believed in the tried-and-true way of doing things, because you had to have skill, knowledge, and the drive to succeed. Whether it was cutting wood, making syrup, or cooking meat, he taught me how to do it "right." 
And he tried to make sure that's how I lived my life, too. * * * A **BEEF EXTRAS** _Smoked Burgers_ _T-bone Bone, Tomato, and Onion Soup_ _Smoked Beef Stock_ _Smoked Meatloaf_ A **BEEF EXTRAS** * * * If I thought it wouldn't be cheating barbecue fans out of their money, I might not bother to give you a burger recipe—these days you can get a "gourmet" burger just about any old place. The burger trend has spread far and wide, my friends, and you don't need me to tell you about it. But you know that special flavor that only comes from smoking meat in a pit? Wait until you try a burger smoked in that pit, and you'll forget about that so-called gourmet version that costs a week's salary. The "extra" things you can do with beef that I show you in this section elevate very common preparations—like meatloaf, tomato soup, and simple stock, in addition to the burgers—into intense and very flavorful foods, the kind folks remember long after they're eaten. You can use the smoked beef stock here as a base for any soup or sauce you like; it lends smoky richness to anything you add it to. Try boiling pasta with it sometime, and you'll see. A **SMOKED** **BURGERS** **Like most red-blooded Americans, I like a burger. I've made hundreds if not thousands in my lifetime, and I've won prizes for cooking them. When I fire up my masonry pits, I will occasionally throw a burger or three in there because that smoke-kissed flavor cannot be beat—a burger smoked in the pit is even better than one on the grill because of that distinct flavor. If you've got your pit stoking, give it a try. 
It'll be the longest-cooked burger you've ever had, but it might just be the best.** _Cooking time:_ **ABOUT 20 MINUTES** _Makes:_ **2 SERVINGS** _ingredients_ **1 pound (0.5 kg) ground beef** **1½ teaspoons kosher salt** **1½ teaspoons freshly ground black pepper** **1 tablespoon Vinegar-Based Barbecue Sauce** _(this page; optional)_, **plus more for serving, if you like** **Pit Mop** _(this page)_ **2 hamburger buns, split** **Toppings** _(optional)_ **: Lettuce, tomato slices, mayonnaise** If you can grind your own beef from good brisket, by all means do that. If you buy it, like most people, get some good local grass-fed beef if you can, and get some with some fat content in it—80/20 is ideal. Every recipe you'll ever read for making burgers will tell you not to overhandle the ground beef or mush it up too much when you make your patties. I like a big-ass burger (I make two from a pound), and I like to mix a little barbecue sauce in with the meat. For the most basic burger: In a medium bowl, use your hands to gently combine the beef with the salt, pepper, and sauce, if using, until the seasonings are integrated with the beef. Form into two patties (or more if you want smaller burgers). You want to put the burgers in the pit when it's running at about 300°F (150°C). Lay the burgers on the grate above the coals. For medium rare, cook the burgers for 7 to 8 minutes, hit them with the mop, flip them, mop the top side, cook for 3 to 4 minutes more, and then serve. To test for doneness, insert an instant-read thermometer from the side of a burger into the center. The internal temperature should be about 145°F (63°C) for medium rare. Pull the burgers from the pit and let them rest, lightly covered with foil, on a wooden cutting board or a platter for about 10 minutes. Toast your buns, set up your lettuce, tomatoes, and mayo and your barbecue sauce and whatever else you like on your burger, and chow down. 
A * * * * * * **IN THE SMOKER** * * * _**PREP:**_ **Prepare your smoker to 300°F (150°C). Prepare your burgers as directed on the opposite page, forming one pound of ground beef into two patties (again, form more if you want smaller burgers). Place the burgers in a shallow aluminum pan, and place the pan in the smoker.** _**COOK:**_ **Cook for 15 minutes for medium-rare, and up to 30 minutes for medium-well. Remove the burgers from the smoker and allow them to rest, uncovered, while you melt 2 tablespoons of cold unsalted butter in a medium skillet over medium heat. When the butter is hot (but not smoking), slide the burgers carefully into the skillet using a spatula. Cook the burgers for about 3 minutes on each side, just until they're seared and a nice crust has formed (taking care not to overcook them). Top the patties with slices of cheddar cheese, if you like. Slide the burgers onto a platter and let them rest, lightly covered with aluminum foil, while you toast a couple of buns on the "light" setting in a toaster oven or toaster.** _**EAT:**_ **Fill the buns with the burgers and top with whatever you like; for me, it's a light smear of mayonnaise, a slice of ripe tomato, and a nice piece of iceberg lettuce. And that's a damn good burger.** * * * A **T-BONE** **BONE, TOMATO & ONION** **SOUP** **When I was growing up with my family we didn't think we were poor, but looking back I realize that by most standards we probably were. So anytime we'd get a steak we didn't waste anything on it. Since my dad loved T-bones and that's what we mostly ate when we could, my grandmother took the leftover bones, some tomatoes, and onions we had in the garden and made this soup.** **My granny, who we lived with until I was ten years old, never did eat steak herself. She waited until we were done eating and she took our bones away. 
I loved my granny to death, but she was the one that made my daddy as tough and ornery as he was—he took after her, which, come to think of it, wasn't a bad thing. If my dad had been any less strict with me I might not have learned all these lessons about barbecue that I'm trying to pass on to you.** _Cooking time:_ **ABOUT 3½ HOURS** _Makes:_ **6 TO 8 SERVINGS** _ingredients_ **4 or 5 T-bone steak bones or other steak bones** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **2 cups (240 g) thinly sliced onions** **12 small tomatoes, diced** Rinse the bones thoroughly in cold water and pat them dry with a clean kitchen towel or paper towels. Wipe them down with a little white vinegar. Season the bones with salt and pepper. In a large heavy soup pot or Dutch oven, submerge the steak bones in 5 cups (1.2 L) water. Add the onions. Season the water with additional salt and pepper to taste. Bring to a boil over high heat, reduce the heat to medium, and boil for 30 minutes. Add the tomatoes, reduce the heat, and let the soup gently simmer over low heat, stirring occasionally, for 2½ to 3 hours, until the soup has a rich meaty flavor and the tomatoes have disintegrated. Taste the soup occasionally for seasoning and add more salt and pepper as needed. Remove from the heat and skim off any fat that has risen to the surface. Strain the liquid into a clean pot and discard the bones. Serve this soup piping hot with some crusty bread or buttered rolls on the side, if you like. A **SMOKED** **BEEF STOCK** **Smoking in your pit today? Might as well throw some bones in there, smoke them, and make stock. You can use this as the base for just about any soup or stew or sauce (it's an especially good base for tomato soup). It's handy to have around, and it's got much more flavor than anything you can buy at the grocery store in a can. 
Many grocery stores sell beef bones in the freezer section near the meat department, but you can buy the bones from a butcher, too.** _Cooking time:_ **ABOUT 6 HOURS** _Makes:_ **ABOUT 8 CUPS** _ingredients_ **5 pounds (2.3 kg) beef or veal marrow bones** **Distilled white vinegar** **Kosher salt and freshly ground black pepper** **1 large yellow onion** **2 large carrots, peeled and coarsely chopped** **2 stalks celery, including leaves, cut into thirds** **1 large tomato** **1 medium potato, scrubbed and cut into 1-inch (2.5-cm) cubes** **4 sprigs fresh parsley, or 2 teaspoons dried parsley** **1 bay leaf** **2 teaspoons dried thyme** **2 cloves garlic, peeled and smashed** **1 egg white** **1 crushed eggshell** Wash your bones in cold water and dry them. Spritz them all over with white vinegar, and season them with salt and pepper. Put them in a heavy baking pan or aluminum baking pan in a single layer. Put the pan in the pit and smoke with whatever else you've got smoking in the pit at 250 to 300°F (120 to 150°C) for 5 hours. Remove them from the pit, cover the pan with foil, and let them cool completely, about 1 hour. In a large heavy soup pot or Dutch oven, combine the smoked bones with the onion, carrots, celery, tomato, potato, herbs, garlic, 1 tablespoon salt, and 12 cups (2.8 L) water. Over medium-high heat, bring to a boil. Reduce the heat to low and gently simmer, stirring occasionally, for 5 hours. After 5 hours, remove from the heat and let the stock cool to room temperature, about 30 minutes. Using a cheesecloth-lined colander, strain the stock into a clean pot. Discard the solids. Clarify the stock to remove solid flecks that remain in the stock but that are too small to be strained out with cheesecloth: In a small bowl combine ¼ cup (60 ml) cold water with the egg white and eggshell and stir together to combine. Add the mixture to the pot with the strained stock. Bring to a boil over medium-high heat. 
As soon as the stock boils, remove it from the heat and let it stand for 10 minutes, until slightly cooled. Strain again through a sieve lined with cheesecloth and discard the solids. Transfer the stock to airtight containers. Refrigerate for at least 8 hours, or overnight; when ready to use, skim off the fat that collects on top of the stock (it is much easier to do this when the stock is cold). The stock can be refrigerated for up to 3 days or frozen for up to 4 months. A **SMOKED** **MEATLOAF** **I enjoy meatloaf tremendously, and it's best when smoked in a pit. Sometimes I make it meatball-style, other times I make it like the traditional loaf. Lots of people make meatloaf with bread crumbs, but I make cracker crumbs with buttery Ritz crackers. The smoky flavor of this pit-cooked meatloaf is unmistakable. If you've got your pit going, you're probably making lots of meat, but throw a meatloaf in there, smoke it, and save it for meatloaf sandwiches for your weekday lunches. That's how to use a pit with some planning.** _Cooking time:_ **ABOUT 2 HOURS** _Makes:_ **1 (9-INCH / 23-CM) MEATLOAF, ABOUT 6 SERVINGS** _ingredients_ **Butter or nonstick cooking spray, for the pan** **½ cup (120 ml) buttermilk** **1 cup (240 ml) Vinegar-Based Barbecue Sauce** _(this page)_ **1 cup (60 g) finely crushed Ritz crackers (about 20 crackers)** **1 pound (0.5 kg) ground beef, 80% lean** **1 pound (0.5 kg) ground pork** **½ teaspoon freshly grated nutmeg** **½ cup grated Parmigiano-Reggiano cheese** **1 large egg, beaten** **1 tablespoon barbecue rub (your favorite brand, or I'll sell you one of mine at jacksoldsouth.com)** **Kosher salt and freshly ground black pepper** **Pit Mop** _(this page)_ Grease a 9 × 5-inch (23 × 12-cm) loaf pan or a 10 × 6-inch (25 × 15-cm) baking dish with butter or nonstick cooking spray. In a large bowl, whisk the buttermilk with the barbecue sauce. Stir in the crackers, beef, pork, nutmeg, cheese, egg, rub, salt, and pepper and mix well. 
Pack the mixture into the prepared pan. Cover the pan with foil. You want your pit temperature to be between 250 and 300°F (120 and 150°C), which it will be if you're smoking any of our big meats. Place the meatloaf in the pit on the grate over the coals and smoke for 1 hour. Uncover the meatloaf and brush your vinegar mop on top of it. Cook for 30 minutes more, then hit it again with the mop. At the 2-hour mark, insert an instant-read thermometer into the center of the meatloaf; when the meatloaf registers 150°F (65°C), remove it from the pit, transfer the pan to a heatproof surface, and allow it to rest, loosely covered with foil, for 20 minutes before slicing and serving. A * * * * * * **IN THE SMOKER** * * * **The total cooking time of the meatloaf will depend on the heat of your fire and the size of your baking pan or dish, but expect about 3 hours. Prepare the meatloaf as directed on the opposite page. Prepare an offset smoker with soaked wood chips and heat it up to 350°F (175°C). When the smoker is ready, place the meatloaf pan on the cool side of the smoker. Cover the smoker and cook the meatloaf until its temperature registers 140°F (60°C); start checking the temperature at the 2-hour mark and keep checking every 15 minutes after that. At the 140°F (60°C) mark, brush ¼ cup (60 ml) Pit Mop** _(this page)_ **on top of the meatloaf and put it back on the smoker. The sauce will set into a shiny glaze while the meatloaf reaches its final temperature of 150 to 160°F (65 to 71°C). 
Once there, transfer the meatloaf pan to a heatproof surface, cover it with foil, and allow it to rest for 20 minutes before slicing and serving.** * * * A **EXTRAS** _Smoked Salt_ _Smoked Hoop Cheese_ _Smoked Whole Trout_ _Old-Fashioned Pulled Candy_ _Smoked Cornbread_ _Smoked Blackberry Cobbler_ _Smoked Chocolate Skillet Cake_ _Handmade Pit Ash Lye Soap_ A **EXTRAS** * * * I preach to folks who are interested in learning how to cook old-school barbecue on coal-fired masonry pits that when they fire up their pits, they should take advantage of the situation and cook as much as they can. Make hay while the sun shines, if you like. And I practice what I preach. When I fire up my pit, you'll likely find me cooking up some of my favorite pit-flavored sides that I remember from my childhood, like smoked hoop cheese (never heard of it? Turn to _this page_ to enlighten yourself—you'll thank me later) and smoked trout. And I'm also going to share with you some of my granny's formulas for pulled candy and soap, both of which are old-school recipes from days gone by that evoke the pit-smoking lifestyle at its best. A **SMOKED** **SALT** **As far as rubs and seasonings go, the way I learned to barbecue was with just two ingredients: salt and pepper. And in this book I wanted to return to this most basic of formulas. It's so simple I wonder if everybody will say, "He just wants us to use salt and pepper?" And I will answer that for this style of pit cooking, it's adequate. It's more than damn adequate. But if you want to use your favorite rub, I can't stop you. I've got a bunch of rub recipes that I could give you and I also sell my own brand of rubs; when I do competitions, I use those. But I don't use that stuff when I cook in my pits. At the end of the day, simple is better. The smoky flavor the pit emits is powerful, so it's best to stick to salt and pepper and not mess too much with it. 
One way I have of extending that smoky flavor to other foods is through something I figured out that not even my dad knew about: smoked salts. If you have smoked salt in your pantry, you're ahead of the game before you even start cooking.** **The process of infusing wood smoke into your salt is very easy to do in the coal-fired pits, but you can also do it on your regular smoker at home.** _Cooking time:_ **5 HOURS** _Makes:_ **ABOUT 2 CUPS (440 G)** _ingredients_ **About 2 cups (480 g) salt (kosher or plain iodized, whatever you prefer; enough to cover the bottom of a cookie sheet)** The best time to smoke your salt is when you're cooking something for a long time in the smoker and it's going to be maintaining a steady temperature. Pour a layer of salt about ½ inch (12 mm) deep onto a large (13 × 18-inch / 33 × 46-cm) rimmed cookie sheet. Smooth it down carefully so that it's in as even a layer as you can arrange it, and put it on the opposite side of the smoker from whatever meat you've got on there, then fire that pit for about 5 hours. By doing this, you're going to infuse the salt with the smoke (in my case it's going to be a combination of the hickory and oak woods flavoring the salt). You can take a lot of recipes in this book and substitute smoked salt for the kosher salt I call for, and you can also add that smoked salt to vegetables and other meats, too. After 5 hours, remove the pan from the pit, let the salt cool to room temperature, and store it in a jar. A **SMOKED** **HOOP CHEESE** **Hoop cheese is an old southern staple that you can barely find in the South anymore. It was traditionally made by farmers from cow's milk that had been drained entirely of its whey; the remaining curd was poured into a round mold, aka the hoop, and pressed—the cheese is usually semisoft. It used to be made in-house in small-town grocery stores, where it was cut fresh from big wheels. 
Sometimes it's called "red-rind cheese" for obvious reasons; it has a mild flavor and very creamy texture. (Ashe County Cheese, in the mountain town of West Jefferson, North Carolina, can ship you some if you can't find it locally: www.ashecountycheese.com.)** _Cooking time:_ **2 TO 4 HOURS** _Makes:_ **1 POUND (0.5 KG)** _ingredients_ **1 (1-pound / 0.5-kg) wheel of hoop cheese** You want to set up your smoker precisely as you would to cook all your meats, but we want it even lower: You want a heat of about 140°F (60°C), and you want to put a few hot coals on one side, and the cheese on the side without the coals. Remember, we're not trying to cook the cheese or even melt it; we just want to get some good smoky flavor on it. Take your hoop cheese and place it on a cookie sheet or in a shallow aluminum pan (you can do more than one round if you like). Set the sheet in the smoker and smoke the cheese for 2 to 4 hours, depending on about how much smoke you want on the cheese. If you go 2 hours, the cheese will be firmer and you'll have the flavor of smoke on it—you can slice it for sandwiches or eat it on crackers this way and it's delicious. If you go the full 4 hours, it will be quite soft, creamy, very melty, and very smoky, and in this state you can stir it into some pasta or use it to top nachos—the flavor will be strong and the consistency will be on the softer side. A **SMOKED** **WHOLE TROUT** **My dad loved to fish, and we spent a lot of time doing that when I was growing up; it was his real passion—he loved it even more than barbecue. 
Trout is one of my favorite fish to cook and eat because it's hardy enough to stand up to the smoking process, so when I'm firing up my pit for other meats, I know I can throw in some trout and then eat the smoked trout on crackers for a quick app, in a sandwich for lunch, or mixed with a little sour cream and mayonnaise for the world's most delicious smoky dip.** _Cooking time:_ **30 MINUTES TO 1 HOUR** _Makes:_ **4 SERVINGS** _ingredients_ **½ cup (120 g) kosher salt, plus more for seasoning** **2 pounds (0.9 kg) trout fillets (4 to 6 ounces / 115 to 170 g each), skin on, pin bones removed** **Distilled white vinegar** **Freshly ground black pepper** In a heavy medium pot, combine the salt with 4 cups (1 L) water over medium-high heat and bring to a boil, stirring to dissolve the salt. Remove from the heat and let cool to room temperature, about 30 minutes. Submerge the trout fillets in the brine, cover, and refrigerate until ready to use, at least 30 minutes or up to 3 hours. During this time you should be setting up your pit and getting some meat on there, so you have it at 250°F (120°C) when you're ready to cook your trout. Remove the trout from the brine and rinse the fillets thoroughly in fresh cold water. Spritz the trout with white vinegar and season it with salt and pepper. Put the trout in an aluminum pan in a single layer, skin side down, with at least ½ inch (12 mm) of space between the fillets, and put the pan on the grate in the pit. Smoke the trout for 30 minutes to 1 hour, until the fish is cooked through, flakes easily when prodded with a fork, and has darkened in color from transparent white to pearly white. Remove the trout from the smoker and serve immediately. A **OLD-FASHIONED** **PULLED CANDY** **I have many memories of my granny pulling candy by our sugar-processing shack; she would pull it and pull it and my mouth would water and water. 
I associate it with pit cooking because it's the same kind of old-school real southern technique that's simple and cheap to do and turns into something delicious to eat. You can see folks pulling candy at the beach towns in the South to this day, like in Myrtle Beach and Panama City, where they make saltwater taffy, which is similar to this candy.** _Makes:_ **ABOUT 3 DOZEN SMALL CANDIES** _ingredients_ **1¼ cups (300 ml) sorghum syrup or dark molasses** **¾ cup (150 g) sugar** **1 tablespoon distilled white vinegar** **1 tablespoon unsalted butter** **⅛ teaspoon baking soda** **⅛ teaspoon kosher salt** **A few drops peppermint oil or chopped walnuts** _(optional)_ Butter a glass 9 × 13-inch (23 × 33-cm) lasagna dish and set aside. In a large heavy pot over medium heat, combine the sorghum or molasses with the sugar and vinegar and, stirring occasionally to prevent burning, cook until the mixture reaches 270°F (132°C) on a candy thermometer, the soft-crack stage. Remove from the heat and add the butter, baking soda, and salt and stir just enough to blend. Pour the candy into the prepared pan. When cool enough to handle, after about 5 minutes, gather the mixture into a ball and pull it between your ungreased fingertips until the candy is a firm, light-colored strip. Cut it into 1-inch (2.5-cm) pieces and wrap each in waxed paper. You can add a few drops of peppermint oil or some chopped walnuts to the candy before pulling, if you like. A **SMOKED** **CORNBREAD** **This is a no-brainer when it comes to a side dish that you can throw into the pit while you're smoking your big meats. I make up a batch of skillet cornbread and set that sucker down in my pit for it to acquire the unmistakable flavor only a coal-fired pit smoker can give. 
This cornbread cooks at just about the same temp that most meats cook (250 to 300°F / 120 to 150°C); check your temperature regularly and keep it as consistent as you can.** _Cooking time:_ **ABOUT 45 MINUTES** _Makes:_ **1 (10-INCH / 25-CM) ROUND CORNBREAD** _ingredients_ **Unsalted butter, for the skillet** **1 cup (180 g) yellow cornmeal, fine or medium-coarse grind** **1 cup (125 g) all-purpose flour, sifted** **2 tablespoons sugar** **4 teaspoons baking powder** **½ teaspoon kosher salt** **1 large egg** **1 cup (240 ml) buttermilk** **⅓ cup (75 ml) sour cream** **¼ cup (60 ml) vegetable oil** Grease a 10-inch (25-cm) cast-iron skillet with butter and set aside. In a large bowl, combine the cornmeal, flour, sugar, baking powder, and salt and stir well. In a medium bowl, whisk together the egg, buttermilk, sour cream, and oil. Make a well in the center of the dry ingredients and pour in the egg mixture. Using a mixing spoon, stir together until well combined; the batter should have no lumps. Pour the batter into the prepared skillet. Gently place the skillet in the pit and cook, maintaining the temperature at 250 to 300°F (120 to 150°C), until a skewer inserted into the center of the cornbread comes out clean, about 45 minutes. Remove from the smoker and let cool for 10 minutes on a wire rack. You can serve the cornbread warm out of the skillet if you like, or invert it onto a platter and let it cool to room temperature. A **SMOKED** **BLACKBERRY** **COBBLER** **Blackberries are my favorite fruit; when they're in season you can spot me carrying around pints of them for snacking as I check my smoker and pit temps. Something about that tart flavor with a little bit of sweetness to it gets me every time. Of course you can use this pancakelike topping over other fruits, too. In the South we like our desserts made with lard, and since we're cooking whole hogs in our pit, we have access to very high-quality fresh lard. 
You can also buy lard from good butcher shops, or substitute vegetable shortening if you prefer. This is something to make when your smoker is going and cooking meat in it.** _Cooking time:_ **ABOUT 1½ HOURS** _Makes:_ **6 TO 8 SERVINGS** _ingredients_ **2 pints (560 g) fresh or frozen blackberries** **¾ cup (150 g) granulated sugar** **½ cup (110 g) firmly packed brown sugar** **1 tablespoon plus ½ cup (65 g) all-purpose flour** **1 tablespoon apple cider vinegar** **1 teaspoon ground cinnamon** **1 teaspoon baking powder** **½ teaspoon kosher salt** **½ cup (120 ml) buttermilk or whole milk** **¼ cup (60 ml) lard or vegetable shortening** **Vanilla ice cream, for serving** _(optional)_ In a medium nonreactive bowl, stir together the blackberries, ¼ cup (50 g) of the granulated sugar, the brown sugar, 1 tablespoon of the flour, the vinegar, and the cinnamon. Set aside. In another medium nonreactive bowl, sift together the remaining ½ cup (65 g) flour, the remaining ½ cup (100 g) granulated sugar, the baking powder, and the salt. Pour in the buttermilk and use a wooden spoon to stir and combine. The batter should be the consistency of pancake batter; it is fine if there are a few lumps. Set it aside. In a 10-inch (25-cm) cast-iron skillet, melt the lard over medium heat until it starts to brown around the edge and foam. It should be sizzling; you should hear it. When you hear that lard start to brown and sizzle, pour the batter into the hot skillet. Do not stir it. Pour the fruit mixture right on top of the batter. Remove the skillet from the heat. Transfer the skillet into the pit that you've been steadily maintaining at 250°F (120°C). Smoke the cobbler until the crust is golden and crisp-edged, about 1½ hours. Serve topped with vanilla ice cream, if you like. A **SMOKED** **CHOCOLATE** **SKILLET CAKE** **This is a great dessert to make at the end of your pit cooking, when your pit is hot and you want to get the most out of it that you can. 
This cake needs a slightly higher temp than most meats, though, so if you've been running the smoker at 250 to 300°F (120 to 150°C), you need to get it up to 325 to 350°F (165 to 175°C) to cook this cake. The rest is easy.** _Cooking time:_ **45 MINUTES TO 1 HOUR** _Makes:_ **10 TO 12 SERVINGS** _ingredients_ **¾ cup (1½ sticks / 170 g) plus 1 tablespoon unsalted butter, softened** **2 cups (250 g) all-purpose flour** **½ cup (50 g) unsweetened cocoa powder** **2 teaspoons baking powder** **½ teaspoon baking soda** **½ teaspoon kosher salt** **1 cup (200 g) plus 2 tablespoons sugar** **3 large eggs** **3 ounces (85 g) semisweet chocolate, melted and cooled** **2 teaspoons pure vanilla extract** **1½ cups (360 ml) sour cream** **Whipped cream and fresh fruit for serving** _(optional)_ Generously grease an 8-cup (2-L) cast-iron skillet with 1 tablespoon of the butter and set aside. In a medium bowl, sift the flour with the cocoa powder, baking powder, baking soda, and salt. In a large bowl using an electric mixer, beat the remaining ¾ cup (170 g) butter until creamy. Add the sugar and beat until light and fluffy. Add the eggs, one at a time, beating well after each addition. Add the melted chocolate and the vanilla and beat until the batter is smooth. Beat in the dry ingredients in three batches, alternating with the sour cream. Scrape the batter into the prepared pan and use a rubber spatula to smooth the surface. Place the skillet in the pit and cook for 45 minutes to 1 hour. Take special care to check the temperature every 10 minutes and maintain it between 325 and 350°F (165 to 175°C), shoveling in fresh hot coals as necessary. Smoke the cake until a skewer inserted into the center comes out with moist crumbs attached but no wet batter. Remove from the pit and let the cake cool completely in the pan on a rack, about 1 hour. When ready to eat, invert the cake onto a serving platter. 
Cut it into wedges and serve with freshly whipped cream and fresh fruit (such as my favorite, blackberries), if you like. **I love chocolate cake and I love spareribs—but I don't necessarily want my spareribs to taste like** * * * **CHOCOLATE CAKE.** * * * **I will throw a chocolate cake in my pit at the end of a big cook, though.** A **HANDMADE** **PIT ASH LYE** **SOAP** **Homemade soap is something my granny made because like a lot of southern people she couldn't get to the store all the time to buy it. I think it's as good a cleanser as any commercial soap, nice and gentle on your skin, and especially good to use if you have poison ivy or bug bites. The alkaline properties of lye soap neutralize and break down the bug's stinging saliva, which means that the bite is less likely to be itchy and painful in the hours and days that follow. Most soap is made from lye, the chemical used in cleaning products that is highly soluble in water. What is lye made of? It's made from leaching ashes, exactly the kind of ashes that come from hardwood (oak, hickory, maple, and so on) and fruit woods (apple, peach, cherry, and so on). So after you've fired up your pit and smoked all your food in it, you can take the ashes and make yourself some soap just like my granny did.** **A note about lye: There are two types. Homemade lye is potassium hydroxide and can be made at home from wood ashes mixed with fat or oils such as beef tallow or pig lard. You can use other fats such as olive oil or coconut oil or avocado oil in place of the animal fat, if you like. Commercial lye, available in some grocery stores and most hardware stores, is sodium hydroxide, which is more caustic than the homemade kind and makes a less appealing soap. Besides, this is a barbecue book and I'm not going to tell you to go to the store and buy lye if you have perfectly good ashes to work with when you're done cooking. 
Both types are strong chemicals that can cause burns and quickly eat through many materials (including skin), so do wear gloves and protective eyewear for this process.** **Add-ins: I'm giving you a basic soap method, but there are many additions you can include if you like: Colorings, herbs, flowers, honey, oatmeal, and essential oils are all good examples.** **To make a reasonable quantity of lye, you will need 2 gallons (7.6 L) of wood ashes. You are looking for white, paperlike ashes; not black or gray chunks of charcoal-like wood.** _Makes:_ **4 (¼-POUND / 0.1-KG) BARS OF SOAP** _materials & tools_ **Two 5-gallon (19-L) plastic buckets (or similarly sized wooden barrels; do not use aluminum or metal, which the solution will corrode)** **2 gallons (7.6 L) white wood ash** **About 3 gallons (11 L) water (rainwater or spring water work best because their mineral content is lower than that of tap water, but tap water will work fine)** **Apron** **Rubber gloves** **Plastic sunglasses or other plastic protective eyewear** **Cheesecloth and plastic colander** **Deep and wide waterproof container (to go under the bucket)** **Egg or small potato** **Heavy cooking pots** **1 pound (0.5 kg) lard, beef tallow, or other animal fat** **Wooden spoon** **About ½ cup (120 g) kosher salt** _(optional; for bar soap)_ 1) MAKE THE LYE WATER: Wearing your apron, gloves, and protective eyewear, collect about 2 gallons (7.6 L) white ash from the pit in one of the buckets—a little less than half of the bucket's capacity. Be sure to choose the thinnest, most papery ashes and no black chunks of coal. Pour in 1 gallon (3.8 L) water. Cover the container and let the solution settle for about 3 hours. 2) WEARING YOUR APRON, gloves, and protective eyewear, use cheesecloth and a plastic colander to strain the lye water solution into the second bucket. You can place a deep and wide waterproof container under the bucket to catch any spills (not pictured here). 
3) DO THE EGG / POTATO TEST to determine if the solution is concentrated enough to make soap: Float an egg or potato in the solution. When the solution has the right amount of concentration, the egg or small potato will float so that about ½ to 1 inch (12 mm to 2.5 cm) of it remains above the water's surface. If it sinks, the solution is too weak and you should boil the lye water down, reducing it by about a third, letting it cool, and trying the test again by repeating the process of pouring the lye water over the ashes again along with 2 gallons (7.6 L) fresh water, letting it settle for 3 hours, re-straining, and trying again until the water supports the egg correctly. When the solution is right, discard the ashes. If it floats and turns sideways, it is too strong; add water to the solution 1 cup (240 ml) at a time, repeating the test until the water supports the egg correctly. 4) PREPARE THE FAT: You can obtain the animal fat from a good butcher; ask for beef tallow or pork lard. Or you can render it yourself: Remove about 1 cup (240 ml) of the fat from the meat you've smoked (such as a whole hog or brisket) and melt it slowly over low heat in a large frying pan. Once thoroughly melted, strain the grease through cheesecloth into a heavy medium pot. Combine an equal amount of water with the strained grease, bring to a boil over medium heat, add ¼ cup (60 ml) fresh cold water, remove from the heat, and let cool. Once the fat hardens in the pot, scrape off any parts that look "dirty" or dark, and remove the hardened fat from the water to store in a dry container. 5) MAKE THE SOAP: You should have 1 pound (0.5 kg) lard and 1⅔ to 2 gallons (6.3 to 7.6 L) lye water. In a medium pot over medium heat, melt the lard. Transfer this to a large heavy container (plastic or wood, not metal) and combine it with the lye water, stirring with a wooden spoon. 
Once the mixture has a creamy, light caramel appearance, test it for doneness: Place a small dab of the soap on a glass or china plate and let it cool for 20 minutes. If it's done, the cooled mixture will appear transparent with white streaks and specks throughout. If it is gray and weak looking or has a gray margin, it needs more lye. If it cools with a gray skin over it, it needs more fat. You can reheat what you've got in your container, the fat-and-lye-water mixture, and adjust as necessary. You can pour it into any liquid soap container to use at this point. 6) IF YOU WANT BAR SOAP, you need to reduce the soap's water content. Adding the salt will separate the soap and the water. Once the salt is added, let the mixture cool completely. During this time the soap will separate from the water and float on top. Remove the soap to a heavy medium pot, add a small amount of water (no more than ½ cup / 120 ml), and bring to a boil; boil for 5 to 6 minutes, then skim the soap from the surface. Using a kitchen towel or mitts to protect your hands, you can pour the liquid soap into a mold—small silicone breadstick molds are great for this—or other square plastic molds in the shape of bars of soap—and let it cool and harden, then remove from the mold. A * * * **LIFE, DEATH, _and_ BARBECUE** BESIDES ME, THE greatest pitmaster of all time probably would be my dad, because he actually knew how to cook without any damn gadgets or technology. In terms of folks who cook barbecue for a living, you have a lot of unsung heroes in this game and a lot of people who never got any notoriety. To be recognized as a great pitmaster today you have to be a great showman. I'm not saying that that's right, but it's a fact: We live in a world now where media rules. Everything now is so instant, you gotta be that guy or gal who not only can cook but also can relay that message to the public. I'll tell you a story about my public life. 
I was inducted into the Barbecue Hall of Fame in 2013 at the Royal in Kansas City, one of the most prestigious competitions in barbecue. That's the best thing that's ever happened to me, because you're voted in by your peers who are other barbecue people. My induction ceremony was for pitmaster, and I gave credit to my dad. It's been a long path to get to where I am today, but my dad got me started, and Pat Burke was the guy I chased who showed me how to be a champion. So according to the world I'm a champion pitmaster. But this book is all about how "pitmaster" means something different today than it did in my daddy's day. Sometimes I wonder what old Jack would think about what a pitmaster has come to mean and about how barbecuing is something that is now associated with fame and fortune. His idea of a pitmaster was like one from two hundred years ago, when the pitmaster in the community was a guy of importance, the keeper of the fire. That was the guy who cooked for the reunions and cooked the thirty hogs for the churches and for the governor coming around and stumping for the state. It wasn't anything like competition barbecue. Competition meat is a totally different genre of food than what you would call authentic barbecue. The comp meat is good, but for me, if I'm going to sit down and eat a plate of barbecue, I want to eat what I was raised eating. It's not about nostalgia, but you taste the meat, you taste the vinegar, you taste the smoke—and that's it. Competition barbecue is so rich, it's so heavy with flavor that you take one bite and that's about all you can stand. Pitmasters know how to keep it simple. And their stakes are higher: If you lose a competition, it's not the end of the world. 
For my dad, who cooked seventy-five joints of meat every week, if he screwed up one pit full of meat he screwed up three days' worth of sales—don't forget there's a lot of labor involved in barbecue because you have to do the prep work, clean the pit, feed and stoke the pit, hold that meat to a consistent temperature . . . I learned how to do that through trial and error. My dad didn't allow mistakes, and he sold every damn bit of that meat every week. I don't have a restaurant like he did. I have competitions, though. The first time someone wins a trophy is fun; it's a great feeling because winning is a great thing. But the best thing for me is the people I've met, great cooks like Art Smith and Tuffy Stone. If it hadn't been for competition barbecue, I wouldn't have known them. Competition barbecue has also brought me closer to my brother, Tracy, which is something I didn't think would happen and couldn't have envisioned. Barbecue has been good for my family, and it's provided a living that I wouldn't have been able to make any other way. I feel like the luckiest man in the world. I ain't saying I don't have skills in barbecue, but I was lucky: I was at the right place at the right time when barbecue took off nationally as something that was bigger than people's backyards. I was already there, doing my thing. Some people peak too early, but I was still rocking and rolling in my prime (I still think I'm in my prime). My dad didn't think a lot about competition barbecue. In fact, he thought it was kind of silly and for folks who didn't have to worry about making a living. _If he could see me now_ , I think sometimes. You never know what is going to happen at a contest, and in that way it's a lot like life. Recently I was cooking at a contest down in Bainbridge, Georgia, and there were seventy-five teams, including all the top-ten Kansas City BBQ Society contenders. The contest had a lot of money in it; the grand champion had a $7,500 prize. 
A perfect score in a category was 180 points. (In my life, I've gotten three 180s, two of them at the Jack Daniel's Invitational for whole hog, one at a contest in Dillard, Georgia, for ribs.) This guy who had driven down to Georgia from New Jersey got a 180 for chicken. Then he went back to his tent and died of a massive heart attack. Got a 180 and passed away. He was only in his midfifties, and this was one of the happiest times of his life, and he goes and gets a perfect 180, puts it out on Facebook, calls his wife, he's so happy, and then he dies. Kind of puts things in perspective, don't it? * * * **A NOTE ABOUT** * * * **PIT TOOLS** This book is the opposite of one that is all about stuff you need to buy in order to make good barbecue. Remember that the thing I most want you to do is build a masonry pit—if you can do that, your life will be changed, my friends. That said, I understand that I use certain tools that you might want to make sure you have around because they make cooking over a coal-fired masonry pit (or another live and active heat source) that much easier. 
Here are the tools I suggest having that you should plan to use when you make my kind of barbecue: **_Plastic work gloves_** for handling and prepping raw meat **_Aluminum foil pans_** for transporting meat on and off the pit (take care to use a new one every time so you don't contaminate cooked meat with raw meat; disposability is key) **_Plastic table coverings_** for meat prep **_Plain old-fashioned wood-handled mops_** with long string (plan to saw off the handles to whichever length is most comfortable for you) **_Coolers_** (to store meat while brining) **_Plastic spray bottles_** for blasting meats with vinegar at prep time **_Very good steel-forged knives_** (if you're going to splurge on something, it should be these because they will last a lifetime—I still use some knives that were my daddy's—but make sure you sharpen them regularly and treat them with care); I recommend at least three: a long chef's knife (10 to 12 inches / 25 to 30.5 cm); a paring knife; and a carving knife **_Heavy wooden cutting boards_** (for letting meat rest and for slicing) **_Tongs_** in varying lengths and sizes, springloaded (the best for manipulating meat on pits and smokers) **_An instant-read meat thermometer_** (if you can't bear not to know the exact temp of your meat) **_Heavy-duty leather or other durable work gloves_** for handling the pit when it's hot **_A heavy garden hoe_** for raking coals A **A MINI-GUIDE TO MEAT** * * * **SELECTION & PREP** Here's my "just facts" version of how to get the main barbecue meats ready to go for the pit or smoker. You still need to consult the recipes in this book for the exact methods, but you can use this section for quick reference on how to prep these meats to get yourself and your barbecue going. * * * **RIBS** * * * **_For pork ribs:_** Spareribs, also called St. 
Louis ribs, are the long bones situated behind the shoulders and are from the lower part of the hog belly; they are straight and fatty, and the racks usually weigh about 4 pounds (1.8 kg) or less. When you buy them, look for ribs with meat that covers the whole bone—try to find racks where no rib bones are showing through the meat. Don't buy spares that are covered entirely in fat—there's often no meat on there to enjoy after you've finished cooking them. **_For beef ribs:_** The secret to making what I call "tenderlicious" beef ribs, aka dinosaur bones (called that because they're so big), is that you don't have to do too much—these ribs are so tender because they sit right below the rib roast of the cow, which has lots of good marbling. **_For both kinds of ribs:_** Fresh ribs that have never been frozen are best if you can find them. To trim them up before pit-smoking them, place them on a clean cutting board, bone side down. The first three ribs are the fattiest: Use a sharp knife to trim off the excess fat surrounding these. Then turn the slab over; you'll see that there's a thick, clear-white membrane covering the backs of the ribs. You want to pull this membrane off because it makes the ribs tough to chew, but you need to do it with a combination of gentle-yet-firm pulling. First make a small incision just below the length of the breastbone. Work your fingers underneath the membrane until you have 2 to 3 inches (5 to 7.5 cm) cleared. Grab the membrane with a towel or other clean cloth and gently but firmly pull it away from the ribs. Pulling off the membrane exposes loose fat, so after you've discarded the membrane, go in with your paring knife and trim any excess exposed fat. Use a clean towel to pat the ribs dry. Then apply white vinegar all over both sides of the rack, and season all over with salt and pepper. 
* * * **PORK SHOULDER** * * * A pork shoulder has two parts: the lower ("arm") piece, commonly called the "picnic," and the upper piece, commonly called the "Boston butt." These are just names for cuts that have evolved in the vernacular lingo of cooks and butchers over time. When you want to cook a pork shoulder, look for one that is well marbled (shot through with visible fat) and as squared-up and regularly shaped as an irregular-looking piece of meat can be (you're thinking about consistent and even cooking time, so try to find a shoulder without too many lumps and bumps). A pork shoulder can be an unwieldy piece of meat but it's very easy to master. First, place the shoulder on a clean large cutting board and use a sharp paring knife to trim any exposed slivers of bone from the meat. Run your hands gently around the sides to make sure no shards of bone are sticking out, and if you feel a few then use your knife to trim them out. Next, cut off any weblike layers of white fat by sliding the knife blade underneath the fat and cutting it off the shoulder. Then pat the meat dry all over with paper towels, use a towel or kitchen brush to apply white vinegar on all sides of the meat, and season all over with a healthy amount of salt and pepper. * * * **BRISKET** * * * Butcher shops and grocery-store meat counters break briskets down into two pieces: The first, called the "flat," comes from an area near the cow's belly and is relatively lean; the second, called the "point" cut, comes from near the shank and has more fat (and more flavor). When you are smoking a brisket, you need the whole thing, untrimmed and uncut, and that runs 12 to 15 pounds (5.4 to 6.8 kg) total and might need to be ordered in advance. If you want to make a small brisket, you'll most likely find a 5-pound (2.3-kg) cut and you can follow my method exactly, but you'll need to adjust the cooking time. 
To make a great piece of brisket, start with a great piece of meat: I like grass-fed wagyu cattle the very best. The first step is to trim the meat's membrane, that fine, silvery, weblike fat that covers its surface. This involves slowly cutting away the layers of sticky silver-white matter from the actual beef, and I warn you that it can be a painstaking process that requires patience and determination. But it's worth doing right, because it could make the difference between meat that's tough and meat that's tender. Also, seasonings like salt and pepper (and any rub you choose to use) won't stick to fat; the spices only adhere to the meat itself—and those spices are what create the "bark," or seared coating, that surrounds the brisket when you cook it. So take your time and carefully trim off all of the fatty membrane around that brisket before you rub it down with white vinegar and season it all over with salt and pepper. * * * **CHICKEN** * * * The best chickens to buy are local birds that have never been frozen and that foraged for food and ate plants (not industrially raised corn-fed birds from the freezer case, in other words). I look for big birds because they are easier to handle; 3 or even 4 pounds (1.4 to 1.8 kg) is my ideal. I can't preach enough about how much faster and easier it is to cook a chicken if you butterfly it before you put it in your pit or smoker. You can butterfly, or spatchcock, as we old-school barbecue cooks like to call it, a chicken in less than a minute with just a pair of kitchen shears. You're just cutting out the backbone so that you can open up the bird, kind of like how you would crack a book. Place the chicken on a clean cutting board and turn it over so it's breast side down; the backbone runs down the center of the chicken from the neck to the tail. 
Starting on one side of the tail—either side is fine—cut all the way up along the spine of the chicken, exerting a little pressure to cut all the way through the bones (it shouldn't need a lot of strength to do this, it's easy). Then repeat the process by cutting up the other side. Now you've cut out the backbone, which you can either save for stock (smoke it first and your stock will be delicious) or toss out if you're feeling lazy. Flip the chicken over on your cutting board and press down on its back with the flats of your palms to flatten it out. This action should break the breastbone so that the chicken lies flat. Then you can tuck the wings under the breast so they don't dry out, and you're good to go. _With my bulldog, Winston._ **ACKNOWLEDGMENTS** My list of thanks needs to be short and sweet because Dad taught me this style of cooking with no help from other family members. If you want to see the names of my other family members, they're in my first two cookbooks. Of course there are always individuals who help you through life and circumstances, but this book is about the influence of my relationship with my dad, Jack Mixon, and the path, unbeknownst to him at the time, that he set me on. Without first learning how to manage his pits, I wouldn't be the winningest man in BBQ. Thanks, Daddy. I would like to thank my publishing house, Abrams, for believing in a cookbook that requires readers to build a pit and manage a fire, which are apparently dicey things in the world of Manhattan publishing. Publisher Michael Sand and our editor Camaren Subhiyah were terrific to work with and made sure their team respected my wishes for presenting this old-school barbecue technique. Rob Deborde is responsible for bringing the vision to life with his remarkable illustrations. Johnny Autry came down to my barbecue compound in Unadilla, Georgia, and produced the stunning photographs while I called him a hipster the whole time. 
Super-agent Michael Psaltis has been part of my personal pit crew for years and continues to work hard on my behalf. And Kelly Alexander is still the best food writer in the world. To all of these folks, I am deeply grateful. **ABOUT THE AUTHORS** **MYRON MIXON** was born into a barbecue family. His father, Jack, owned and operated a barbecue take-out business in Vienna, Georgia, which Myron helped him run. His parents sold Jack's Old South, their house brand of barbecue sauce, in their store. When Jack died unexpectedly in 1996, Myron decided to enter some barbecue contests in hopes of selling sauce. He competed in his first competition in Augusta, Georgia, later that year and took first place in Whole Hog, first place in Pork Ribs, and third in Pork Shoulder. He was hooked. Since then he has won more than 200 grand championships resulting in more than 1,900 trophies; won more than 40 state championships, including wins in Georgia, Florida, Alabama, Virginia, Arkansas, Mississippi, Kentucky, Illinois, South Carolina, and Tennessee; and has been head of Team of the Year a record eight times. In addition, he has won eleven national championships, taken three first-place whole hogs at the Jack Daniel's World Championship Invitational Barbecue Competition, and been the Grand Champion at the World Championship Memphis in May three times, in 2001, 2004, and 2007. His team is the only team to win Grand Championships in Memphis in May, the Kansas City BBQ Society, and the Florida BBQ Association in the same year. Myron's success with his competition team, Jack's Old South, has led him to countless media appearances. He is the executive producer and host of _BBQ Rules_ and star of two other hit television shows, _BBQ Pitmasters_ and _BBQ Pit Wars_ on Discovery's Destination America, broadcast throughout the world. 
Myron is the author of the hugely successful _New York Times_ bestselling cookbook _Smokin' with Myron Mixon: Recipes Made Simple, from the Winningest Man in Barbecue_ (Ballantine, 2011) and _Everyday Barbecue_ (Ballantine, 2013). In addition, Myron has a popular line of rubs and sauces, two different lines of smokers/grills—the professional-grade line of custom Myron Mixon Smokers and the consumer-level line of Myron Mixon Pitmaster Q3 smokers—and the Myron Mixon Pitmaster Grill Tool—a combination chef knife, food flipper, and bottle opener. For more information about Myron or his products, go to: www.jacksoldsouth.com. When he's not competing, Myron teaches barbecue at his Jack's Old South Cooking School, an intensive weekend-long course that is held several times each year. He is also a regular participant and speaker at food festivals, competitions, and events throughout the world. A Writer **KELLY ALEXANDER** grew up in a boisterous, southern-Jewish food-focused family in Atlanta, Georgia. She is coauthor of the _New York Times_ bestselling cookbook _Smokin' with Myron Mixon_ (Ballantine, 2011) and author of the critically acclaimed _Hometown Appetites: The Story of Clementine Paddleford, the Forgotten Food Writer Who Chronicled How America Ate_ (Gotham, 2008). She is also the collaborator with _Top Chef_ winner Richard Blais on the cookbook _Try This At Home_ (Clarkson Potter, 2013); editor of _The Great American Cookbook_ , a reissue of Clementine Paddleford's _How America Eats_ (Rizzoli, 2012); author of _Peaches: A Savor the South Cookbook_ (UNC Press, 2013); and coauthor of _Everyday Barbecue_ with Myron Mixon (Ballantine, 2013). Her magazine work, which covers topics including obsessive collectors of Fiestaware and the cross-cultural significance of brisket, earned her a James Beard Foundation Journalism Award for magazine writing. She was a senior editor at _Saveur_ magazine and an editor at _Food & Wine_ and _Boston_ magazines. 
Her writing also has appeared in the _New York Times_ ; _O: The Oprah Magazine_ ; _Gourmet_ ; _The New Republic, New York_ ; _Southern Living_ ; _Slate_ ; _Real Simple_ ; _Travel + Leisure_ ; and _Newsweek_ , among many periodicals. Alexander teaches food writing at the Center for Documentary Studies at Duke University and can be heard chronicling food customs on _The State of Things_ , which airs on North Carolina Public Radio. A graduate of Northwestern University's Medill School of Journalism, she lives in Chapel Hill, North Carolina, with her husband and two sons. She's currently a doctoral candidate in Cultural Anthropology at Duke University. **INDEX OF SEARCHABLE TERMS** **A** aluminum foil pans Ashe County Cheese **B** back ribs barbecue coal making cooking multiple things fire choice firing the pit heat consistency in pit as lifestyle meat resting pit building as social food wood, using right Barbecue Hall of Fame barbecue sauces beef beef ribs selection & prep cuts Jack Mixon's T-Bone Steak Smoked Beef Oxtails Smoked Beef Ribs Smoked Beef Short Ribs Smoked Beef Tenderloin Smoked Brisket Smoked Prime Rib Smoked Rib-Eye Steaks beef extras Smoked Beef Stock Smoked Burgers Smoked Meatloaf T-Bone, Tomato & Onion Soup bird extras The Pitmaster's Smoked Chicken Salad Sandwiches The Pitmaster's Turkey Sandwich Smoked Chicken or Turkey Pitmaster Stock bird parts Smoked Chicken Wings Smoked Half Chickens Smoked Turkey Legs Smoked Turkey Wings birds, whole Smoked Butterflied Chicken Smoked Butterflied Turkey Smoked Whole Turkey blackberry cobbler bone. _See_ t-bone steak Boston butt. 
_See_ pork shoulder brining brisket selection & prep Smoked Brisket Brunswick Stew bullet smokers burgers Burke, Pat **C** cake candy carving turkey ceramic smokers charcoal grills cheese Smoked Hoop Cheese chicken The Pitmaster's Smoked Chicken Salad Sandwiches selection & prep Smoked Butterflied Chicken Smoked Chicken or Turkey Pitmaster Stock Smoked Chicken Wings Smoked Half Chickens chocolate skillet cake coals Coal-Fired Pit-Smoked Whole Hog making shoveling cobbler collards competitions early days party after condiments. _See also_ sauces Smoked Salt coolers cornbread cow. _See_ beef cracker crumbs Cross, Charles cutting boards **D** dead wood dessert Smoked Blackberry Cobbler Smoked Chocolate Skillet Cake drumsticks durable work gloves **E** equipment aluminum foil pans bullet smoker ceramic smoker charcoal grill coolers garden hoe for gurney making for pit building for soap making heavy-duty gloves H2O cooker instant-read meat thermometer offset smoker pit tools plastic spray bottles plastic table coverings plastic work gloves smokers steel-forged knives tongs wooden cutting boards wood-handled mops extras. _See also_ beef extras; bird extras; hog extras Handmade Pit Ash Lye Soap Old-Fashioned Pulled Candy Smoked Blackberry Cobbler Smoked Chocolate Skillet Cake Smoked Cornbread Smoked Hoop Cheese Smoked Salt Smoked Whole Trout **F** fire choosing lighting firing the pit fish flat cut. 
_See_ brisket flipping, whole hog Florida BBQ Association foil pans fruit woods **G** garden hoe gas grill gloves green wood grilling Jack Mixon's T-Bone Steak Smoked Rib-Eye Steaks grills gurney, making **H** H2O cooker ham Handmade Pit Ash Lye Soap heat consistency heavy-duty leather work gloves heavy wooden cutting boards helpers Mustard-Based Barbecue Sauce Pit Brine Pit Mop Vinegar-Based Barbecue Sauce hickory wood hoe hog extras Brunswick Stew Hog-Skin Collards Pork Cracklin's Smoked Snout Sandwich hog gurney hog parts Smoked Pig Tails Smoked Shoulder Smoked Spareribs Smoked Trotters Smoked Whole Ham hogs Coal-Fired Pit-Smoked Whole Hog homemade coal, no substitutes hoop cheese horizontal smoker **I** infusing wood smoke instant-read meat thermometer **J** Jack Daniel's World Championship Invitational Barbecue competition Jack Mixon's T-Bone Steak Jack's Old South Team **K** Kansas City BBQ Society kettle grills knives **L** lightered knots local wood Lock-&-Dam BBQ Contest lye soap **M** main courses Coal-Fired Pit-Smoked Whole Hog Jack Mixon's T-Bone Steak The Pitmaster's Smoked Chicken Salad Sandwiches The Pitmaster's Turkey Sandwich Smoked Beef Oxtails Smoked Beef Ribs Smoked Beef Short Ribs Smoked Beef Tenderloin Smoked Brisket Smoked Burgers Smoked Butterflied Chicken Smoked Butterflied Turkey Smoked Chicken Wings Smoked Half Chickens Smoked Meatloaf Smoked Pig Tails Smoked Prime Rib Smoked Rib-Eye Steaks Smoked Shoulder Smoked Snout Sandwich Smoked Spareribs Smoked Trotters Smoked Turkey Legs Smoked Turkey Wings Smoked Whole Ham Smoked Whole Trout Smoked Whole Turkey meat moisture in resting testing for doneness thermometer meatloaf meat mini-guide beef ribs selection & prep brisket selection & prep chicken selection & prep pork ribs selection & prep pork shoulder selection & prep ribs selection & prep Mixon, Faye Mixon, Jack barbecue education of death of growing up with helping hand of old school barbecuing of pitmaster meaning of toughness of wood 
Mixon, Michael Mixon, Myron competitions Mixon, Tracy mopping Pit Mop whole hog mops, wood handle Mustard-Based Barbecue Sauce Myron Mixon Smokers **O** oak wood offset smokers Old-Fashioned Pulled Candy oxtails **P** pans water pan peach wood picnic (cut of meat). _See_ pork shoulder pig tails pipe smoker pit consistent heat in firing laying pit ash lye soap Pit Brine Pit Mop preparation smoking tools pit building groundwork materials and tools site pitmasters first The Pitmaster's Smoked Chicken Salad Sandwiches The Pitmaster's Turkey Sandwich Pit-Smoked Whole Hog plastic spray bottles plastic table coverings plastic work gloves point cut. _See_ brisket pork. _See also_ hog; hog extras; hog parts Pork Cracklin's pork ribs, selection & prep pork shoulder, selection & prep prime rib pulled candy **R** red-rind cheese resting meat rib-eye steaks ribs. _See also_ prime rib; rib-eye steaks beef, selection & prep pork, selection & prep Smoked Beef Ribs Smoked Beef Short Ribs Smoked Spareribs St. Louis–style rooters. 
_See_ snouts rules for barbecuing coal making cooking multiple things fire choice firing the pit heat consistency in pit meat resting pit building wood, using right **S** salt sandwiches The Pitmaster's Smoked Chicken Salad Sandwiches The Pitmaster's Turkey Sandwich Smoked Snout Sandwich sauces Mustard-Based Barbecue Sauce Vinegar-Based Barbecue Sauce seasoned wood seasonings selection & prep of meat beef ribs brisket chicken pork ribs pork shoulder ribs short ribs Smoked Beef Short Ribs shoulder pork shoulder selection & prep smoked shoulder side dishes Brunswick Stew Hog-Skin Collards Pork Cracklin's Smoked Cornbread simplicity site, for pit skillet cake Smith, Art Smoked Beef Oxtails Smoked Beef Ribs Smoked Beef Short Ribs Smoked Beef Stock Smoked Beef Tenderloin Smoked Blackberry Cobbler Smoked Brisket Smoked Burgers Smoked Butterflied Chicken Smoked Butterflied Turkey Smoked Chicken or Turkey Pitmaster Stock Smoked Chicken Wings Smoked Chocolate Skillet Cake Smoked Cornbread Smoked Half Chickens Smoked Hoop Cheese Smoked Meatloaf Smoked Pig Tails Smoked Prime Rib Smoked Rib-Eye Steaks Smoked Salt Smoked Shoulder Smoked Snout Sandwich Smoked Spareribs Smoked Trotters Smoked Turkey Legs Smoked Turkey Wings Smoked Whole Ham Smoked Whole Trout Smoked Whole Turkey smokers bullet ceramic charcoal grill fire lighting H2O cooker offset preparation two-step process water pan setup smokers, cooking methods Coal-Fired Pit-Smoked Whole Hog Jack Mixon's T-Bone Steak Smoked Beef Ribs Smoked Beef Tenderloin Smoked Brisket Smoked Burgers Smoked Butterflied Chicken Smoked Chicken Wings Smoked Half Chickens Smoked Meatloaf Smoked Prime Rib Smoked Rib-Eye Steaks Smoked Shoulder Smoked Spareribs Smoked Turkey Legs Smoked Turkey Wings Smoked Whole Ham Smoked Whole Turkey smoking snouts (snoot) Smoked Snout Sandwich soap soup spareribs Smoked Spareribs spray bottles steaks Jack Mixon's T-Bone Steak Smoked Rib-Eye Steaks steel-forged knives stew stick burning St. 
Louis–style ribs stock Smoked Beef Stock Smoked Chicken or Turkey Pitmaster Stock Stone, Tuffy sweets Old-Fashioned Pulled Candy Smoked Blackberry Cobbler Smoked Chocolate Skillet Cake **T** table coverings taffy. _See_ pulled candy t-bone steak T-Bone, Tomato & Onion Soup 10-count method tenderloin testing meat for doneness thermometer tongs tools torpedo (bullet smoker) Tower Rock BBQ team trotters trout turkey carving The Pitmaster's Turkey Sandwich Smoked Butterflied Turkey Smoked Chicken or Turkey Pitmaster Stock Smoked Turkey Legs Smoked Turkey Wings Smoked Whole Turkey two-step process, for smokers **V** vinegar Vinegar-Based Barbecue Sauce **W** water pan Waterpan TechnologyTM whole birds whole hog Coal-Fired Pit-Smoked Whole Hog flipping mopping selecting and prepping in smokers wings Smoked Chicken Wings Smoked Turkey Wings wood cutting boards -handle mops using right wood smoke, infusing work gloves World Championships Published in 2016 by Stewart, Tabori & Chang An imprint of ABRAMS Text copyright © 2016 Myron Mixon Photographs copyright © 2016 Johnny Autry Illustrations copyright © 2016 Rob DeBorde All rights reserved. No portion of this book may be reproduced, stored in a retrieval system, or transmitted in any form or by any means, mechanical, electronic, photocopying, recording, or otherwise, without written permission from the publisher. Library of Congress Control Number: 2015949283 ISBN: 978-1-61769-184-3 eISBN: 978-1-61312-945-6 Editor: Camaren Subhiyah Designer: Paul Kepple and Max Vandenberg @ Headcase Design www.headcasedesign.com Production Manager: True Sims Certain activities discussed in this book may be potentially hazardous or dangerous. Any person should approach these activities with caution and appropriate supervision and training. The author and publisher do not accept liability for any injury, loss, legal consequences, or incidental or consequential damages suffered or incurred by any reader of this book. 
Stewart, Tabori & Chang books are available at special discounts when purchased in quantity for premiums and promotions as well as fundraising or educational use. Special editions can also be created to specification. For details, contact specialsales@abramsbooks.com or the address below. 115 West 18th Street New York, NY 10011 www.abramsbooks.com
{ "redpajama_set_name": "RedPajamaBook" }
3,508
Ata_images_dir internationalizes berrying hereafter. Colbert reclimb compunctiously. Shelley whips expertly. Wheyey Egbert typewrote Yellow Xanax Bars Online redeliver cross-examining lazily! Quintin reinvolved macaronically?
{ "redpajama_set_name": "RedPajamaC4" }
8,532
After the bubble gum Adams Family prototype sent the internet crazy, its designers are set to cash in. A little over a week ago, Canadian designer Andrew Greenbaum posted a photo of a pink, inflatable coffin on Instagram. He had made it with fellow creative Ian Felton. It was one of those perfect internet flash points: millennial pink, great for photo op, summertime vibes—a nice "I want to die, lol" undertone. Like the eternal resting place for a vampire that owns everything Glossier ever produced. The handful of prototype pictures were shared countless times, going niche-viral among those of us who live in the venn diagram of cry-for-help nihilism, industrial design stan, and thot. But it was also a massive tease, as the coffin was the only one in existence. They're after AU$ 80,877. Which sounds like a lot, but I guess don't underestimate the power of an aesthetically pleasing joke. Plus, as the Kickstarter points out, the money would go to funding Andrew and Ian's collaborative work, that would see them creating other strange and covetable objects that you can take bikini selfies with. Pledging $120 will get you one of these babies, with discounts offered for the more you buy. The Kickstarter also offers special edition versions in gold and clear, for total Lenin vibes. At the time of writing, after the campaign had been live for less than 12 hours, they were sitting on over $5000. So fingers crossed you'll be crossing the river Styx in this baby soon.
{ "redpajama_set_name": "RedPajamaC4" }
1,746
\section{Statement of results} Let $S$ be a scheme. The category $\mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S))$ of presheaves with framed transfers \cite[\S2.3]{EHKSY} is a motivic analog of the classical category of $\scr E_\infty$-monoids. We have the \emph{framed suspension spectrum} functor \[ \Sigma^\infty_\mathrm{fr}: \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S)) \to \mathcal{SH}(S) \] which was constructed in \cite[Theorem 18]{hoyois2018localization}. By analogy with the classical situation, one might expect that many interesting motivic spectra can be obtained as framed suspension spectra. This is indeed the case; see \cite[\S1.1]{hoyois2021hermitian} for a summary. This note concerns the following examples of the above idea. One has framed presheaves \cite[\S6]{hoyois2021hermitian} \[ \mathrm{Vect}, \mathrm{Bil} \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S)) \] where $\mathrm{Vect}(X)$ is the groupoid of vector bundles on $X$ and $\mathrm{Bil}(X)$ is the groupoid of vector bundles with a non-degenerate symmetric bilinear form. There exist Bott elements \[ \beta \in \pi_{2,1} \Sigma^\infty_\mathrm{fr} \mathrm{Vect} \quad\text{and}\quad \tilde\beta \in \pi_{8,4} \Sigma^\infty_\mathrm{fr} \mathrm{Bil} \] and canonical equivalences \cite[Proposition 5.1]{hoyois2020hilbert} \cite[Proposition 6.7]{hoyois2021hermitian} \[ (\Sigma^\infty_\mathrm{fr} \mathrm{Vect})[\beta^{-1}] \simeq \mathrm{KGL} \quad\text{and}\quad (\Sigma^\infty_\mathrm{fr} \mathrm{Bil})[\tilde\beta^{-1}] \simeq \mathrm{KO}. 
\] Here $\mathrm{KGL}$ is the motivic spectrum representing homotopy algebraic $K$-theory and $\mathrm{KO}$ is the motivic spectrum representing homotopy hermitian $K$-theory.\footnote{As a notational convention for this introduction, whenever we mention $\mathrm{KO}$ we shall assume that $1/2 \in S$.} Again by comparison with the classical situation, this suggests that $\Sigma^\infty_\mathrm{fr} \mathrm{Vect}$ and $\Sigma^\infty_\mathrm{fr} \mathrm{Bil}$ should be motivic analogs of \emph{connective} $K$-theory spectra. Another way of producing ``connective'' versions is by passing to (very) effective covers \cite{voevodsky-slice-filtration,spitzweck2012motivic}. It was proved in \cite{hoyois2021hermitian,hoyois2020hilbert} that these two notions of connective motivic $K$-theory spectra coincide, provided that $S$ is regular over a field. Our main result is to extend this comparison to more general base schemes. We denote by $H\mathbb{Z}$ Spitzweck's motivic cohomology spectrum \cite{spitzweck2012motivic} and by $H\mathrm{W}$ the periodic Witt cohomology spectrum \cite[Definition 4.6]{bachmann-etaZ}. \begin{theorem} \label{thm:main} Let $S$ be a scheme. \begin{enumerate} \item Suppose that $f_1(H\mathbb{Z}) = 0 \in \mathcal{SH}(S)$. The canonical map \[ \Sigma^\infty_\mathrm{fr} \mathrm{Vect} \to f_0 \mathrm{KGL} \in \mathcal{SH}(S) \] is an equivalence. \item Suppose in addition that $1/2 \in S$ and $H\mathrm{W}_{\ge 2} = 0 \in \mathcal{SH}(S)$. The canonical map \[ \Sigma^\infty_\mathrm{fr} \mathrm{Bil} \to \tilde f_0 \mathrm{KO} \in \mathcal{SH}(S) \] is an equivalence. \end{enumerate} These assumptions are satisfied if $S$ is essentially smooth over a Dedekind scheme (containing $1/2$ in case (2)). \end{theorem} \begin{remark} That the assumptions are satisfied for Dedekind schemes is proved in \cite[Proposition B.4]{bachmann-norms} for (1) and in \cite[Lemma 3.8]{bachmann-etaZ} for (2). They in fact hold for all schemes; this will be recorded elsewhere. 
\end{remark} \begin{example} Bott periodicity implies formally that $f_n \mathrm{KGL} \simeq \Sigma^{2n,n} f_0\mathrm{KGL}$ and $s_n(\mathrm{KGL}) \simeq \Sigma^{2n,n} f_0(\mathrm{KGL})/\beta$. Theorem \ref{thm:main}(1) implies that $f_0(\mathrm{KGL})/\beta \simeq H\mathbb{Z}$ (see Lemma \ref{lemm:Vect/beta}). Hence in this situation the slice filtration for $\mathrm{KGL}$ yields a convergent spectral sequence, with $E_2$-page given by (Spitzweck's) motivic cohomology. \end{example} \subsection*{Notation} We use notation for standard motivic categories and spectra, as in \cite{bachmann-etaZ} and \cite{hoyois2021hermitian}. \section{Proofs} As a warm-up, we treat the case of $\mathrm{KGL}$. Recall that the functor $\Sigma^\infty_\mathrm{fr}$ inverts group-completion. The Bott element lifts to $\beta: (\bb P^1, \infty) \to \mathrm{Vect}^\mathrm{gp}$ \cite[\S5]{hoyois2020hilbert}. We also have the rank map $\mathrm{Vect}^\mathrm{gp} \to \mathbb{Z} \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S))$. The composite \[ (\bb P^1,\infty) \wedge \mathrm{Vect}^\mathrm{gp} \xrightarrow{\beta} \mathrm{Vect}^\mathrm{gp} \wedge \mathrm{Vect}^\mathrm{gp} \xrightarrow{m} \mathrm{Vect}^\mathrm{gp} \to \mathbb{Z} \] is null-homotopic after motivic localization, since $\mathbb{Z}$ is motivically local and truncated and $(\bb P^1,\infty) \stackrel{\mathrm{mot}}{\simeq} S^1 \wedge {\mathbb{G}_m}$. \begin{lemma} \label{lemm:Vect/beta} The induced map \[ (\Sigma^\infty_\mathrm{fr} \mathrm{Vect})/\beta \to \Sigma^\infty_\mathrm{fr} \mathbb{Z} \simeq H\mathbb{Z} \] is an equivalence. \end{lemma} \begin{proof} The equivalence $\Sigma^\infty_\mathrm{fr} \mathbb{Z} \simeq H\mathbb{Z}$ is \cite[Theorem 21]{hoyois2018localization}. Since all terms are stable under base change \cite[proof of Lemma 7.5]{hoyois2021hermitian} \cite[Lemma 16]{hoyois2018localization}, we may assume that $S = \mathrm{Spec}(\mathbb{Z})$. 
Using \cite[Proposition B.3]{bachmann-norms} we further reduce to the case where $S$ is the spectrum of a perfect field. In this case $\Sigma^\infty_\mathrm{fr} \mathrm{Vect} \simeq f_0 \mathrm{KGL}$ and so $(\Sigma^\infty_\mathrm{fr} \mathrm{Vect})/\beta \simeq s_0 \mathrm{KGL} \simeq H\mathbb{Z}$ (see e.g. \cite[Proposition 2.7]{ananyevskiy2017very}). \end{proof} \begin{proof}[Proof of Theorem \ref{thm:main}(1)] Note first that if $U \subset S$ is an open subscheme, and any of the assumptions of Theorem \ref{thm:main} holds for $S$, it also holds for $U$. On the other hand, if one of the conclusions holds for all $U$ in an open cover, it holds for $S$. It follows that we may assume that $S$ is qcqs, e.g. affine. Since $f_1(H\mathbb{Z}) = 0$ we find (using Lemma \ref{lemm:Vect/beta}) that \[ \beta: \Sigma^\infty_\mathrm{fr} \mathrm{Vect} \to \Sigma^{-2,-1} \Sigma^\infty_\mathrm{fr} \mathrm{Vect} \] induces an equivalence on $f_i$ for $i \ge 0$. It follows that in the directed system \[ \Sigma^\infty_\mathrm{fr} \mathrm{Vect} \xrightarrow{\beta} \Sigma^{-2,-1} \Sigma^\infty_\mathrm{fr} \mathrm{Vect} \xrightarrow{\beta} \Sigma^{-4,-2} \Sigma^\infty_\mathrm{fr} \mathrm{Vect} \xrightarrow{\beta} \dots \] all maps induce an equivalence on $f_0$. Since the colimit is $\mathrm{KGL}$, $f_0$ commutes with colimits (here we use that $X$ is qcqs, via \cite[Proposition A.3(2)]{bachmann-norms}) and $\Sigma^\infty_\mathrm{fr} \mathrm{Vect}$ is effective (like any framed suspension spectrum), the result follows. \end{proof} The proof for $\mathrm{KO}$ is an elaboration on these ideas. From now on we assume that $1/2 \in S$. Recall from \cite[Definition 2.6, Lemma 2.7]{bachmann-etaZ} the motivic spectrum \[ \ul{k}^M \simeq (H\mathbb{Z}/2)/\tau \in \mathcal{SH}(S). \] For the time being, assume $S$ is Dedekind. Taking framed loops we obtain \[ \ul{k}_1^M := \Omega^\infty_\mathrm{fr} \Sigma^{1,1} \ul{k}^M \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S)). 
\] \begin{lemma} \label{lemm:k1M} Let $S$ be a Dedekind scheme, $1/2 \in S$. \begin{enumerate} \item We have $\ul{k}_1^M \simeq a_\mathrm{Nis} \tau_{\le 0} {\mathbb{G}_m}/2$, where ${\mathbb{G}_m} \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S))$ denotes the sheaf $\scr O^\times$ with its usual structure of transfers \cite[Example 2.4]{lecture-notes-mot-cohom}. \item If $f: S' \to S$ is a morphism of Dedekind schemes then $f^* \ul{k}_1^M \stackrel{\mathrm{mot}}{\simeq} \ul{k}_1^M \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S'))$. \item The canonical map $\Sigma^\infty_\mathrm{fr} \ul{k}_1^M \to \Sigma^{1,1} \ul{k}^M \in \mathcal{SH}(S)$ is an equivalence. \end{enumerate} \end{lemma} For this and some of the following arguments, it will be helpful to recall that we have an embedding of $\mathcal{S}\mathrm{pc}{}^\mathrm{fr}(S)^\mathrm{gp}$ into the stable category of spectral presheaves on $\mathrm{Cor}^\mathrm{fr}(S)$. In particular, many fiber sequences in $\mathcal{S}\mathrm{pc}{}^\mathrm{fr}(S)$ are cofiber sequences. \begin{proof} (1) Clear by construction since $H^1_{\acute{e}t}(X, \mu_2) \simeq \scr O^\times(X)/2$ for $X$ affine. (2) By (1) we have a cofiber sequence $\Sigma \mu_2 \to a_\mathrm{Nis} {\mathbb{G}_m}/2 \to \ul{k}_1^M \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S))$. Since pullback of framed presheaves preserves cofiber sequences and commutes with forgetting transfers up to motivic equivalence \cite[Lemma 16]{hoyois2018localization} we reduce to the same assertion about ${\mathbb{G}_m}, \mu_2$, viewed as presheaves without transfers. Since they are representable, the assertion is clear. (3) Using \cite[Proposition B.3]{bachmann-norms}, (2) and \cite[Theorem 4.4]{bachmann-etaZ} we may assume that $S$ is the spectrum of a perfect field. In this case $\Sigma^\infty_\mathrm{fr} \Omega^\infty_\mathrm{fr} \simeq \tilde f_0$ \cite[Theorem 3.5.14(i)]{EHKSY}, so we need only prove that $\Sigma^{1,1} \ul{k}_1^M$ is very effective. 
But this is clear since we have the cofiber sequence $\Sigma^{1,0} H\mathbb{Z}/2 \xrightarrow{\tau} \Sigma^{1,1} H\mathbb{Z}/2 \to \Sigma^{1,1} \ul{k}_1^M$ and $H\mathbb{Z}/2$ is very effective. \end{proof} \begin{construction} The assignment $V \mapsto (V \oplus V^*, \varphi_V)$ sending a vector bundle to its associated (hyperbolic) symmetric bilinear bundle upgrades to a morphism \[ \mathrm{Vect} \to \mathrm{Bil} \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S))^{BC_2}, \] where $\mathrm{Vect}$ carries the $C_2$-action coming from passing to dual bundles, and $\mathrm{Bil}$ carries the trivial $C_2$-action. \end{construction} \begin{proof} Since the presheaves are $1$-truncated, all the required coherence data can be written down by hand.\NB{Better argument?} \end{proof} \begin{lemma} Let $S$ be a Dedekind scheme containing $1/2$. \begin{enumerate} \item The map \[ (\mathrm{Vect}^\mathrm{gp})_{hC_2} \to \mathrm{Bil}^\mathrm{gp} \] induces an isomorphism on $a_\mathrm{Nis} \pi_i$ for $i = 1,2$. \item The homotopy orbits spectral sequence yields \[ a_\mathrm{Nis} \pi_0 (\mathrm{Vect}^\mathrm{gp})_{hC_2} \simeq \mathbb{Z}, \] an exact sequence \[ 0 \to \ul{k}_1^M \to a_\mathrm{Nis} \pi_1 (\mathrm{Vect}^\mathrm{gp})_{hC_2} \to \mathbb{Z}/2 \to 0 \] and a map \[ a_\mathrm{Nis} \pi_2 (\mathrm{Vect}^\mathrm{gp})_{hC_2} \to \mathbb{Z}/2, \] all as presheaves with framed transfers. \end{enumerate} \end{lemma} \begin{proof} (1) This follows from the cofiber sequence $K_{hC_2} \to \mathrm{GW} \to L$ \cite[Theorem 7.6]{schlichting2016hermitian} using that $a_\mathrm{Nis} \pi_i L = 0$ unless $i \equiv 0 \pmod{4}$. (2) The homotopy orbit spectral sequence just arises from the Postnikov filtration of $\mathrm{Vect}^\mathrm{gp}$ and the formation of homotopy orbits and hence is compatible with transfers. Its $E_2$ page takes the form \[ H_i(C_2, a_\mathrm{Nis} \pi_j \mathrm{Vect}^\mathrm{gp}) \Rightarrow a_\mathrm{Nis} \pi_{i+j} (\mathrm{Vect}^\mathrm{gp})_{hC_2}. 
\] The form of the differentials of the spectral sequence implies that the terms $H_i(C_2, a_\mathrm{Nis} \pi_j \mathrm{Vect}^\mathrm{gp})$ are permanent cycles for $i \le 1$, and survive to $E_\infty$ for $(i,j) = (0,0)$ and $(i,j) = (1,1)$. One has $a_\mathrm{Nis} \pi_0 \mathrm{Vect}^\mathrm{gp} = \mathbb{Z}$ with the trivial action and $a_\mathrm{Nis} \pi_1 \mathrm{Vect}^\mathrm{gp} = {\mathbb{G}_m}$ \cite[Lemma III.1.4]{weibel-k-book} with the inversion action. This already yields the first assertion. A straightforward computation shows that \[ H_*(C_2, \mathbb{Z}) = \mathbb{Z}, \mathbb{Z}/2, 0, \mathbb{Z}/2, \dots \] and \[ H_*(C_2, {\mathbb{G}_m}) = \ul{k}_1^M, \mu_2, \ul{k}_1^M, \dots. \] Since $H_2(C_2, \mathbb{Z}) = 0$, no differential can hit the $(i,j) = (0,1)$ spot either, yielding the second assertion. Moreover this implies that $H_1(C_2, {\mathbb{G}_m}) = \mu_2$ is the bottom of the filtration of $\pi_2$. It follows that there is a map $a_\mathrm{Nis} \pi_2 (\mathrm{Vect}^\mathrm{gp})_{hC_2} \to A$, where $A$ is a quotient of $\mu_2$. To prove that $A=\mu_2$ it suffices to check this on sections over a field\NB{really?}, in which case we can use the hermitian motivic spectral sequence of \cite{bachmann-very-effective}.\NB{Better argument?} \end{proof} We have $a_\mathrm{Nis} \pi_0 \mathrm{Bil}^\mathrm{gp} \simeq \ul{GW}$. Thus we can form the following filtration of $\mathrm{Bil}^\mathrm{gp}$ refining the Postnikov filtration \[ \mathrm{Bil}^\mathrm{gp} \leftarrow F_1 \mathrm{Bil}^\mathrm{gp} \leftarrow F_2 \mathrm{Bil}^\mathrm{gp}\leftarrow F_3 \mathrm{Bil}^\mathrm{gp}\leftarrow F_4 \mathrm{Bil}^\mathrm{gp} \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S)) \] with subquotients given Nisnevich-locally by \begin{equation} \label{eq:subquotients} \ul{GW}, \Sigma \mathbb{Z}/2, \Sigma \ul{k}_1^M, \Sigma^2 \mathbb{Z}/2. 
\end{equation} Recall also the framed presheaf $\mathrm{Alt} \in \mathcal{P}_\Sigma(\mathrm{Cor}^\mathrm{fr}(S))$ sending a scheme to the groupoid of vector bundles with a non-degenerate alternating form. Tensoring with the canonical alternating (virtual) form $H(1) - h$ on $H\bb P^1$ (where $H(1)$ is the tautological rank $2$ alternating form on $H\bb P^1$, and $h$ is the standard alternating form on a trivial vector bundle of rank $2$) yields maps \[ \sigma_1: H\bb P^1 \wedge \mathrm{Alt}^\mathrm{gp} \to \mathrm{Bil}^\mathrm{gp} \quad\text{and}\quad \sigma_2: H\bb P^1 \wedge \mathrm{Bil}^\mathrm{gp} \to \mathrm{Alt}^\mathrm{gp}; \] by construction we have $\tilde \beta = \sigma_1 \sigma_2$ (recall that $H\bb P^1 \stackrel{\mathrm{mot}}{\simeq} S^{4,2}$). \begin{lemma} Let $S$ be a Dedekind scheme, $1/2 \in S$. \begin{enumerate} \item The composite \[ H\bb P^1 \wedge \mathrm{Alt}^\mathrm{gp} \xrightarrow{\sigma_1} \mathrm{Bil}^\mathrm{gp} \to \mathrm{Bil}^\mathrm{gp}/F_4\mathrm{Bil}^\mathrm{gp} \] is motivically null. The induced map \[ \Sigma^\infty_\mathrm{fr} \mathrm{cof}(\sigma_1) \to \Sigma^\infty_\mathrm{fr} \mathrm{Bil}^\mathrm{gp}/F_4\mathrm{Bil}^\mathrm{gp} \] is an equivalence. \item The composite \[ H\bb P^1 \wedge \mathrm{Bil}^\mathrm{gp} \xrightarrow{\sigma_2} \mathrm{Alt}^\mathrm{gp} \xrightarrow{rk/2} \mathbb{Z} \] is motivically null. The induced map \[ \Sigma^\infty_\mathrm{fr} \mathrm{cof}(\sigma_2) \to \Sigma^\infty_\mathrm{fr} \mathbb{Z} \] is an equivalence. \end{enumerate} \end{lemma} \begin{proof} (1) Write $C$ for the cofiber \emph{computed in the category of spectral presheaves on $\mathrm{Cor}^\mathrm{fr}(S)$}. Then $C$ admits a finite filtration, with subquotients corresponding to those in \eqref{eq:subquotients}. Since each of those is the infinite loop space of a motivic spectrum, it follows that $C$ is in fact motivically local. 
Consequently $C$ corresponds to $\mathrm{Bil}^\mathrm{gp}/F_4\mathrm{Bil}^\mathrm{gp}$ under the embedding into spectral presheaves. These contortions tell us that there are \emph{fiber} sequences \[ F_{i+1}\mathrm{Bil}^\mathrm{gp}/F_4\mathrm{Bil}^\mathrm{gp} \to F_i\mathrm{Bil}^\mathrm{gp}/F_4\mathrm{Bil}^\mathrm{gp} \to F_i\mathrm{Bil}^\mathrm{gp}/F_{i+1}\mathrm{Bil}^\mathrm{gp} \] for $i < 4$. Hence to prove that the composite is null, it suffices to prove that there are no maps from $\Sigma^{4,2} \mathrm{Alt}^\mathrm{gp}$ into the motivic localizations of the subquotients of the filtration given in \eqref{eq:subquotients}. These motivic localizations are $\ul{GW}, L_\mathrm{Nis} K(\mathbb{Z}/2,1), L_\mathrm{Nis} K(k_1^M,1)$ and $L_\mathrm{Nis} K(\mathbb{Z}/2,2)$ (since they are motivically equivalent to the subquotients, and motivically local because they are infinite loop spaces of the motivic spectra $H\tilde \mathbb{Z}$\NB{Use defining fiber square.}, $\Sigma \ul{k}^M$, $\Sigma^{2,1} \ul{k}^M, \Sigma^2 \ul{k}^M$). It suffices to prove that $\Omega^{4,2}$ of these subquotients vanishes, which is clear. Next we claim that $\Sigma^\infty_\mathrm{fr} \mathrm{Bil}^\mathrm{gp}/F_4\mathrm{Bil}^\mathrm{gp}$ is stable under base change (among Dedekind schemes containing $1/2$). Indeed the defining fiber sequences of $F_4\mathrm{Bil}^\mathrm{gp}$ are also cofiber sequences, and so $\Sigma^\infty_\mathrm{fr} \mathrm{Bil}^\mathrm{gp}/F_4\mathrm{Bil}^\mathrm{gp}$ is obtained by iterated extension from spectra stable under base change (see Lemma \ref{lemm:k1M}(2) for $\ul{k}_1^M$, \cite[proof of Lemma 7.5]{hoyois2021hermitian} for $\mathrm{Bil}$ and $\mathrm{Alt}$, and \cite[Lemma 16]{hoyois2018localization} for $\mathbb{Z}/2$). To prove that the induced map is an equivalence we thus reduce as before to $S = \mathrm{Spec}(k)$, $k$ a perfect field of characteristic $\ne 2$. 
In this case the result is a straightforward consequence of the hermitian motivic filtration of \cite{bachmann-very-effective}. (2) The proof is essentially the same as for (1), but easier. \end{proof} We now arrive at the main result. \begin{theorem} Let $S$ be a scheme containing $1/2$ such that \[ f_1(H\mathbb{Z}) = 0 = H\mathrm{W}_{\ge 2} \in \mathcal{SH}(S). \] The canonical maps \[ \Sigma^\infty_\mathrm{fr} \mathrm{Bil} \to \tilde f_0 \mathrm{KO} \quad\text{and}\quad \Sigma^\infty_\mathrm{fr} \mathrm{Alt} \to \tilde f_0 \Sigma^{4,2} \mathrm{KO} \] are equivalences. \end{theorem} \begin{proof} As before we may assume that $S$ is qcqs. We know that $\mathrm{KO}$ is the colimit of \[ \Sigma^\infty_\mathrm{fr} \mathrm{Bil} \xrightarrow{\sigma_2} \Sigma^{-4,-2} \Sigma^\infty_\mathrm{fr} \mathrm{Alt} \xrightarrow{\sigma_1} \Sigma^{-8,-4} \mathrm{Bil} \xrightarrow{\sigma_2} \cdots. \] It is hence enough to prove that $\sigma_1: \Sigma^{-8n,-4n} \Sigma^\infty_\mathrm{fr} \mathrm{Bil} \to \Sigma^{-8n-4,-4n-2} \Sigma^\infty_\mathrm{fr} \mathrm{Alt}$ induces an equivalence on $\tilde f_0$ for every $n \ge 0$, and similarly for $\sigma_2$. (Here we use that $S$ is qcqs, so that $\tilde f_0$ preserves filtered colimits.\NB{ref?}) Given a cofiber sequence $A \to B \to C$, in order to prove that $\tilde f_0 A \simeq \tilde f_0 B$, it suffices to show that $\mathrm{Map}(X, C) = *$ for every $X \in \mathcal{SH}(S)^{\text{veff}}$, i.e. that $C \in \mathcal{SH}(S)^{{\text{veff}}\perp}$. Over $\mathbb{Z}[1/2]$, the cofiber of $\sigma_1$ has a finite filtration, with subquotients \[ \Sigma^{-4,-2} \Sigma^\infty_\mathrm{fr} \ul{GW}, \Sigma^{-3,-2} \Sigma^\infty_\mathrm{fr} \mathbb{Z}/2, \Sigma^{-3,-2} \Sigma^\infty_\mathrm{fr} \ul{k}_1^M, \Sigma^{-2,-2} \Sigma^\infty_\mathrm{fr} \mathbb{Z}/2, \] and the cofiber of $\sigma_2$ is $\Sigma^{-4,-2} \Sigma^\infty_\mathrm{fr} \mathbb{Z}$. 
Using \cite[Corollary 22]{hoyois2018localization}, \cite[Theorem 7.3]{hoyois2021hermitian} and Lemma \ref{lemm:k1M}(3), we can identify the list of cofibers as \[ \Sigma^{-4,-2} H\tilde\mathbb{Z}, \Sigma^{-3,-2} H\mathbb{Z}/2, \Sigma^{-2,-1} \ul{k}^M, \Sigma^{-2,-2} H\mathbb{Z}/2, \Sigma^{-4,-2} H\mathbb{Z}. \] These spectra are stable under arbitrary base change (essentially by definition), and hence for arbitrary $S$ the cofibers of $\sigma_1, \sigma_2$ are obtained as finite extensions, with cofibers in the above list. To conclude the proof, it will thus suffice to show that all spectra in the above list are in $\mathcal{SH}(S)^{{\text{veff}}\perp}$. Note that if $E \in \mathcal{SH}(S)$ then $E \in \mathcal{SH}(S)^{{\text{veff}}\perp}$ if and only if $\Omega^\infty E \simeq *$. In particular this holds if $f_0 E = 0$. This holds for $\Sigma^{m,n} H\mathbb{Z}$ as soon as $n<0$, by assumption. Hence it also holds for $\Sigma^{m,n}H\mathbb{Z}/2$ in the same case ($f_0$ being a stable functor) and for \[ \Sigma^{m,n} \ul{k}^M \simeq \mathrm{cof}(\Sigma^{m,n-1} H\mathbb{Z}/2 \xrightarrow{\tau} \Sigma^{m,n} H\mathbb{Z}/2). \] The only spectrum left in our list is $\Sigma^{-4,-2}H\tilde\mathbb{Z}$. Using \cite[Definition 4.1]{bachmann-etaZ} we see now that $\Omega^\infty \Sigma^{-4,-2} H\tilde\mathbb{Z} \simeq \Omega^\infty \Sigma^{-4,-2} \ul{K}^W$, so we may treat the latter spectrum. We have $\ul{K}^W/\eta \simeq \ul{k}^M$ \cite[Lemma 3.9]{bachmann-etaZ}, whence $\eta: \Sigma^{-4-n,-2-n} \ul{K}^W \to \Sigma^{-5-n,-3-n} \ul{K}^W$ induces an equivalence on $\Omega^\infty$. Since $\Omega^\infty$ commutes with filtered colimits, we see that $\Sigma^{-4,-2}\ul{K}^W \in \mathcal{SH}(S)^{{\text{veff}}\perp}$ if and only if $\Sigma^{-4,-2}\ul{K}^W[\eta^{-1}] \in \mathcal{SH}(S)^{{\text{veff}}\perp}$. 
This latter spectrum is the same as $\Sigma^{-2} H\mathrm{W}$ \cite[Lemma 3.9]{bachmann-etaZ}, and \[ \tilde f_0(\Sigma^{-2} H\mathrm{W}) \simeq \tilde f_0((\Sigma^{-2} H\mathrm{W})_{\ge 0}) \simeq \tilde f_0(\Sigma^{-2} (H\mathrm{W}_{\ge 2})) = 0 \] by assumption. \end{proof} \bibliographystyle{plainc}
{ "redpajama_set_name": "RedPajamaArXiv" }
2,751
Шувакі́ш () — селище у складі Єкатеринбурзького міського округу Свердловської області. Населення — 1067 осіб (2010, 753 у 2002). Національний склад станом на 2002 рік: росіяни — 87 %. Примітки Джерела Посилання На Вікімапії Населені пункти Єкатеринбурзького міського округу Селища Свердловської області
{ "redpajama_set_name": "RedPajamaWikipedia" }
119
\section{Introduction} Let $S = S_{g,n}$ be a surface of genus $g$ with $n$ punctures, where $\chi(S)=2-2g-n<0$. The mapping class group of $S$, $Mod(S_{g,n})$, is the group of orientation preserving homeomorphisms of $S$ up to isotopy. Here, the punctures are assumed to be fixed setwise by the homeomorphism. Nielsen-Thurston classification of mapping classes states that every mapping class is either pseudo-Anosov, reducible or finite order \cite{thurston1988geometry}. Pseudo-Anosovs are often the ones whose understanding is the crucial part in studying the mapping class group. Associated to any pseudo-Anosov map is an algebraic integer called the dilatation or the stretch factor. The dilatation measures how much the map stretches/shrinks in the two canonical directions at each point of the surface. From a dynamical point of view, the logarithm of the dilatation is the entropy of the pseudo-Anosov map. Ivanov proved that on a fixed surface, the set of dilatations is a discrete subset of $(1,\infty)$ \cite{ivanov1990stretching,arnoux1981construction}. In particular there exists a minimum dilatation. Let us denote by $l_{g,n}$ the logarithm of the minimum dilatation for pseudo-Anosov maps on $S_{g,n}$. Finding the minimum dilatation or its asymptotic behavior has been of great importance. One motivation is that $l_{g,n}$ is the systole (the length of the shortest geodesic) of the moduli space with the Teichm\"{u}ller metric. Another motivation comes from the relation between low-dilatation pseudo-Anosov maps and low-volume fibered hyperbolic 3-manifolds \cite{aaber2010closed}. Penner found the asymptotic behavior of this number for closed surfaces \cite{penner1991bounds}. He proved that there are constants $c_1 , c_2 >0$ such that for any $g\geq 2$ \[ \frac{c_1}{g} \leq l_{g,0} \leq \frac{c_2}{g}. \] \noindent Our aim is to understand the asymptotic behavior of $l_{g,n}$ similarly. 
Recall that Penner has proved the following \cite{penner1991bounds} \[ l_{g,n} \geq \dfrac{\log(2)}{12g-12+4n}, \] which is comparable to $\dfrac{1}{|\chi(S)|}$, up to multiplicative constants. Tsai has obtained another lower bound for $l_{g,n}$, which gives a better bound than Penner's theorem when $n$ is large compared to $g$ \cite{tsai2009asymptotic}. Let $\Gamma_S(3)$ denote the kernel of the action of $Mod(S_{g,0})$ on $H_1(S_{g,0};\mathbb{Z}/3\mathbb{Z})$. Define \[ \Theta(g):=[Mod(S):\Gamma_S(3)]. \] Note that $\Theta(g)$ is super-exponentially large in $g$ \cite{tsai2009asymptotic}\footnote{In fact standard theorems imply that it is larger than $3^{g^2}$. See the background section.}. \begin{thm}(Tsai) For any $g \geq 2$ and $n \geq 0$ we have the following: \[ l_{g,n} \geq \min \left\{ \frac{1}{\Theta(g)}\frac{\log(2)}{(12g-12)}, \frac{1}{\Theta(g)} \frac{\log(3|\chi(S)|)}{6|\chi(S)|} \right\}.\] \label{tsai} \end{thm} \noindent Note that when $n$ is large compared to $g$, the minimum is the second expression. The following theorem shows that one can replace $\Theta(g)$ in Tsai's theorem by a term that is polynomially small in $g$. \begin{thm} Given any positive real number $\alpha$, there exists a positive constant $C = C(\alpha)$ such that for any $g \geq 2$ and $n \geq 0$ we have the following: \[ l_{g,n} \geq \frac{C}{g^{2+\alpha}}\frac{\log(|\chi(S)|)}{|\chi(S)|}. \] \label{maintheorem} \end{thm} \noindent Our lower bound should be compared with Tsai's upper bound for $l_{g,n}$ \cite{tsai2009asymptotic}. Tsai proved that there is a constant $C>0$ such that for any $g \geq 2$ and $n \geq 0$ the following holds \footnote{Tsai proved this bound for $n \geq 12g+7$. See the appendix for an extension of her result to all $n \geq 0$.}: \[ l_{g,n} \leq C\, g\, \frac{\log|\chi(S)|}{|\chi(S)|}. \] Here is the idea of the proof. Following Tsai, we look at the Lefschetz number of the map $f : S \longrightarrow S$. 
If the Lefschetz number of $f$ is negative then one can give a ``good'' lower bound for the dilatation. However, the Lefschetz number of a pseudo-Anosov map need not be negative in general. We prove that there is a ``relatively small'' number (at most polynomially large in genus), $k$, such that the Lefschetz number of $f^k$ is negative. Using the Lefschetz formula for the Lefschetz number of a map, this translates into a problem about traces of powers of integral matrices. Then we use elementary Fourier analysis and Dobrowolsky's theorem about the modulus of algebraic integers to prove the desired statement. \subsection{Acknowledgement} This work has been done during my PhD studies at Princeton University. I would like to thank my advisor David Gabai for his constant support and encouragement. Special thanks to Peter Sarnak for helpful discussions on Tur\'an theory and suggesting the reference \cite{montgomery1994ten}. I would like to thank Ian Agol and Will Sawin for helpful comments and Bal\'{a}zs Strenner and Masoud Zargar for reading an earlier version of this paper. \section{Background} \noindent Throughout, we assume that the surface $S$ is orientable and $\chi(S) = 2-2g-n<0$. \subsection{Thurston-Nielsen Theory}\hfill\\ The Thurston-Nielsen classification of the mapping class group states that each element in the mapping class group can be represented by a map $f$ that is one of the following:\\ 1) periodic \\ 2) reducible \\ 3) pseudo-Anosov\\ \textbf{Periodic} means that $f$ has a power that is equal to the identity. \textbf{Reducible} means that there is a collection $\mathcal{C}$ of disjoint simple closed curves on $S$ that is preserved by $f$, i.e., $f(\mathcal{C})=\mathcal{C}$. 
\textbf{Pseudo-Anosov} means that there is a pair of transverse measured foliations $\mathcal{F}^\pm$ on $S$ and a positive number $\lambda > 0$ such that the foliations are preserved by $f$ but their measures are expanded/contracted by a factor of $\lambda > 1$, i.e., $f(\mathcal{F}^+)=\lambda \mathcal{F}^+$ and $f(\mathcal{F}^-)=\frac{1}{\lambda} \mathcal{F}^-$. The foliations $\mathcal{F}^\pm$ might have prong-type singularities (Figure \ref{prongs}). The number $\lambda$ is called the \textbf{dilatation} or \textbf{stretch factor} of $f$.\\ \begin{figure} \centering \includegraphics[width= 3 in]{prongs} \caption{Left: A 3-prong singularity, Right: We allow 1-prong singularities around the punctures.} \label{prongs} \end{figure} \subsection{Previous bounds for dilatations}\hfill\\ Penner originated the study of minimal dilatations for orientable surfaces. He proved that $l_{g,0}$ behaves asymptotically like $\frac{1}{g}$. He also gave a lower bound of the order $\frac{1}{|\chi(S)|}$ for the value of $l_{g,n}$ \cite{penner1991bounds}. Since then, there has been a lot of effort for understanding the minimum stretch factor from at least two different perspectives. \\ The first one tries to make the constants in Penner's original theorem sharp, for small values of $g$ or asymptotically. McMullen's question is in this direction \cite{mcmullen2000polynomial}. \begin{question}(McMullen) Does $\lim_{g \rightarrow \infty} \hspace{2mm} g \hspace{1mm}. \hspace{1mm} l _{g,0}$ exists? What is its value? \end{question} \noindent There has been a lot of progress in finding upper bounds for $g \hspace{1mm}. \hspace{1mm} l _{g,0}$ \cite{bauer1992upper,minakawa2006examples}. The lower bound seems to be much more difficult (see the work of McMullen \cite{mcmullen2015entropy}). \\ The second direction seeks for understanding the behavior of $l_{g,n}$ along different subsets of the $(g,n)$ plane, at least up to multiplicative constants. Theorem \ref{tsai} is of this form. 
It implies that the behavior of $l_{g,n}$ along the line $\underline{ g = \text{Constant}}$ is like $\frac{\ln(n)}{n}$ when $g \geq 2$. Valdivia showed for any fixed $r \in \mathbb{Q}^+$, the behavior of $l_{g,n}$ along the line $\underline{ g = rn }$ is like $\frac{1}{g}$ (which is the same behavior as $\frac{1}{n}$ in this case) \cite{valdivia2012sequences}, i.e., for any $r \in \mathbb{Q}^+$ there are constants $D_1 = D_1(r)$ and $D_2= D_2(r)$ such that for any $n \in \mathbb{N}$ and $g =rn$ we have \[ \frac{D_1}{g} \leq l_{g,n} \leq \frac{D_2}{g}.\] It is tempting to understand the behavior of $l_{g,n}$ as a two-variable function. \begin{question} What is the behavior of $l_{g,n}$ as a function of two variables in the $(g,n)$ plane? \end{question} \subsection{Markov Partition}\hfill\\ Let $f : S \longrightarrow S$ be a pseudo-Anosov map with invariant measured foliations $\mathcal{F}^+$ and $\mathcal{F}^-$. A rectangle is a map $\phi : I \times I \longrightarrow S$ such that $\phi$ is an embedding when restricted to the interior of $I \times I$. Moreover, $\phi( \text{point} \times I) \subset \mathcal{F}^+$ and $\phi(I \times \text{point}) \subset \mathcal{F}^-$. Define the $\pm$ boundary of $\phi$ as $\partial_+ = \phi( \partial I \times I)$ and $\partial_- = \phi(I \times \partial I)$ (Figure \ref{rectangle}). We usually do not distinguish between a rectangle and its image, $\mathcal{R}$, by abuse of notation. \begin{figure} \labellist \pinlabel $\mathcal{R}$ at 115 55 \pinlabel $\partial_+$ at -10 55 \pinlabel $\partial _+$ at 240 55 \pinlabel $\partial_-$ at 115 115 \pinlabel $\partial_-$ at 115 -10 \endlabellist \centering \includegraphics[width= 2 in]{rectangle} \caption{A rectangle} \label{rectangle} \end{figure} \noindent A \textbf{Markov partition for $f$} is a finite family of rectangles $\{ \mathcal{R}_i \}$ that cover the whole surface and satisfy the following three conditions.\\ i) The rectangles do not intersect in the interior. 
\\ ii) For each rectangle $\mathcal{R}_i$, $f(\partial_+ \mathcal{R}_i) \subset \bigcup \limits_{j} \partial_+ \mathcal{R}_j $.\\ iii) For each rectangle $\mathcal{R}_i$, $f^{-1}(\partial_- \mathcal{R}_i) \subset \bigcup \limits_{j} \partial_- \mathcal{R}_j $.\\ Any pseudo-Anosov map has a Markov partition. Bestvina-Handel have constructed a Markov partition of size at most $9 |\chi(S)|$ for $f$ when the surface is closed and a Markov partition of size at most $3|\chi(S)|$ when the surface has at least one marked point. Define the transition matrix $A=(a_{i,j})$ associated to the Markov partition as follows. The entry $a_{i,j}$ counts the number of times that $f(\mathcal{R}_i)$ wraps around $\mathcal{R}_j$. Bestvina-Handel showed that this matrix can be chosen to be Perron-Frobenius. Moreover, its maximal eigenvalue is equal to the dilatation of $f$. In particular, $\lambda(f)$ is an algebraic integer. \subsection{Lefschetz number}\hfill\\ Let $M$ be a compact, oriented manifold and $f : M \longrightarrow M$ be a map. The \textbf{Lefschetz number of $f$}, $L(f)$, is defined as the algebraic intersection of the graph of $f$ and the diagonal inside $M \times M$. Therefore it is invariant under homotopy of the map $f$. The \textbf{Lefschetz formula} states that this number can be computed in two different ways. On one hand, it is equal to the following sum coming from the action of $f$ on homology groups of $M$: \[ \sum_{i\geq0} (-1)^i \hspace{2mm} Tr(f_* : H_i (M;\mathbb{R}) \longrightarrow H_i(M;\mathbb{R})) \] On the other hand when $f$ has isolated fixed points, the Lefschetz number of $f$ is equal to sum of the local Lefschez numbers at fixed points. If $p$ is an isolated fixed point of $f$, then the local Lefschetz number of $f$ at $p$, $L_p(f)$, is defined as follows. Take a small sphere, $U$, around $p$ that contains no other fixed point. Then $L_p(f)$ is equal to the degree of the map $z \mapsto \frac{f(z)-z}{|f(z)-z|}$ restricted to $U$. 
\[ L(f) = \sum_{f(p)=p} \hspace{2mm} L_p(f) = \sum_{i\geq0} (-1)^i \hspace{2mm} Tr(f_* : H_i (M;\mathbb{R}) \longrightarrow H_i(M;\mathbb{R})). \] Note that when $M=S$ is a compact orientable surface, the above formula simplifies to the following: \[ L(f) =\sum_{f(p)=p} \hspace{2mm} L_p(f)= 2 - Tr(f_* : H_1(S) \longrightarrow H_1(S)). \] The following crucial observation is due to Tsai. We bring the proof from \cite{tsai2009asymptotic} for the reader's convenience. \begin{lem}(Tsai) Let $f : S \longrightarrow S$ be a pseudo-Anosov map on a surface with at least one marked point. Assume that $L(f) < 0$. We have the following estimate for the stretch factor of $f$. \[ \log(\lambda(f)) \geq \frac{\log(3|\chi(S)|)}{6|\chi(S)|}. \] \label{negative-lefschetz} \end{lem} \begin{proof} \textbf{First Step}: There exists a Markov partition and a rectangle $\mathcal{R}$ of the partition such that the interior of $\mathcal{R}$ and $f(\mathcal{R})$ intersect. \\ The map $f$ has a Markov partition with $k$ rectangles where $k \leq 3|\chi(S)|$ \cite{bestvina1995train}. The map $f$ has isolated singularities. Since the Lefschetz number of $f$ is negative, at least one of the local Lefschetz numbers of $f$, say at $p$, should be negative. We show that one of the rectangles that contain $p$ (in the interior or on the boundary) is the rectangle that we are looking for. If there exist a rectangle $\mathcal{R}$ that contains $p$ in the interior then we are done since $p \in \mathcal{R} \cap f(\mathcal{R})$. Otherwise, we claim that $p$ has to be of the following types:\\ i) $p$ is a non-singular fixed point and the transverse orientation of $\mathcal{F}^+$ at $p$ is preserved,\\ ii) $p$ is a singular fixed point and at least one of the separatrices emanating from $p$ is fixed by $f$,\\ Note that it is clear that if $p$ is of the above types then one of the rectangles of the Markov partition (constructed by Bestvina-Handel \cite{bestvina1995train}) around $p$ has the desired property. 
If $p$ is not of the above type then it would be one of the following:\\ iii) $p$ is a non-singular fixed point and the transverse orientation of $\mathcal{F}^+$ at $p$ is reversed, or\\ iv) $p$ is a singular fixed point and none of the separatrices emanating from $p$ are fixed by $f$.\\ However, direct calculation shows that in the third and fourth case, the local Lefschetz number of $p$ is equal to $+1$ which is inconsistent with our assumption about $p$ (see page 2262 of \cite{tsai2009asymptotic}). This completes the proof. \\ \textbf{Second Step}: As a corollary of the first step, the transition matrix associated to the Markov partition, $A=(a_{i,j})$, has a nonzero entry on the diagonal. Define an oriented graph $G$ with the vertex set $V$ such that there are $a_{i,j}$ oriented edges from $v_i$ to $v_j$. Since $A$ is Perron-Frobenius, $G$ is path connected by oriented paths. By the previous step, there is an $\ell$ such that $a_{\ell,\ell}>0$, therefore there is at least one edge from $v_\ell$ to itself. On the other hand any two vertices of $G$ are connected by an oriented path of length at most $k$. Hence, for any $i,j$ there are oriented paths of length at most $k$ from $v_i$ to $v_\ell$ and from $v_\ell$ to $v_j$. Putting these paths together and adding the loop at the vertex $v_\ell$ as much as necessary, we get an oriented path of length $2k$ from $v_i$ to $v_j$. We just showed that all entries of the matrix $A^{2k}$ are positive. This means that $\mu(A^{2k}) \geq k$ since the spectral radius of a non-negative matrix is bounded below by the minimum row (column) sum. Hence \[ \log( \mu(A)) \geq \frac{\log(k)}{2k} \geq \frac{\log(3|\chi(S)|)}{6|\chi(S)|}. \] Note that by the Lefschetz formula, when $L(f) < 0$ the map $f$ has at least one fixed point and we can take the fixed point as a marked point. In other words, the condition of having at least one marked point is redundant here. 
\end{proof} \subsection{The order of $\Theta(g)$}\hfill\\ In this part, we briefly explain why $\Theta(g) > 3^{g^2}$. Let $\Gamma_S(3)$ be the kernel of the composition \[ Mod(S_g) \longrightarrow Sp(2g,\mathbb{Z}) \longrightarrow Sp(2g,\mathbb{F}_3) \] where $\mathbb{F}_3$ is the field of three elements. Both of these maps are surjective. The first one is well known in the mapping class group theory (see for example \cite{farb2011primer}). The second one follows from strong approximation. (Morally speaking, it says that the mod $q$ solutions to a system of quadratic equations can be lifted to integral solutions under suitable conditions, where $q$ is a prime number \cite{kneser1966strong}.) Therefore, the index of the kernel is equal to the order of the image: \[ \Theta(g) = [Mod(S):\Gamma_S(3)] = |Sp(2g,\mathbb{F}_3)|. \] But the order of $Sp(2m,\mathbb{F}_q)$ over a finite field $\mathbb{F}_q$ with $q$ elements is equal to \cite{grove2002classical}: \[ q^{m^2} \prod_{i=1}^{m} \hspace{2mm} (q^{2i}-1) \] which is obviously greater than $q^{m^2}$. \subsection{On the modulus of algebraic integers}\hfill\\ In the proof of Proposition \ref{main}, we use some facts about the modulus of algebraic integers. A complex number $\lambda$ is an \textbf{algebraic integer} if it is a root of a monic polynomial with integer coefficients. The \textbf{degree of $\lambda$} is the smallest possible degree of such a polynomial. The smallest degree polynomial is called the \textbf{minimal polynomial of $\lambda$}. The \textbf{Galois conjugates of $\lambda$} are all the roots of the minimal polynomial, including $\lambda$ itself. Define $\overline{|\lambda|}$ to be the maximum modulus amongst all Galois conjugates of $\lambda$. Clearly, $\overline{|\lambda|} \geq 1$ and equality happens for roots of unity. \textbf{Kronecker's theorem} states that if $\overline{|\lambda|} = 1$ then $\lambda$ is a root of unity. 
Moreover, if $\lambda $ is not a root of unity and has degree $d$, then $\overline{|\lambda|} - 1$ is bounded below by a number that just depends on $d$. The conjectural best bound is of order $\frac{1}{d}$. In fact by looking at the number $\lambda = 2^{\frac{1}{d}}$, it is easy to see that this is the best one can hope for. This is called the Schinzel-Zassenhaus conjecture \cite{schinzel1965refinement}. \begin{conj}(Schinzel-Zassenhaus) There exists a constant $c>0$ such that for any algebraic integer $\lambda \neq 0$ of degree $d$ which is not a root of unity we have \[ \overline{|\lambda|} \geq 1+\frac{c}{d}. \] \label{conj:Schinzel} \end{conj} \noindent Although Schinzel-Zassenhaus conjecture is still open, a slightly weaker form of it has been proved by Dobrowolsky \cite{dobrowolski1979question}. \begin{thm}(Dobrowolsky) Let $\lambda$ be an algebraic integer of degree $d$. For large enough $d$ if $\lambda$ is not a root of unity then \[ \overline{|\lambda|} \geq 1+\frac{1}{d} \bigg( \frac{\log\log(d)}{\log(d)} \bigg)^3. \] \label{thm:Dobrowolsky} \end{thm} \noindent Note that Dobrowolsky theorem does not take care of small values of $d$. Therefore we use the following theorem of Schinzel-Zassenhaus for small values of $d$ \cite{schinzel1965refinement}. \begin{thm}(Schinzel-Zassenhaus) If an algebraic integer $\lambda \neq 0$ is not a root of unity, and if $2s$ among its conjugates have nonzero imaginary part, then \[ \overline{|\lambda|} > 1+4^{-s-2}. \] \label{thm:Schinzel} \end{thm} \section{Proof of Theorem \ref{maintheorem}} \begin{proof} Let $f \in Mod(S_{g,n})$ be a pseudo-Anosov map. Denote by $\lambda(f)$ the dilatation of $f$. The idea is to look at the Lefschetz number of $f$, which we denote by $L(f)$. Define $\hat{f} \in Mod(S_{g,0})$ to be the map obtained by forgetting the punctures. The following two observations have been made by Tsai \cite{tsai2009asymptotic}. 1) $L(f) = L(\hat{f})$. 
2) If $L(f)<0$ and $f$ is pseudo-Anosov, then $\log(\lambda(f)) \geq \frac{\log(3|\chi(S)|)}{6|\chi(S)|}$ (see Lemma \ref{negative-lefschetz}).\\ The aim is to find a suitable power $\nu$ of $f$ such that $L(f^{\nu})<0$ and then use the above bound. For any map $\phi \in Mod(S_{g,0})$, we have $L(\phi)=2-Tr(\phi _*)$ where $\phi_* : H_1(S) \longrightarrow H_1(S)$ is the induced map on homology. Proposition \ref{main} shows that one can find such a power that is at most polynomially large in terms of the genus.\\ In Proposition \ref{main}, take $B=2$, $\epsilon = \alpha$, $\phi = \hat{f}$ and $A=\phi_*$. Therefore $m=2g$. Hence, if $g \gg 0$ there is some $\nu \leq (2g)^{2+\alpha}$ such that \[ L(f^{\nu})=L\left(\widehat{f^{\nu}}\right)=L\left((\hat{f})^{\nu}\right)=2-Tr(A^\nu)<0. \] Since $f^{\nu}$ is pseudo-Anosov we have the following \[\nu \log(\lambda(f)) = \log(\lambda(f^{\nu})) \geq \frac{\log(3|\chi(S)|)}{6|\chi(S)|}. \] \[ \Rightarrow \log(\lambda(f)) \geq \frac{1}{\nu} \frac{\log(3|\chi(S)|)}{6|\chi(S)|} \geq \frac{1}{(2g)^{2+\alpha}} \frac{\log(3|\chi(S)|)}{6|\chi(S)|}. \] This finishes the proof when $g \gg 0$ let say for $g \geq N$. For the finitely many remaining values of $2 \leq g<N$, we use Lemma \ref{Dirichlet} \cite{montgomery1994ten}. Since $det(A)=1$, by Lemma \ref{Dirichlet} there exist a $1 \leq \nu \leq 8^{2g}$ such that \[ Tr(A^{\nu}) \geq \frac{2g}{\sqrt{2}} > 2. \] Therefore \[ \log(\lambda(f)) \geq \frac{1}{\nu} \frac{\log(3|\chi(S)|)}{6|\chi(S)|} \geq \frac{1}{8^{2g}} \frac{\log(3|\chi(S)|)}{6|\chi(S)|}. \] So if we define $c_j = \dfrac{\hspace{1mm}j^{2+ \alpha}\hspace{1mm}}{8^{2j}}$ and set \[ C' = \min \left\{ c_1 , ... , c_{N-1}, \frac{1}{2^{2+\alpha}}\right\}. \] Then, we have the following for each $g \geq 2$ and $n \geq 0$ \[ \log(\lambda(f)) \geq \frac{C'}{g^{2+\alpha}} \frac{\log(3|\chi(S)|)}{6|\chi(S)|} \geq \frac{C}{g^{2+\alpha}} \frac{\log(|\chi(S)|)}{|\chi(S)|} \] for $C = \dfrac{C'}{6}$. 
\\ \end{proof} \begin{remark} One can use Theorem \ref{tsai} instead of Lemma \ref{Dirichlet} to take care of the finitely many remaining values of $g<N$. However, we preferred to use a more elementary approach. \end{remark} \noindent The next Lemma has been used in the proof of Theorem \ref{maintheorem}. \begin{lem} Let $z_1 , ... , z_m$ be complex numbers. Define \[ S_{\nu} = z_1^{\nu}+... + z_m^{\nu}. \] There is a $\nu$, $1 \leq \nu \leq 8^m$ such that \[ Re(S_{\nu}) \geq \frac{1}{\sqrt{2}} \sum_{j=1}^{m}|z_j|^{\nu}. \] In particular if $|z_1 ... z_m| =1$ then there is a $\nu$, $1 \leq \nu \leq 8^m$ such that $Re(S_{\nu}) \geq \frac{m}{\sqrt{2}}$. \label{Dirichlet} \end{lem} \begin{proof} Decompose the plane into $8$ equal sections according to the angle. For $1 \leq i \leq 8$: \[ V_i = \{ (r,\theta) \in \mathbb{R}^2 | (i-1)\frac{2\pi}{8} \leq \theta < i \frac{2\pi}{8} \}. \] For each $1 \leq k \leq 8^m+1$ we code the regions in which the points $z_1^k, ... , z_m^k$ lie with a vector \[A_k = (a_1 , ... , a_m) \] where $1 \leq a_i \leq 8$. By the pigeonhole principle there are distinct indices $1 \leq i,j \leq 8^m+1$ such that $A_{i} = A_j$. Therefore \[ A_{|j-i|} = (b_1 , ... , b_m)\] where $b_{\ell} \in \{ 1,8\} $ for each $1 \leq \ell \leq m$. This implies that for $\nu = |j-i|$ \[ Re(S_{\nu}) \geq \frac{1}{\sqrt{2}} \sum_{j=1}^{m}|z_j|^{\nu}. \] The conclusion of the second part of the lemma is obtained by using the AM-GM inequality: \[\frac{ |z_1|^{\nu}+...+|z_m|^{\nu}}{m} \geq \sqrt[m]{|z_1 ... z_m|^{\nu}}=1. \] \end{proof} \noindent For any real matrix $A$, we use the notation $\rho(A)$ for the spectral radius of $A$, i.e. the largest absolute value of its eigenvalues. The next Proposition is the main technical result that has been used in this paper. \begin{prop} Fix $B>0$ and $\epsilon >0$. 
There exists $n=n(B,\epsilon)$ such that for any $m \geq n$ and any $A \in SL(m,\mathbb{Z})$ we have the following:\\ There is some $\nu$, $1 \leq \nu \leq m^{2+\epsilon}$ such that \[Tr(A^\nu)>B. \] \label{main} \end{prop} \noindent Proposition \ref{main} obviously follows from the combination of Propositions \ref{case1} and \ref{case2}. \begin{prop} Fix $B>0$ and $\epsilon >0$. There exists $n=n(B,\epsilon)$ such that for any $m \geq n$ and any $A \in SL(m,\mathbb{Z})$ with $\rho(A)>1$ we have the following:\\ There is some $\nu$, $1 \leq \nu \leq m^{1+\epsilon}$ such that \[Tr(A^{\nu})>B. \] \label{case1} \end{prop} \begin{prop} Fix $B>0$ and $\epsilon >0$. There exists $n=n(B,\epsilon)$ such that for any $m \geq n$ and any $A \in SL(m,\mathbb{Z})$ with $\rho(A)=1$ we have the following:\\ There is some $\nu$, $1 \leq \nu \leq m^{2+\epsilon}$ such that \[Tr(A^\nu)>B. \] \label{case2} \end{prop} \noindent \textbf{Proof of Proposition \ref{case1}} \begin{proof} Recall the following theorems of Dobrowolsky \cite{dobrowolski1979question} and Schinzel-Zassenhaus \cite{schinzel1965refinement} \newtheorem*{thm:Dobrowolsky}{Theorem \ref{thm:Dobrowolsky}} \begin{thm:Dobrowolsky}(Dobrowolsky) Let $\lambda$ be an algebraic integer of degree $d$ and define $\overline{|\lambda|}$ to be the maximum modulus among all Galois conjugates of $\lambda$, including itself. For large enough $d$ if $\lambda$ is not a root of unity then \[ \overline{|\lambda|} \geq 1+\frac{1}{d} \bigg( \frac{\log\log(d)}{\log(d)} \bigg)^3. \] \end{thm:Dobrowolsky} \newtheorem*{thm:Schinzel}{Theorem \ref{thm:Schinzel}} \begin{thm:Schinzel}(Schinzel-Zassenhaus) If an algebraic integer $\lambda \neq 0$ is not a root of unity, and if $2s$ among its conjugates have nonzero imaginary part, then \[ \overline{|\lambda|} > 1+4^{-s-2}. 
\] \end{thm:Schinzel} \noindent The two theorems together imply that there exists a constant $c>0$ such that for all $d$ \[\overline{|\lambda|} \geq 1 + \frac{c}{d\,\log(d)^3} \hspace{10mm}(*) \] This is because by Dobrowolsky's theorem one can take $c=1$ for large $d$, say for $d \geq M$. For the finitely many remaining values of $2 \leq d < M$ one can take $c = 4^{-M-2}$. Hence, in general $c= \min\left\{1, 4^{-M-2}\right\}$ works.\\ Let $\lambda_1, ... , \lambda_m$ be the eigenvalues of $A$ with $\lambda_1$ having the maximum modulus among them. Therefore $|\lambda_1| > 1$. By the previous discussion we have the following: \[ |\lambda_1| \geq 1 + \frac{c}{d\,\log(d)^3} \geq 1 + \frac{c}{m\log(m)^3} .\] Define $z_j = \frac{\lambda_j}{|\lambda_1|}$. Hence $z_1 , ... , z_m$ are complex numbers with $\max |z_j|=1$. Define \[ S_{\nu} = z_1^{\nu} +...+ z_m^{\nu}. \] In particular $S_{\nu}$ is always a real number by Newton identities. Set $K_0 = 20(\dfrac{B}{c}) \left(m \log(m)^3\right)$, where $c$ is the constant in $(*)$. Set $K = m^{1+ \epsilon}$. Note that for $m \gg 0$ we have $K \geq 5(m+2BK_0)$. We consider two cases\\ \noindent 1) There exists $1 \leq \nu \leq K_0 $ such that $S_{\nu} > B$. Then \[ Tr(A^{\nu}) = |\lambda_1|^{\nu}\, S_{\nu} \geq S_{\nu} >B. \] \noindent 2) For each $1 \leq \nu \leq K_0$ we have $S_{\nu} \leq B$. The proof in this case follows the lines of the proof of Cassels' theorem \cite{montgomery1994ten}. Let $P(z)= \frac{1}{2}+ \sum_{\nu=1}^{K} (1- \frac{\nu}{K+1})z^{\nu}$. Then $Re(P(z)) \geq 0$ whenever $|z| \leq 1$ by the properties of the Fej\'er kernel. Let $z_j = r_j e(\theta _j) := r_j e^{2\pi i \theta_j}$. 
We have the following \[ \sum_{\nu = 1}^{K} (1-\frac{\nu}{K+1})(1+\cos 2\pi \nu \theta_1)Re(S_{\nu}) = \sum_{j=1}^{m} \sum_{\nu=1}^{K}(1-\frac{\nu}{K+1})r_j^{\nu}(1+\cos 2\pi \nu \theta_1)\cos 2 \pi \nu \theta_j \] \[ = \sum_{j=1}^{m} Re[P(z_j)+ \frac{1}{2}P(r_j e(\theta_j - \theta _1))+ \frac{1}{2}P(r_j e(\theta_j + \theta_1))-1] \] Since $P(r_1)=P(1)=\frac{K+1}{2}$, we obtain that the above is \[ \geq \frac{K+1}{4} - m .\] Now we have the following estimate \[ \sum_{\nu = K_0}^{K} (1-\frac{\nu}{K+1})(1+\cos 2\pi \nu \theta_1)Re(S_{\nu}) = \] \[ \sum_{\nu = 1}^{K} (1-\frac{\nu}{K+1})(1+\cos 2\pi \nu \theta_1)Re(S_{\nu})- \sum_{\nu = 1}^{K_0-1} (1-\frac{\nu}{K+1})(1+\cos 2\pi \nu \theta_1)Re(S_{\nu})\] \[ \geq \frac{K+1}{4} -m - 2BK_0. \] On the other hand we have \[ \sum_{\nu = K_0}^{K}(1-\frac{\nu}{K+1})(1+\cos 2 \pi \nu \theta_1) \leq K.\] Therefore, there exists $K_0 \leq \nu \leq K$ such that \[ S_{\nu} \geq \frac{\frac{K+1}{4}-m-2BK_0}{K} > \frac{1}{4}-\frac{m+2BK_0}{K} \geq \frac{1}{4}-\frac{1}{5}=\frac{1}{20}. \] Now using $(*)$, we have \[ Tr(A^{\nu})=|\lambda_1|^{\nu}\, S_{\nu} \geq (1+\frac{c}{m\log(m)^3})^{K_0} \times \frac{1}{20} \] \[ \geq (1+\frac{cK_0}{m\log(m)^3}) \times \frac{1}{20} >B. \] \end{proof} \begin{remark} It follows from the proof that conditional on the Schinzel-Zassenhaus conjecture \cite{schinzel1965refinement}, one can replace the upper bound $m^{1+\epsilon}$ for $\nu$, in Proposition \ref{case1}, by a linear bound (with linear constant just depending on $B$). \end{remark} \newtheorem*{conj:Schinzel}{Conjecture \ref{conj:Schinzel}} \begin{conj:Schinzel}(Schinzel-Zassenhaus) There exists a constant $c>0$ such that for any algebraic integer $\lambda$ of degree $d$ which is not a root of unity we have \[ \overline{|\lambda|} \geq 1+\frac{c}{d}. \] \end{conj:Schinzel} \noindent We need the next Lemma from \cite{montgomery1994ten} for the proof of Proposition \ref{case2}. \begin{lem} Let $z_1 , ... 
, z_n$ be all the roots of a polynomial with real coefficients. Define \[S_{\nu} = z_1^{\nu}+ ... + z_n^{\nu} \] Then $S_{\nu} \geq 0$ for some integer $\nu$ in the range $1 \leq \nu \leq n+1$. \label{lastlemma} \end{lem} \begin{proof} We closely follow the proof from \cite{montgomery1994ten}. Let $\sigma_j$ be the $j$-th elementary symmetric function of $z_1 , ... , z_n$. Therefore, $\sigma_j$ is real for each $1 \leq j \leq n$. Recall the Newton-Girard identities \[ r \sigma_r = \sum_{\nu=1}^{r}(-1)^{\nu -1}\sigma_{r-\nu} S_{\nu} \] for $1 \leq r \leq n$. Suppose that $S_{\nu}<0$ for $1 \leq \nu \leq n$. Using Newton-Girard identities and induction we deduce that $(-1)^j \sigma_j >0$ for $1 \leq j \leq n$. On the other hand, another set of Newton-Girard identities states that \[ S_{t+n+1} = \sum_{\nu=t+1}^{t+n}S_{\nu}\medskip(-1)^{t+n-\nu}\sigma_{t+n+1-\nu} \] for $t \geq 0$. Putting $t=0$ we see that $S_{\nu}<0$ and $(-1)^{n-\nu}\sigma_{n+1-\nu}<0$ for $1 \leq \nu \leq n$, therefore all summands on the right hand side are positive. Hence $S_{n+1}>0$. \\ \end{proof} \noindent \textbf{Proof of Proposition \ref{case2}} \begin{proof} Let $Q(z)$ be the characteristic polynomial of $A$. By the assumption, all roots of $Q$ have absolute value at most one. Recall the following theorem of Kronecker: \noindent Let $f$ be a monic polynomial with integer coefficients in $z$. If all roots of $f$ have absolute value at most $1$ then $f$ is a product of cyclotomic polynomials and/or a power of $z$.\\ Here, there cannot be any power of $z$, since $Q(0)=\det(A)=1$. So we can write $Q$ as \[ Q(z)=\prod_{j=1}^{l} \Phi_{k_j}(z) \] where $\Phi_{k_j}(z)$ is the $k_j$-th cyclotomic polynomial and $k_1 \leq k_2 \leq ... \leq k_l$ are natural numbers. In particular, by comparing the degrees we deduce that \[ \varphi(k_1)+ ... + \varphi(k_l)=m \] where $\varphi$ is the Euler totient function. Take $B'$ such that for each $t>B'$ we have $\varphi(t)>B$. 
This is possible since $\lim _{t \rightarrow \infty} \varphi (t)=\infty$. In fact more is true. For any $\delta >0$ we have (see \cite{hardy1979introduction} Theorem 327) \[ \lim_{t \rightarrow \infty} \frac{\varphi(t)}{t^{1-\delta}}=\infty. \] \noindent Firstly, we specify how large $m$ should be. We require that $m > (B')!$. Moreover assume that $m$ is large enough so that for each $t \geq m^{1+\epsilon}$ we have $\varphi(t) > m+B$. Note that $k_l < m^{1+ \epsilon}$ since $\varphi(k_l) \leq m$. We consider two cases\\ \noindent 1) $B' < k_l < m^{1+\epsilon}$. Let $g(z)$ be the polynomial whose roots are the $k_l$-powers of the roots of $\frac{Q(z)}{\Phi_{k_l}(z)}$ allowing repetitions. Hence, $g$ has integer coefficients by Newton's identities and $\deg(g) < m$. Let $\nu = k_l \cdot \nu' $ where $1 \leq \nu ' \leq m$ is chosen such that the sum, $S$, of the $\nu'$-powers of the roots of $g$ is non-negative. Such a $\nu '$ exists by Lemma \ref{lastlemma}. \noindent Since $\varphi (k_l) > B$ we have the following \[ Tr(A^{\nu}) = \varphi(k_l)+ S > B. \] \noindent Note that $\nu = k_l \cdot \nu'$ is at most $ m^{1+\epsilon} \cdot m = m^{2+\epsilon}. $\\ \noindent 2) $k_l \leq B'$. Take $\nu = (B')!$. Then \[ Tr(A^{\nu}) = m > B. \] This completes the proof. \end{proof} \begin{conj} There is a constant $C>0$ such that for all $g \geq 2$ and $n \geq 0$ we have the following: \[ \l_{g,n} \geq \frac{C}{g} \medskip \frac{\log(|\chi(S)|)}{|\chi(S)|}. \] \end{conj} \noindent It seems plausible to prove the above conjecture by improving Propositions \ref{case1} and \ref{case2}. As mentioned previously in a remark, the bound in Proposition \ref{case1} can be replaced by a linear bound conditional on the Schinzel-Zassenhaus conjecture. We expect a similar linear bound to be true in Proposition \ref{case2}, however we do not know how to prove it.
{ "redpajama_set_name": "RedPajamaArXiv" }
7,443
// This part of the Mozc OSS data manager wires build-time generated data
// tables (POS grouping, connection matrix, system dictionary) into the
// OssDataManager accessors.  The MOZC_USE_SEPARATE_* macros switch between
// embedding each data set in the binary and loading it at runtime via the
// corresponding Set*Data() method.
using mozc::dictionary::SuffixToken;

namespace mozc {
namespace oss {

namespace {
// kLidGroup[] is defined in the following automatically generated header file.
#include "data_manager/oss/pos_group_data.h"
}  // namespace

// Returns the POS (left-id) grouping table defined in the generated header
// included above.
const uint8 *OssDataManager::GetPosGroupData() const {
  DCHECK(kLidGroup != NULL);
  return kLidGroup;
}

namespace {
#ifdef MOZC_USE_SEPARATE_CONNECTION_DATA
// Placeholders: the real data is installed at runtime via SetConnectionData().
const char *kConnectionData_data = NULL;
const size_t kConnectionData_size = 0;
#else  // MOZC_USE_SEPARATE_CONNECTION_DATA
// Automatically generated header containing the definitions of
// kConnectionData_data and kConnectionData_size. We don't embed it when
// connection data is supplied from outside.
#include "data_manager/oss/embedded_connection_data.h"
#endif  // MOZC_USE_SEPARATE_CONNECTION_DATA

// Mutable globals so SetConnectionData() can override the defaults above.
// NOTE(review): assigning size_t to int narrows; presumably the data sizes
// fit in int -- confirm.
char *g_connection_data_address = const_cast<char *>(kConnectionData_data);
int g_connection_data_size = kConnectionData_size;
}  // namespace

#ifdef MOZC_USE_SEPARATE_CONNECTION_DATA
// Registers runtime-loaded connection data.  Must be called before
// GetConnectorData() when separate connection data is in use.
void OssDataManager::SetConnectionData(void *address, size_t size) {
  g_connection_data_address = reinterpret_cast<char *>(address);
  g_connection_data_size = size;
  DCHECK(g_connection_data_address);
  DCHECK_GT(g_connection_data_size, 0);
}
#endif  // MOZC_USE_SEPARATE_CONNECTION_DATA

// Returns the address/size of the connection (transition cost) matrix.
// Aborts if separate connection data was expected but never registered.
void OssDataManager::GetConnectorData(const char **data, size_t *size) const {
#ifdef MOZC_USE_SEPARATE_CONNECTION_DATA
  if (!g_connection_data_address || g_connection_data_size == 0) {
    LOG(FATAL) << "Connection data is not yet set.";
    CHECK(false);
  }
#endif
  *data = g_connection_data_address;
  *size = g_connection_data_size;
}

namespace {
#ifdef MOZC_USE_SEPARATE_DICTIONARY
// Placeholders: the real data is installed at runtime via SetDictionaryData().
const char *kDictionaryData_data = NULL;
const size_t kDictionaryData_size = 0;
#else  // MOZC_USE_SEPARATE_DICTIONARY
// Automatically generated header containing the definitions of
// kDictionaryData_data[] and kDictionaryData_size.
#include "data_manager/oss/embedded_dictionary_data.h"
#endif  // MOZC_USE_SEPARATE_DICTIONARY

// Mutable globals so SetDictionaryData() can override the defaults above.
char *g_dictionary_address = const_cast<char *>(kDictionaryData_data);
int g_dictionary_size = kDictionaryData_size;
}  // namespace

// Returns the address/size of the system dictionary image.
void OssDataManager::GetSystemDictionaryData(
    const char **data, int *size) const {
  *data = g_dictionary_address;
  *size = g_dictionary_size;
}

#ifdef MOZC_USE_SEPARATE_DICTIONARY
// Registers runtime-loaded dictionary data.  Must be called before
// GetSystemDictionaryData() when separate dictionary data is in use.
void OssDataManager::SetDictionaryData(void *address, size_t size) {
  g_dictionary_address = reinterpret_cast<char *>(address);
  g_dictionary_size = size;
  DCHECK(g_dictionary_address);
  DCHECK_GT(g_dictionary_size, 0);
}
#endif  // MOZC_USE_SEPARATE_DICTIONARY

namespace {
// Automatically generated headers containing data set for segmenter.
#include "data_manager/oss/boundary_data.h"
#include "data_manager/oss/segmenter_data.h"
}  // namespace

// Exposes the segmenter tables from the generated headers above: the
// compressed left/right POS-id tables, the rule bit array, and the
// boundary cost data.
void OssDataManager::GetSegmenterData(
    size_t *l_num_elements, size_t *r_num_elements,
    const uint16 **l_table, const uint16 **r_table,
    size_t *bitarray_num_bytes, const char **bitarray_data,
    const BoundaryData **boundary_data) const {
  *l_num_elements = kCompressedLSize;
  *r_num_elements = kCompressedRSize;
  *l_table = kCompressedLIDTable;
  *r_table = kCompressedRIDTable;
  *bitarray_num_bytes = kSegmenterBitArrayData_size;
  *bitarray_data = kSegmenterBitArrayData_data;
  *boundary_data = kBoundaryData;
}

namespace {
// The generated header defines kSuffixTokens[].
#include "data_manager/oss/suffix_data.h"
}  // namespace

// Returns the suffix dictionary tokens and their count.
void OssDataManager::GetSuffixDictionaryData(const SuffixToken **tokens,
                                             size_t *size) const {
  *tokens = kSuffixTokens;
  *size = arraysize(kSuffixTokens);
}

namespace {
// Include kReadingCorrections.
#include "data_manager/oss/reading_correction_data.h"
}  // namespace

// Returns the reading-correction entries and their count.
void OssDataManager::GetReadingCorrectionData(
    const ReadingCorrectionItem **array, size_t *size) const {
  *array = kReadingCorrections;
  *size = arraysize(kReadingCorrections);
}

namespace {
#ifdef MOZC_USE_SEPARATE_COLLOCATION_DATA
// Placeholders: the real data is installed at runtime via SetCollocationData().
namespace CollocationData {
const char *kExistenceFilter_data = NULL;
const size_t kExistenceFilter_size = 0;
}  // namespace CollocationData
#else  // MOZC_USE_SEPARATE_COLLOCATION_DATA
// Include CollocationData::kExistenceFilter_data and
// CollocationData::kExistenceFilter_size.
#include "data_manager/oss/embedded_collocation_data.h"
#endif  // MOZC_USE_SEPARATE_COLLOCATION_DATA

// Mutable globals so SetCollocationData() can override the defaults above.
char *g_collocation_data_address =
    const_cast<char *>(CollocationData::kExistenceFilter_data);
int g_collocation_data_size = CollocationData::kExistenceFilter_size;

// Include CollocationSuppressionData::kExistenceFilter_data and
// CollocationSuppressionData::kExistenceFilter_size.
#include "data_manager/oss/embedded_collocation_suppression_data.h"
}  // namespace

#ifdef MOZC_USE_SEPARATE_COLLOCATION_DATA
// Registers runtime-loaded collocation data.  Must be called before
// GetCollocationData() when separate collocation data is in use.
void OssDataManager::SetCollocationData(void *address, size_t size) {
  g_collocation_data_address = reinterpret_cast<char *>(address);
  g_collocation_data_size = size;
  DCHECK(g_collocation_data_address);
  DCHECK_GT(g_collocation_data_size, 0);
}
#endif  // MOZC_USE_SEPARATE_COLLOCATION_DATA

// Returns the collocation existence-filter blob.
// NOTE(review): unlike GetConnectorData(), no CHECK(false) follows the
// LOG(FATAL) here; presumably LOG(FATAL) is fatal on its own -- confirm.
void OssDataManager::GetCollocationData(const char **array,
                                        size_t *size) const {
#ifdef MOZC_USE_SEPARATE_COLLOCATION_DATA
  if (!g_collocation_data_address || g_collocation_data_size == 0) {
    LOG(FATAL) << "Collocation data is not yet set.";
  }
#endif  // MOZC_USE_SEPARATE_COLLOCATION_DATA
  *array = g_collocation_data_address;
  *size = g_collocation_data_size;
}

// Returns the collocation-suppression existence-filter blob (always
// embedded; there is no separate-data path for this data set).
void OssDataManager::GetCollocationSuppressionData(const char **array,
                                                   size_t *size) const {
  *array = CollocationSuppressionData::kExistenceFilter_data;
  *size = CollocationSuppressionData::kExistenceFilter_size;
}

namespace {
// Include
// kSuggestionFilterData_data and kSuggestionFilterData_size.
#include "data_manager/oss/suggestion_filter_data.h"
}  // namespace

// Returns the suggestion-filter blob from the generated header above.
void OssDataManager::GetSuggestionFilterData(const char **data,
                                             size_t *size) const {
  *data = kSuggestionFilterData_data;
  *size = kSuggestionFilterData_size;
}

namespace {
// Include kSymbolData_token_data and kSymbolData_token_size.
#include "data_manager/oss/symbol_rewriter_data.h"
}  // namespace

// Returns the embedded-dictionary tokens used by the symbol rewriter.
void OssDataManager::GetSymbolRewriterData(
    const EmbeddedDictionary::Token **data, size_t *size) const {
  *data = kSymbolData_token_data;
  *size = kSymbolData_token_size;
}

#ifndef NO_USAGE_REWRITER
namespace {
#include "rewriter/usage_rewriter_data.h"
}  // namespace

// Returns the conjugation/usage tables for the usage rewriter; all four
// arrays come from the generated header included above.
void OssDataManager::GetUsageRewriterData(
    const ConjugationSuffix **base_conjugation_suffix,
    const ConjugationSuffix **conjugation_suffix_data,
    const int **conjugation_suffix_data_index,
    const UsageDictItem **usage_data_value) const {
  *base_conjugation_suffix = kBaseConjugationSuffix;
  *conjugation_suffix_data = kConjugationSuffixData;
  *conjugation_suffix_data_index = kConjugationSuffixDataIndex;
  *usage_data_value = kUsageData_value;
}
#endif  // NO_USAGE_REWRITER

namespace {
#include "data_manager/oss/counter_suffix_data.h"
}  // namespace

// Returns the sorted counter-suffix array and its length.
void OssDataManager::GetCounterSuffixSortedArray(
    const CounterSuffixEntry **array, size_t *size) const {
  *array = kCounterSuffixes;
  *size = arraysize(kCounterSuffixes);
}

}  // namespace oss
}  // namespace mozc
{ "redpajama_set_name": "RedPajamaGithub" }
1,254
Barry says his home ward, which is almost entirely African American and has extremely high poverty and unemployment rates, has long been the city's "dumping ground." Making it more diverse will likely involve expanding across the Anacostia into the whiter, richer Southwest, Barry said. "We can't go south," Barry joked. Since the population of Ward 6 after the 2010 Census is counted will most likely not be enough on its own to account for 1/8th the population of the District (as mandated by law), other areas will need to be added to the ward. It was mentioned in the article that Ward 6 Council member Tommy Wells would like to add the Anacostia neighborhood to Ward 6 - taking away some turf from Barry. The District will receive Census results in the spring, so it should be an interesting time ahead in the DC Council chambers! As a Ward 6 resident, I would prefer to move into Ward 2 as I believe in Ward 6 we will always be the underdog to Capitol Hill. We are more of a true downtown neighborhood than the Hill. I don't see any advantage of being in Ward 2 compared to 6. We'd be underdogs to Dupont and G'town in Ward 2. Southwest is a true urban environment and has more in common with downtown living and DuPont than Capitol Hill. Maybe our Councilman would "get" our urban issues more than Tommy Wells. An example is the closing of one lane of traffic and the sidewalks around the L'Enfant Plaza metro station for so long, an urban-focused Councilman might have better understood how disruptive that was. I'm pretty sure you are not going to get a more urban focused CM than Tommy Wells. Though I'm not sure I know what you're referring to when it comes to closing down lanes around L'Enfant Metro. Stephen: When what is now called Constitution Square was remodeled recently, the entire block bounded by 7th, D, 6th, and E Streets SW lost one lane of traffic and the sidewalks on all four sides of the building for almost two years. 
In most major cities, the contractor would have been able to close only one or two streets at a time as they worked their way around the building, and the sidewalks would have had wooden "tunnels" built so pedestrian traffic could be maintained. This, at a minimum, should have been done along the east side of 7th Street so SW residents walking to the Metro would not have had to cross the street twice needlessly, which was particularly dangerous since one lane of closed traffic was causing drivers frustration. L'Enfant Plaza is in Ward 2 and SW residents are in Ward 6 so it really wasn't any Councilman's problem to own. I agree Tommy Wells is a fine Councilman, but Capitol Hill is not an urban environment in the sense that Southwest is. I would call Capitol Hill more of an urban village, with primarily single family homes and a relatively high degree of families with children, while we in Southwest are more true "downtown" metropolitan dwellers in the sense of Penn Quarter and DuPont. The question isn't Ward 2 or Ward 6. There has been enough population growth in NoMa, Mt Vernon Triangle, 14th & U, and other areas in near NW over the past decade that both Wards 2 and 6 will be pulled north. So the only question for SW Waterfront will be Ward 6 or Ward 8. Would you rather be represented by Tommy Wells or Marion Barry? If we're in Ward 8, can we vote Barry out? No, we can't vote Barry out, there aren't nearly enough of us. From what I've read the real problem is Ward 8's shrinking population. Unless they can take territory from Ward 7, I don't see any options but Ward 6, and we would be the likely victims as SW is the most contiguous to Ward 8 and the most distant corner of Ward 6. We are doomed, in other words. Yes, I think the real issue here is not ward 2 or 6 (though, I hadn't realized parking was tied to this...I do kind of like being in zone for parking on the Hill). But the truly distressing thought is having Barry as city council rep. 
I read this article the same day I read the one about the possibility of a tax rise in DC to cover the budget shortfall and recall thinking that as committed an urbanite as I am, raising taxes AND sticking me with Barry could be exactly what it takes to make me move back to Maryland, which I grew up in and find a perfectly nice state. Please, anybody but Marion Barry as my CM. I'm happy with Tommy Wells. Do we not get any say in this? Is a majority of the population's approval not required for a Ward to be re-districted? We shouldn't end up voting for one CM only to be represented by another without our consent! "Though every councilmember will get a chance to vote on the final plan, the councilmember who gets to head the committee that draws up the first draft of a redistricting plan will likely have a bigger say in what the next map looks like. So far, Almost Council Chairman Kwame Brown has been mum on future committee appointments (except to say that Barry, who was stripped of a committee chairmanship by the current council, would get some kind of post back). There's some speculation that one of the four at-large councilmembers, who wouldn't have any dog in a ward boundary fight, will be picked to head the redistricting effort. But whether anyone wants the job is another story. A politically adept councilmember might be able to use the committee to score political points, but most likely whomever gets the gig will find it a pretty thankless task that is a surefire way to make enemies." I think Ward 8 is in our future, like it or not. It may also be too early to do anything, as the census numbers aren't even due until next month. Tommy also said Ward 6's voter participation rate is now the second-highest in District---good for us! I spotted Marion Barry in the new Safeway Friday evening. Is he here to pee on and mark "his" new territory. Please help! We can't let this buffoon have anything to do with SW. 
I am strongly opposed to having anything to do with Marion Barry. Period.
{ "redpajama_set_name": "RedPajamaC4" }
9,852
Q: python - How do I properly use asyncio and read csv with pandas I have many csv files in the path , and I hope to use pandas read_csv to read , then using pandas.concat to merge all return dataframe , but I think I do not use asyncio properly , because consumption of time did not shorten. import asyncio import time import pandas as pd import glob2 import os async def read_csv(filename): df = pd.read_csv(filename, header=None) return df t = time.time() path = r'C:\LRM_STGY_REPO\IB_IN' tasks = [asyncio.ensure_future(read_csv(i)) for i in list(glob2.iglob(os.path.join(path, "*.txt")))] loop = asyncio.get_event_loop() loop.run_until_complete(asyncio.wait(tasks)) df = pd.concat([t.result() for t in tasks],ignore_index=True) # print(df) print( '%.4f' %(time.time()-t)) t = time.time() def read_csv2(filename): return pd.read_csv(filename, header=None) df = pd.concat(map(read_csv2,glob2.iglob(os.path.join(path, "*.txt"))),ignore_index=True) # print(df) print( '%.4f' %(time.time()-t)) read_csv and read_csv2 have similar consumption time. Or there are other ways to reduce the concat time .
{ "redpajama_set_name": "RedPajamaStackExchange" }
4,011
\section{Introduction} \label{eq:intro} Since the experimental observation of skyrmions in ferromagnetic materials with the Dzyaloshinskii-Moriya (DM) interaction, a substantial amount of work has been devoted to their statics and dynamics \cite{EverschorMasellReeveKlaeui_JAP2018}. Chiral skyrmions are topological solitons that have the same topological features as magnetic bubbles \cite{MalozemoffSlonczewski}, but the detailed features of the chiral skyrmion profile are specific to it \cite{BogdanovHubert_JMMM1994,KomineasMelcherVenakides_NL2020}. Most work has largely focused on the axially symmetric chiral skyrmion predicted in Refs.~\cite{BogdanovYablonskii_JETP1989,BogdanovHubert_JMMM1994}. Different crystal symmetries give rise to various types of DM interactions and these, in turn, define the kinds of skyrmions that can be stabilised \cite{BogdanovYablonskii_JETP1989,HoffmannMelcherBluegel_NatComm2017}. In Ref.~\cite{NayakKumarParkin_Nature2017} the observation of antiskyrmions has been reported, that is, skyrmions that have a winding number opposite to that of the standard axially-symmetric skyrmions. The dynamics of skyrmions is linked to their topology \cite{KomineasPapanicolaou_PRB2015a,PapanicolaouTomaras_NPB1991}, specifically, it depends on the topological number, usually called the skyrmion number $Q$. Skyrmions with $Q \neq 0$, such as the axially symmetric skyrmions or the antiskyrmions with $Q = \pm 1$, are called {\it topological}, while skyrmionic textures with $Q=0$ are called {\it topologically trivial}. Topological, $Q \neq 0$, skyrmions are spontaneously pinned in a ferromagnetic film \cite{PapanicolaouTomaras_NPB1991,KomineasPapanicolaou_PhysD1996}. By contrast, topologically trivial magnetic solitons propagate freely as Newtonian particles. An example is provided by the skyrmionium \cite{BogdanovHubert_JMMM1999,LeonovMostovoy_EPJ2013,KomineasPapanicolaou_PRB2015a}, an axially-symmetric skyrmionic texture with trivial topology. 
We will study $Q=0$ solitons that can be constructed as skyrmion-antiskyrmion pairs. In Ref.~\cite{JagannathGobelParkin_NatComm2020}, observations of topologically trivial objects in the form of skyrmion-antiskyrmion pairs in a DM material are reported. In Ref.~\cite{RozsaPalotas_PRB2017}, $Q=0$ textures are found numerically within a model with frustrated isotropic exchange and DM interaction and they are called ``chimera skyrmions'' due to the coexistence of skyrmion and antiskyrmion parts. We find numerically, within a model with DM interaction, a skyrmionic texture with $Q=0$ that has the features of a skyrmion-antiskyrmion pair. This is an asymmetric configuration and its shape resembles that of a liquid droplet. The skyrmion part occupies a much larger area than the antiskyrmion part. We find that a static droplet is a stable structure in a ferromagnetic infinite film as well as in a disc-shaped element, within appropriate ranges of the model parameters. In the case of a film, we study the dynamics of the skyrmion-antiskyrmion pair under in-plane spin-polarized current. This is traveling in the direction of the current and presents Newtonian dynamics and no Magnus force effect. The outline of the paper is as follows. Sec.~\ref{sec:formulation} defines the model and the notions used in the paper. Sec.~\ref{sec:droplet} presents the static solutions for skyrmion-antiskyrmion pairs in an infinite film. Sec.~\ref{sec:spinTorque} studies the dynamical behavior of a skyrmion-antiskyrmion pair under spin-polarized current. Sec.~\ref{sec:dropInDot} presents skyrmion-antiskyrmion pairs in a disc-shaped particle. Sec.~\ref{sec:conclusions} contains our concluding remarks. \section{Formulation} \label{sec:formulation} We consider a ferromagnetic film with exchange, easy-axis anisotropy perpendicular to the film, and interfacial DM interaction. 
We denote the saturation magnetization by $M_s$ and the normalized magnetization vector by $\bm{m} = (m_x,m_y,m_z)$, with $\bm{m}^2=1$. The magnetic energy is \begin{equation} \label{eq:energy0} \begin{split} E & = A \int \left[ (\partial_x \bm{m})^2 + (\partial_y\bm{m})^2 \right] \mathrm{d}V + K \int (1-m_z^2)\, \mathrm{d}V \\ & + D \int \left[ \bm{\hat{e}}_x\cdot (\partial_y\bm{m}\times\bm{m}) - \bm{\hat{e}}_y\cdot(\partial_x\bm{m}\times\bm{m}) \right]\, \mathrm{d}V \\ & - \frac{1}{2}\mu_0 M_s^2 \int \hm\cdot\bm{m}\,\mathrm{d}V \end{split} \end{equation} where $\bm{\hat{e}}_x, \bm{\hat{e}}_y, \bm{\hat{e}}_z$ denote the unit vectors in the respective directions, $A$ is the exchange parameter, $K$ is the easy-axis anisotropy parameter, $D$ is the DM parameter, and $\hm$ is the magnetostatic field normalized to the saturation magnetization. We will consider a thin film where the magnetostatic field is approximated as an easy-plane anisotropy term, $\hm \approx -m_z\bm{\hat{e}}_z$. Taking into account this approximation, we define the non-local part $\hm'$ of the magnetostatic field $\hm$ via the relation \begin{equation} \label{eq:hmprime} \hm = -m_z\bm{\hat{e}}_z + \hm', \end{equation} and note that $\hm'$ tends to zero for very thin films. Substituting Eq.~\eqref{eq:hmprime} in Eq.~\eqref{eq:energy0}, gives rise to an effective anisotropy parameter (that includes the local part of the magnetostatic field) \begin{equation} \label{eq:Keff} \Keff = K-\frac{1}{2}\mu_0 M_s^2. \end{equation} The statics and dynamics of the magnetization vector are described by the Landau-Lifshitz equation derived from the energy in Eq.~\eqref{eq:energy0}. Including a Gilbert damping term, we have \begin{equation} \label{eq:LLG0} \partial_t\bm{m} = -\gamma \bm{m}\times\bm{F} + \alpha\,\bm{m}\times \partial_t\bm{m} \end{equation} where $\alpha$ is the damping parameter. 
The effective field is defined by \begin{equation} \label{eq:effectiveField_def} \bm{F} = -\frac{1}{M_s}\frac{\delta E}{\delta \bm{m}} \end{equation} and it has the form \begin{equation} \label{eq:effectiveField0} \begin{split} \bm{F} = \mu_0 M_s & \left[ \frac{2A}{\mu_0 M_s^2} \Delta\bm{m} + \frac{2\Keff}{\mu_0 M_s^2} m_z \bm{\hat{e}}_z \right. \\ & \left. + \frac{2D}{\mu_0 M_s^2} \left( \bm{\hat{e}}_y\times\partial_x\bm{m} - \bm{\hat{e}}_x\times\partial_y\bm{m} \right) + \hm' \right]. \end{split} \end{equation} It is instructive to write the dimensionless form of Eq.~\eqref{eq:LLG0}. Using $\ell_{\rm w}=\sqrt{A/\Keff}$ as the unit of length, we obtain the dimensionless Landau-Lifshitz-Gilbert (LLG) equation \begin{equation} \label{eq:LLG} \partial_\tau\bm{m} = -\bm{m} \times \bm{f} + \alpha \bm{m}\times\partial_\tau\bm{m} \end{equation} where \begin{equation} \label{eq:effectiveField} \bm{f} = \Delta\bm{m} + m_z \bm{\hat{e}}_z + 2\epsilon \left( \bm{\hat{e}}_y\times\partial_x\bm{m} - \bm{\hat{e}}_x\times\partial_y\bm{m} \right) + \frac{\hm'}{\kappa} \end{equation} and we have introduced the dimensionless DM parameter \begin{equation} \label{eq:parameterDMI} \epsilon = \frac{D}{2\sqrt{A\Keff}} \end{equation} and the dimensionless anisotropy parameter \begin{equation} \label{eq:anisotropy} \kappa = \frac{2\Keff}{\mu_0 M_s^2}. \end{equation} Eq.~\eqref{eq:effectiveField} indicates that varying $\kappa$ amounts to tuning the effect of the magnetostatic field. The normalized time variable in Eq.~\eqref{eq:LLG} is \begin{equation} \label{eq:time} \tau=\frac{t}{\tau_0},\qquad \tau_0 = \frac{1}{\gamma\mu_0 M_s\kappa}. \end{equation} When we set $\hm'=0$ in model \eqref{eq:effectiveField}, the ground state is the spiral for $\epsilon > 2/\pi \approx 0.637$ while we have the ferromagnetic state for $\epsilon < 2/\pi$ \cite{BogdanovHubert_JMMM1994}. Skyrmions with axial symmetry are stable excited states on the ferromagnetic state. 
All magnetic configurations are characterised by the skyrmion number defined as \begin{equation} \label{eq:skyrmionNumber} Q = \frac{1}{4\pi} \int q\, \mathrm{d}x\mathrm{d}y,\qquad q = \bm{m}\cdot(\partial_y\bm{m}\times \partial_x\bm{m}) \end{equation} where $q$ is a {\it topological density}. The skyrmion number $Q$ is integer-valued for all magnetic configurations with a constant $\bm{m}$ at spatial infinity. For definiteness, we assume $\bm{m}=(0,0,1)$ at spatial infinity in all our calculations. Skyrmion configurations often have a simple representation when we use the stereographic projection of the magnetization, defined by \begin{equation} \Omega = \frac{m_x+i m_y}{1+m_z}. \end{equation} For the model with the exchange interaction only (pure exchange model), an axially symmetric skyrmion solution of unit degree, $Q=1$, is given by \cite{BelavinPolyakov_JETP1975} \begin{equation} \label{eq:BPskyrmion} \Omega = \frac{a}{\rho}\, e^{i\phi} = \frac{a}{x-iy}, \end{equation} where $(\rho, \phi)$ are polar coordinates and $a$ is an arbitrary constant giving the skyrmion radius. The solution of the pure exchange model \begin{equation} \label{eq:BPantiskyrmion} \Omega = \frac{a}{\rho}\, e^{-i\phi} = \frac{a}{x+iy} \end{equation} presents opposite winding than solution \eqref{eq:BPskyrmion} as seen in the sign of the complex exponent. Such a configuration has skyrmion number $Q=-1$ and it is called an {\it antiskyrmion}. \section{Skyrmion-antiskyrmion textures} \label{sec:droplet} \label{sec:dropInFilm} \begin{figure}[t] \begin{center} \includegraphics[width=\columnwidth]{Fig1_initial_mag.pdf} \caption{The magnetization configuration produced by the form \eqref{eq:BP0} representing a skyrmion-antiskyrmion pair. The lower half of this configuration has the features of a skyrmion and the upper half has the features of an antiskyrmion. 
This is used as an initial condition in the energy relaxation algorithm for finding a static solution of the equation.} \label{fig:BP0} \end{center} \end{figure} We are looking for solutions of model \eqref{eq:LLG} with skyrmion number $Q=0$. An ansatz for a $Q=0$ configuration is conveniently given in terms of the stereographic variable as \begin{equation} \label{eq:BP0} \Omega = \frac{a}{x + i |y|} \end{equation} where $a$ is an arbitrary constant. The magnetization configuration produced by the form \eqref{eq:BP0} is shown in Fig.~\ref{fig:BP0}. Half of this configuration has the features of a skyrmion similar to the form \eqref{eq:BPskyrmion}, and the other half has the features of an antiskyrmion similar to the form \eqref{eq:BPantiskyrmion}. Such a configuration may be called a {\it skyrmion-antiskyrmion} pair. We perform numerical simulations using \textit{Mumax3} \cite{VansteenkisteWaeyenberge_AIPadv2014}. We use the form \eqref{eq:BP0} as an initial condition and apply an energy minimization procedure. First, we use the ``minimize()" function of \textit{Mumax3} that applies a conjugate gradient method for energy minimization. The energy is minimized until the error in the magnetization is smaller than $10^{-5}$ in every micromagnetic cell. We then integrate the Landau-Lifshitz-Gilbert equation without the precession term till either the total energy of the system reaches the numerical noise floor of the simulation or the total simulation time exceeds 10~ns. The above methodology converges to a static skyrmion-antiskyrmion configuration for a narrow range of values of the dimensionless parameters $\epsilon, \kappa$. The convergence of the algorithm also depends on the film thickness. 
\begin{table}[t] \centering \begin{tabular}{c c} \hline Parameter & Value \\ \hline $M_s$ & $8.38\times 10^5\,{\rm A/m}$ \\ A & $1.1\times 10^{-11}\,{\rm J/m}$ \\ $K (\Keff)$ & $1.193\, (7.518) \times 10^6\,{\rm J/m^3}$ \\ D & $3.5\times 10^{-3}\,{\rm J/m^2}$ \\ \hline \end{tabular} \caption{Values for material parameters used in most of our simulations presented in the figures: $M_s$ is the saturation magnetization, $A$ the exchange parameter, $K$ the easy-axis anisotropy parameter ($\Keff$ includes the local effect of the magnetostatic interaction according to Eq.~\eqref{eq:Keff}), and $D$ the DM parameter.} \label{tab:parameters} \end{table} In most of the numerical simulations presented in the following figures, we use the set of parameter values shown in Table~\ref{tab:parameters}. The resulting values for the length and time scales in Eq.~\eqref{eq:LLG} are $\ell_{\rm w}=3.81\,{\rm nm}$ and $\tau_0 = 5.14\,{\rm ps}$ respectively. The values of the dimensionless parameters are \begin{equation} \label{eq:parameters_values} \epsilon=0.6086,\qquad \kappa=1.704. \end{equation} \begin{figure}[t] \centering \includegraphics[width=\columnwidth]{Fig2_static_droplet.pdf} \caption{(a) A static skyrmion-antiskyrmion droplet with $Q=0$ found numerically for the parameter values given in Table~\ref{tab:parameters} and for film thickness $d_{\rm f}=0.5$~nm. For comparison, a $Q=1$ skyrmion in the same system has radius $\sim 16\,{\rm nm}$ (b) Blow-up of the antiskyrmion part of the configuration, where we show the full resolution of the simulation. The cell size in the simulations is $\rm 0.195~nm\times0.195~nm\times0.5~nm$. The simulated domain in the plane of the film is $400\,{\rm nm}\times400\,{\rm nm}$ and periodic boundary conditions are used. \label{fig:staticDrop}} \end{figure} Fig.~\ref{fig:staticDrop} shows a skyrmion-antiskyrmion configuration which is found as a static solution of the LLG equation \eqref{eq:LLG0}. 
This resembles, in its overall shape, a droplet of liquid and we will therefore refer to it as {\it a chiral droplet} (or, simply, a droplet). A larger part of the configuration has the features of a skyrmion and a smaller part has the features of an antiskyrmion. It neither is axially-symmetric nor does it have a circular shape. In the example shown in Fig.~\ref{fig:staticDrop}, the antiskyrmion part is at the top part of the droplet, but the configuration can be rotated without changing its energy. Textures with skyrmion and antiskyrmion parts (``chimera skyrmions''), similar to the ones reported here, have been found numerically and studied in Refs.~\cite{RozsaPalotas_PRB2017,PalotasRozsa_PRB2017,RozsaNowak_arXiv2020} within a model with frustrated isotropic exchange interaction and DM interaction. Related to our droplet are also the $Q=0$ magnetic bubbles reported in Ref.~\cite{MoutafisKomineas_PRB2009}. The latter are not chiral, they are stabilized primarily by the magnetostatic interaction and their overall shape is almost circular. Another related structure (also termed a droplet) has been reported in Ref.~\cite{MohseniSaniAkerman_Science2013}, in films without DM interaction, and it is a dynamical configuration exhibiting precession of spins. In the chiral droplet studied here, spin precession does not occur due to the breaking of rotational symmetry in the magnetization space introduced by the chiral interaction. In Ref.~\cite{Cooper_PRL1998}, Skyrmion-antiskyrmion pairs have been studied for a model with exchange interaction only and these were found to be necessarily propagating. \begin{figure}[t] \centering \includegraphics[width=0.9\columnwidth]{Fig3_topo_dens.pdf} \caption{The topological density $q$ for the chiral droplet of Fig.~\ref{fig:staticDrop}. The part where $q<0$ (blue) occupies a very small region compared to the part where $q>0$ (red). 
The density $q$ takes very large negative values in a small region while, in the region with positive values, $q$ is small. The integrated topological density, or $Q$, is zero. \label{fig:topologicalDensity}} \end{figure} Fig.~\ref{fig:topologicalDensity} shows the distribution of the topological density $q$ defined in Eq.~\eqref{eq:skyrmionNumber} for the droplet of Fig.~\ref{fig:staticDrop}. The area of negative topological density is concentrated in the small part of the droplet where the antiskyrmion is located and it takes very high values. The area of positive $q$ is spread over a much larger area around the droplet domain wall and it takes smaller values. We could find stable skyrmion-antiskyrmion droplets only in thin films. For $d_{\rm f}=0.5\,{\rm nm}$, we find a static droplet for values of the dimensionless parameter in a narrow range around $\epsilon=0.61$. Keeping $\epsilon$ constant, we could choose the DM parameter in the range $2.9\,{\rm mJ/m^2} \leq D \leq 6\,{\rm mJ/m^2}$. We could also find stable droplets for film thickness smaller than $d_{\rm f}=0.5\,{\rm nm}$ and for similar parameter values. We could also find a stable droplet for film thickness $d_{\rm f}=1\,{\rm nm}$ for the parameter values $D=6\,{\rm mJ/m^2}, K=26.5\times10^5\,{\rm J/m^3}$ (other parameters as in Table~\ref{tab:parameters}). These values correspond to the dimensionless parameters $\epsilon=0.6086,\,\kappa=5.01$. The droplet is stable for a range of parameter values around the ones given above. We finally note that, as the parameter space is large and hard to explore exhaustively, we cannot exclude that the droplet may also exist for values of the parameters beyond those reported in this paper. The presence of the magnetostatic field is important for the existence of a skyrmion-antiskyrmion droplet. No such solution is found if $\hm$ is not included in the LLG Eq.~\eqref{eq:LLG}. 
\section{Motion under spin-transfer torque} \label{sec:spinTorque} We probe the dynamics of the $Q=0$ droplet by applying an in-plane current flowing in the magnetic film. We model this system via the LLG equation including spin-transfer torque terms \cite{ZhangLi_PRL2004} \begin{equation} \label{eq:llg_stt_ip} (\partial_t + U_\mu\, \partial_\mu)\bm{m} = -\gamma \bm{m}\times\bm{F} + \bm{m}\times \left( \alpha\partial_t + \beta U_\mu\, \partial_\mu \right) \bm{m} \end{equation} where we have used the notation $x_\mu$ with $\mu=1$ or $2$ for the two coordinates in the film plane. The velocity of the spin-polarized electron flow is $(U_1, U_2)$ and we will consider the two cases $(U_1, U_2)=(U,0)$ and $(U_1, U_2)=(0,U)$, i.e., a current flowing in the $x$ and in the $y$ direction, respectively. The flow velocity $U$ is called the adiabatic spin torque parameter and it is given by \begin{equation} U = \frac{P g\mu_B}{2|e| M_s}\,J_{\rm e} \end{equation} where $J_{\rm e}$ is the current density, $P$ is the degree of polarization, $\mu_B$ is the Bohr magneton, and $g=2$ is the electron $g$-factor. The parameter $\beta$, called the degree of adiabaticity, represents the contribution of the non-adiabatic spin torque term relative to the adiabatic one. If we assume rigid translational motion of the droplet with a velocity $\bm{\Vel}=(V_1,V_2)$, i.e., we make the traveling wave ansatz, then we have $\partial_t\bm{m} = -V_\nu \partial_\nu\bm{m}$. We substitute this in Eq.~\eqref{eq:llg_stt_ip}, take the cross product of both sides with $\partial_\lambda\bm{m}$, then contract with $\bm{m}$, and integrate the resulting equations for $\lambda=1, 2$ over all space and set $Q=0$ \cite{KomineasPapanicolaou_PRB2015b}. This yields that the motion is in the direction of the current flow with velocity \cite{EverschorGarst_PRB2011,KomineasPapanicolaou_PRB2015b} \begin{equation} \label{eq:vel_rigidMotion} \Vel = \frac{\beta}{\alpha} U. 
\end{equation} Therefore, in a steady state motion, the droplet is not expected to exhibit a component of the motion perpendicular to the current, in contrast to the typical dynamics of the $Q=1$ skyrmion. \begin{figure*}[t] \centering \includegraphics[width=2\columnwidth]{Fig4_fullmotion.pdf} \caption{(a) Snapshots of an initially static droplet during a simulation where a spin current $J_{\rm e}$ is applied in the $x$ direction for times $0\leq t\leq 75\,{\rm ns}$. The spin current parameters are $U=3.86\,{\rm m/s},\, \beta=0.075$ and the damping parameter is $\alpha=0.03$. (b) The velocity of the droplet as a function of time. Upon switching on the current the velocity is $\Vel=4.02\,{\rm m/s}$, in the current direction (denoted $V_x$ in the figure). The droplet is then accelerated up to $\Vel=9.45\,{\rm m/s}$, which is close to the value $2.5U$. After switching off the current, the velocity drops instantly by approximately $4\,{\rm m/s}$ (which is close to $U$), to $\Vel=5.37\,{\rm m/s}$. The droplet continues to travel and the damping term decelerates the motion until it stops. The component of the velocity perpendicular to the current (denoted $V_y$ in the figure) is very small and goes to zero for steady-state motion. } \label{fig:STsnapshots} \end{figure*} For a more detailed description of the droplet motion and of the following simulations, we recall a fundamental result given in Ref.~\cite{KomineasPapanicolaou_PRB2015b}. That is, a propagating solution of Eq.~\eqref{eq:llg_stt_ip} with velocity $\Vel$ is also a solitary wave solution of the conservative Landau-Lifshitz equation, i.e., Eq.~\eqref{eq:LLG0} with $\alpha=0$, albeit with a different velocity $\Vel_{\rm LL}$. Specifically, let us assume an electron flow velocity $(U,0)$ and a droplet propagating with velocity $\Vel$ in the direction of the current, i.e., $\bm{m}=\bm{m}(x-\Vel t,y)$. 
Eq.~\eqref{eq:llg_stt_ip} gives \begin{equation} \label{eq:llg_stt_ip_x} (U - \Vel)\, \partial_x\bm{m} = -\gamma\bm{m}\times\bm{F} + \alpha \left( \frac{\beta}{\alpha} U - \Vel \right)\bm{m}\times \partial_x\bm{m}. \end{equation} If we assume a propagating solution of Eq.~\eqref{eq:llg_stt_ip_x} with velocity $\Vel = \frac{\beta}{\alpha}U$, then the same configuration is a solitary wave satisfying the conservative ($\alpha=0$) Landau-Lifshitz equation \eqref{eq:LLG0} with a velocity \begin{equation} \label{eq:velocity_reduction} \Vel_{\rm LL} = \Vel - U = \left( \frac{\beta}{\alpha}-1 \right)U. \end{equation} In the special case $\beta=\alpha$, a static solution of Eq.~\eqref{eq:LLG0}, say $\bm{m}_0(x,y)$, gives the propagating solution $\bm{m}(x,y,t)=\bm{m}_0(x-U t,y)$ of Eq.~\eqref{eq:llg_stt_ip_x}, with velocity $\Vel=U$. We now proceed to numerical simulations where we use as initial condition the static droplet shown in Fig.~\ref{fig:staticDrop}. For the results presented in this section, we use a domain $400\,{\rm nm}\times 400\,{\rm nm}$ with periodic boundary conditions. The cells have dimensions $\rm 0.39~nm\times0.39~nm\times0.5~nm$, i.e., it is a coarser lattice than the one used for the achievement of the static droplet in Fig.~\ref{fig:staticDrop}. We use $P=0.56$, $g=2$ and, for our typical choice of current $J_{\rm e}=1.0\times 10^{11}\,{\rm A/m^2}$, we have a flow velocity $U = 3.86\,{\rm m/sec}$. For following the dynamics of the droplet, we measure its position $(X,Y)$ using the formulae \begin{equation} X = \frac{\int x (m_z-1)\,dx dy}{\int (m_z-1)\,dx dy},\quad Y = \frac{\int y (m_z-1)\,dx dy}{\int (m_z-1)\,dx dy}. \end{equation} We calculate the skyrmion velocity using finite differences of the position. In the following simulations, we have chosen a current in the $x$ direction and a damping parameter $\alpha=0.03$. 
We use as initial condition the droplet $\bm{m}_0(x,y)$ of Fig.~\ref{fig:staticDrop} rotated by $\pi$ (the reason for the rotation will become apparent in the following). For $\beta=\alpha$ we observe that the droplet of the initial condition is traveling with velocity $\Vel=U$ (within numerical error) in the direction of the current, $\bm{m}=\bm{m}_0(x-U t,y)$, as anticipated from the discussion in connection with Eq.~\eqref{eq:velocity_reduction}. During the simulation the initial droplet remains unchanged. We also observe a small component of the velocity $\sim0.15\,{\rm m/sec}$ perpendicular to the current direction, and we attribute it to numerical errors. \begin{figure*}[t] \centering \includegraphics[width=1.95\columnwidth]{Fig5_dynamic_beta2.pdf} \caption{Snapshots for the droplet of Fig.~\ref{fig:staticDrop} when this is placed under spin torque at time $t=0$ and it is set in motion. The flow velocity is (a) $(U,0)$ and (b) $(0,U)$ with $U=3.72\,{\rm m/sec}$. The damping parameter is $\alpha=0.1$ and the non-adiabaticity parameter is $\beta=0.2$. The simulation domain is $400\,{\rm nm}\times 400\,{\rm nm}$ and we apply periodic boundary conditions. } \label{fig:STsnapshots_pi} \end{figure*} In the next simulation, we choose $\beta=0.075$ (that is, $\beta=2.5\alpha$). A spin current is applied for the time interval $0 \leq t \leq 75\,{\rm ns}$ and it is then switched off. Fig.~\ref{fig:STsnapshots} shows snapshots of the droplet during the simulation and the velocity of the droplet as a function of time. Upon switching on the current the droplet instantly acquires a velocity $\Vel$ in the current direction, which is close to $U$. It is subsequently accelerated up to $\Vel=9.45\,{\rm m/s}$, which is close to the value $2.5U$. At this point, the velocity seems to saturate. The propagating droplet is different (larger) than the static one as clearly seen in the snapshots. 
When the current is switched off, at $t=75\,{\rm ns}$ the droplet velocity is reduced instantly by approximately $4\,{\rm m/s}$ (which is close to $U$), to $\Vel=5.37\,{\rm m/s}$. From this point on, the relevant equation is Eq.~\eqref{eq:LLG0} while the reduction of the velocity is anticipated based on Eq.~\eqref{eq:velocity_reduction}. Let us summarize the procedure. The spin current initially accelerates the droplet and the configuration converges to a solitary wave solution of the conservative Landau-Lifshitz equation \cite{DoeringMelcher_CVPDE2017}. The solitary wave continues to travel in the absence of the current. The damping term decelerates the motion until it eventually stops. We measure a small component of the velocity in the direction perpendicular to the current ($V_y$) during the acceleration phase. A part of it is due to the way we measure the position of the droplet and some other part is due to numerical errors. At time 75~ns, we have a sudden small change of $V_y$ and we can only attribute it to the same reasons. In the next simulations, we use as initial condition the droplet exactly as shown in Fig.~\ref{fig:staticDrop}. We choose $\alpha=0.1$ and $\beta=0.2$. The larger damping is chosen in order to avoid transients and obtain the essential dynamics in a shorter simulation time. Furthermore, we now choose $P=0.538$ and this gives $U=3.72\,{\rm m/sec}$. We apply a current in the $x$ direction, or $(U_1,U_2)=(U,0)$. Fig.~\ref{fig:STsnapshots_pi}a shows snapshots of the droplet during the simulation. The motion is initially complicated with the droplet making a full $\pi$ turn. For times greater than $150\,{\rm ns}$ a steady-state motion is reached and the velocity has a constant value $7.41\,{\rm m/sec}$ along the direction of the current, in very good agreement with the theoretical prediction $\Vel=2U$ given in Eq.~\eqref{eq:vel_rigidMotion}. 
We also observe a small component of the velocity perpendicular to the current direction, and we attribute it to numerical errors. We finally mention that for large values of $\beta$ (e.g., $\beta = 5\alpha$) the droplet is destroyed while it is moving, by expanding in size. We continue to present the full set of our simulations before we proceed to give an explanation for the steady state achieved by the droplet. In the next simulation, we choose a spin current along the $y$ direction, or $(U_1,U_2)=(0,U)$. Fig.~\ref{fig:STsnapshots_pi}b shows a series of snapshots of the droplet during the motion. The droplet is initially making a $\pi/2$ turn. A propagating steady-state is eventually reached with velocity $7.41\,{\rm m/sec}$ in the $y$ direction in very good agreement with the theoretical prediction $\Vel=2U$ given in Eq.~\eqref{eq:vel_rigidMotion}. The important feature shared by all the simulations that we have seen in this section is the common orientation of the skyrmion-antiskyrmion pairs with respect to the direction of motion in the steady state. Specifically, in both entries of Fig.~\ref{fig:STsnapshots_pi}, the skyrmion-antiskyrmion pair goes in steady state motion only after it rotates in order to achieve the particular orientation. This is because the steady state achieved is a solitary wave, that is, the propagating droplet is a rigidly propagating solution of the Landau-Lifshitz equation. Such solutions have well-defined features. For example, the shape of the solitary wave defines its velocity. In our case, the particular orientation of the skyrmion-antiskyrmion pair gives a solitary wave velocity in the positive $x$ axis for the case of Figs.~\ref{fig:STsnapshots}, \ref{fig:STsnapshots_pi}a and in the positive $y$ axis in the case of Fig.~\ref{fig:STsnapshots_pi}b. Exchanging the positions of the skyrmion and the antiskyrmion would invert the direction (sign) of the velocity. 
One could conclude that the spin current sets in motion the skyrmion-antiskyrmion droplet revealing its solitary wave character. The solitary wave character of topologically trivial skyrmionic textures has been studied for the case of a skyrmionium, a topologically trivial, $Q=0$, configuration in DM ferromagnets \cite{BogdanovHubert_JMMM1999}. A static skyrmionium is axially-symmetric and a propagating one is elongated. A slowly moving skyrmionium presents Newtonian dynamics and a fast moving one (velocity close to the maximum) presents relativistic dynamics \cite{KomineasPapanicolaou_PRB2015a}. \section{A droplet in a disc element} \label{sec:dropInDot} We have found static chiral droplets also in the confined geometry of a magnetic disc-shaped element (a magnetic dot). We apply an energy relaxation algorithm as in Sec.~\ref{sec:dropInFilm}. This converges and gives a static skyrmion-antiskyrmion droplet for a wide range of parameter values. \begin{figure}[t] \centering \includegraphics[width=0.9\columnwidth]{Fig6_dot_static.pdf} \caption{Static skyrmion-antiskyrmion droplets in disc elements with thickness $d_{\rm f}=5\,{\rm nm}$. (a) The disc diameter is $d=70\,{\rm nm}$. The parameter values are as in Table~\ref{tab:parameters}. (b) The disc diameter is $d=120\,{\rm nm}$. The parameter values are $D=1\,{\rm mJ/m^2},\, K=4.43505\times10^5\,{\rm J/m^3}$ (other parameters are as in Table~\ref{tab:parameters}). In this case, we have a more extended antiskyrmion part. For comparison, we note that a usual skyrmion (with $Q=1$) has an approximate radius $15\,{\rm nm}$ for the dot in (a) and $30.5\,{\rm nm}$ for the dot in (b). They are thus somewhat larger than the corresponding $Q=0$ configurations shown in this figure.} \label{fig:dropInDot} \end{figure} We find droplets for a thickness $d_{\rm f}=0.5\,{\rm nm}$ and for similar parameter values as in the case of a film. In addition, we were also able to find stable droplets for larger thicknesses. 
Fig.~\ref{fig:dropInDot} shows droplets in dots of two different sizes with $d_{\rm f}=5\,{\rm nm}$ and for different sets of parameter values. In Fig.~\ref{fig:dropInDot}a, we have a smaller dot and the parameter values are the same as those used in Fig.~\ref{fig:staticDrop} for the infinite film. The droplet is stable for a range of parameter values. For fixed $\epsilon=0.6086$, the droplet is stable for $3.0\leq D \leq7.6~{\rm mJ/m^2}$. In Fig.~\ref{fig:dropInDot}b, we have a larger dot and the parameter values (given in the figure caption) correspond to $\epsilon=3.16$ and $\kappa=0.00515$. The value of $\epsilon$ is outside the range for the existence of a skyrmion in a film (when we neglect the long-range part of the magnetostatic field). In the case of Fig.~\ref{fig:dropInDot}b, the antiskyrmion part is smoother than in all other cases presented in this paper. This can be attributed to the magnetostatic field originating in the bulk in combination with the DM interaction. It thus appears that the details of the configuration can be tuned, at least in the case of a droplet in a confined geometry. The effect of the magnetostatic field due to the confined geometry of a dot is substantial. The magnetostatic field from the lateral boundaries contributes to stabilizing the configuration. This phenomenon has already been noted in connection with magnetic bubbles in dots \cite{DruyvesteynSzymczak_PSSA1972,IgnatchenkoMironov_JMMM1993, MoutafisKomineas_PRB2006}. The effect is verified in the present calculation. \section{Concluding remarks} \label{sec:conclusions} We have found numerically skyrmionic textures in the form of skyrmion-antiskyrmion pairs (droplets) with a skyrmion number $Q=0$ in ferromagnets with perpendicular anisotropy and DM interaction. They exist, in thin films, for a narrow range of parameter values. The magnetostatic field is crucial for their stability. 
Under spin-polarized current, they move along the current exhibiting no Magnus force effect and, thus, their dynamics is different from the dynamics of $Q=1$ skyrmions. The stability of droplets is a numerical finding and we stress that it was not possible to provide a proof for their existence within the Landau-Lifshitz equation including the magnetostatic interaction. Their robustness is nevertheless seen in their behavior under spin currents, where they persist for long times in order to fully reveal their dynamics. In view of the predicted narrow range of parameters for their stability in infinite films, it would appear as a challenge to observe them experimentally. Nevertheless, a skyrmion-antiskyrmion pair has already been observed \cite{JagannathGobelParkin_NatComm2020}. Skyrmion-antiskyrmion pairs could also be very common as transient (short-lived) states. We expect that the results of the present paper would help in understanding also such states. One could consider materials that support antiskyrmions, such as those reported in Ref.~\cite{NayakKumarParkin_Nature2017}. We have found numerically skyrmion-antiskyrmion droplets also in such systems. They are very similar to the droplets presented in this paper, except that the skyrmion part is replaced by an antiskyrmion part and vice-versa. In materials with some special form of DM interaction, such as those studied in Ref.~\cite{HoffmannMelcherBluegel_NatComm2017} (especially in Supplementary Note 1), the skyrmion and the antiskyrmion are both favored. In such models, a skyrmion-antiskyrmion droplet may have a greater significance. We have found that skyrmion-antiskyrmion droplets exist for a wider range of parameter values also in confined geometries. 
Given the robustness of the $Q=0$ droplets in magnetic dots, the present work indicates that some experimental observations of individual skyrmions in magnetic elements might have to be re-examined in order to distinguish whether the observed skyrmions are the symmetric $Q=1$ configurations or some sort of $Q=0$ textures. Particular attention should be given to the dynamics of a $Q=0$ texture in a dot as this is expected to be different than the rotational dynamics of $Q=1$ skyrmions \cite{SisodiaKomineasMuduli_PRB2019}. \bigskip
{ "redpajama_set_name": "RedPajamaArXiv" }
2,335
Read the original post below, and find out how we can help you ensure your air quality here. Air pollution in Europe comes with a high price tag, according to a new report from the European Environment Agency (EEA). While policies have improved air quality overall, air pollution is still the main environmental health hazard, resulting in high costs for health care systems, unhealthy workers and an estimated 400,000 premature deaths in Europe in 2011. The annual air quality report collates data from official monitoring stations across Europe. It shows that almost all city dwellers are exposed to pollutants at levels deemed unsafe by the World Health Organization (WHO). For some pollutants, more than 95 percent of the urban population is exposed to unsafe levels. Posted on November 25, 2014 by Marcus Lange and filed under Industry News and tagged air quality healthcare environment.
{ "redpajama_set_name": "RedPajamaC4" }
7,600
\section{Introduction} For quite some time, physicists have considered Einstein's General Relativity (GR) to be an effective theory of gravity. Therefore, in order to find the happy marriage between quantum theory and gravity, we need to know the underlying fundamental theory of gravity. One recent proposal in this address has been the Horava-Lifshitz (HL) theory \cite{Horava:2008jf}, that is, a power-counting renormalizable theory with consistent ultraviolet (UV) behaviour. Furthermore, the theory has one fixed point in the infrared (IR) limit namely GR \cite{Horava:2008jf, Horava:2008ih, Horava:2009if, Horava:2009uw}. In terms of the above, black holes (BH) are important solutions for field equations in any gravitational theory, including those such as Einstein-Hilbert, Brans-Dicke, HL, $f(R)$, string theories and any generalisation or modification of Einstein's gravity. At the quantum level, BH play the same role as hydrogen atom and we hope they give us some clues about the observables of any quantum theory of gravity. As such, it is important to study the physical properties of BH solutions, such as decay rate, greybody factors, or their quasinormal modes. Quasinormal modes (QNMs), known as ``ringing'' in BH, are very important in order to understand the classical and quantum aspects of BH physics. The QNMs give us hints about the stability of BH under consideration, as in this paper, and can be used to compute the spectrum of the area operator using the semiclassical approach developed by Hod \cite{Hod:1998vk}. The determination of QNMs is based on the dynamics of matter fields and on the metric perturbations in the BH background. 
In this work indeed, we are interested in the stability of the $1+1$-dilatonic HL BH using a QNMs' approach; QNMs associated with the perturbations of different fields have been considered in different works \cite{Kokkotas:1999bd}, including those involving dS and AdS space \cite{Horowitz:1999jd,c2,c3,c4,c5,Chan:1999sc,Wang:2001tk,Konoplya:2003dd} and higher dimensional models, where the QNMs can be computed for a brane situated in the vicinity of a $D$-dimensional BH \cite{Konoplya2}. A similar situation occurs in $2+1$ dimensions \cite{Chan:1996yk,c44,Crisostomo:2004hj}, and for acoustic BH \cite{c1, Lepe:2004kv,Saavedra:2005ug}. QNMs of dilatonic BH in $3+1$ dimensions can be found in Refs. \cite{Ferrari:2000ep, Konoplya:2002ky,Fernando:2003wc}. Two-dimensional theories of gravity have recently attracted much attention\cite{Robinson:2005pd,Myung:2000hk,Torii:1998gm} as simple toy models that possess many features of gravity in higher dimensions. They also have BH solutions which play important roles in revealing various aspects of spacetime geometry and quantization of gravity, and are also related to string theory \cite{Teo:1998kp,McGuigan:1991qp}. The QNMs of $1+1$ dilatonic BH for scalar and fermionic perturbations were studied in \cite{Becar:2007hu, Becar:2010zz, LopezOrtega:2011sc, Becar:2014jia}. The determination of QNMs for a specific geometry implies solving the field equations for different types of perturbations (scalar, fermionic, vectorial, etc.), with suitable boundary conditions that reflect the fact that this geometry describes a BH. The QNMs of a classical scalar perturbation of BH are defined as the solutions of the Klein-Gordon equation characterised by purely ingoing waves at the horizon, $\Phi \sim e^{-i\omega (t+r)}$, since, at least classically, an outgoing flux is not allowed at the horizon. 
In addition, one has to impose boundary conditions on the solutions in the asymptotic region (infinity), and for that reason it is crucial to use asymptotic geometry for the spacetime under study. In the case of an asymptotically flat spacetime, the condition we need to impose over the wave function is to have a purely outgoing wave function $\Phi \sim e^{-i\omega (t-r)}$ at the infinity \cite{Horowitz:1999jd}. In general, the QNMs are given by $\omega _{QNM}=\omega _{R}+i\omega _{I}$, where $\omega _{R}$ and $\omega _{I}$ are the real and imaginary parts of the frequency $\omega _{QNM}$, respectively. Therefore, the study of QNMs can be implemented as one possible simple alternative test for studying the stability of the system. In this sense, any imaginary frequency with the wrong sign would mean an exponentially growing mode, rather than a damped one. The organisation of this article is as follows: In Section II, we describe briefly the HL theory and specify the $1+1$-dilatonic BH solutions. In Section III, we compute the QNMs and explore the criteria for the stability of the two BH metrics under consideration. We finish with conclusions in Section IV. \section{Generalities of the Horava-Lifshitz gravity} In the following, we will describe the HL theory as developed in Ref. \cite{Horava:2009uw}. The HL theory provided a new approach to quantum gravity and its principal idea is based on the breaking of the Lorentz invariance by equipping the spacetime with additional geometric structure, a preferred foliation which defines the splitting of the coordinates into space and time; in this theory the Lorentz invariance is assumed to appear only in the low-energy limit. One can decompose the spacetime as follows \begin{equation} ds^{2} = (N^{2}-N_{i}N^{i})dt^{2}-2N_{i}dx^{i}dt-h_{ij}dx^{i}dx^{j}, \end{equation} where $N, N^{i}$ are the lapse and shift functions respectively and $h_{ij}$ is the three-dimensional metric. 
The action is given by \begin{equation} S = \frac{M^{2}_{Pl}}{2}\int d^{3}xdt\sqrt{h}N\left(K_{ij}K^{ij}-\gamma K^{2}-\mathcal{V} \right), \label{eq:HLaction} \end{equation} where $M_{Pl}$ is the Planck mass, $\gamma$ is a dimensionless constant and $K_{ij}$ is the well-known extrinsic curvature tensor, which is stated in the ADM formulation as \begin{equation} K_{ij}= \frac{1}{2N}\left(\dot{h}_{ij}-\nabla_{i}N_{j}-\nabla_{j}N_{i} \right), \end{equation} being $K$ its trace. The last term in (\ref{eq:HLaction}), $\mathcal{V}$, is invariant under three-dimensional diffeomorphisms and is known as the ``potential'' term. This term is a function of the three-dimensional metric and its derivatives. In explicit form we have \begin{equation} \mathcal{V} = -\xi R + \frac{1}{M^{2}_{Pl}}\left(\pi_{1}\Delta R + \pi_{2}R_{ij}R^{ij} + ... \right) + \frac{1}{M^{4}_{Pl}}\left(\sigma_{1}\Delta^{2} R + \sigma_{2}R_{ij}R^{jk}R^{i}{}_{k} + ... \right), \end{equation} where $\xi, \pi_{n}, \sigma_{n}$ are coupling constants, $R_{ij}$ and $R$ are the Ricci tensor and the scalar curvature constructed with the spatial metric. $\Delta:= h^{ij}\nabla_{i}\nabla_{j}$. The introduction of the ``potential'' term in (\ref{eq:HLaction}) improves the UV behavior of the graviton propagator and additionally leads to different scaling of space and time \begin{equation} \mathbf{x} \rightarrow \rho^{-1}\mathbf{x}, \ \ t \rightarrow \rho^{-3}t, \ \ N \rightarrow N, \ \ N^{i} \rightarrow \rho^{2}N_{i}, \ \ h_{ij}\rightarrow h_{ij}. \end{equation} When the lapse function depends only on time, $N=N(t)$, we say that we are dealing with the ``projectable'' version of the HL theory and the ``non-projectable'' version is given when the lapse function can depend on space and time. In Ref. \cite{Blas} an extension of the non-projectable version of HL gravity was made by the introduction of an extra mode in the ``potential'' term, i.e., $\mathcal{V}(h_{ij})\rightarrow \mathcal{V}(h_{ij},a_{i})$. 
It was shown that this extra mode can acquire a regular quadratic Lagrangian. The extra mode is given by \begin{equation} a_{i} := \frac{\partial_{i}N}{N}. \end{equation} Geometrically this vector represents the proper acceleration of the unit normals to the spatial slices. For the two-dimensional case there are only two terms that contribute to the quadratic Lagrangian: $R$ and $a_{i}a^{i}$. \subsection{Lowest dimensional Horava-Lifshitz Black Hole} \label{sec:HLbh} The HL gravity has two-dimensional solutions that characterise dilatonic BH and can be used to study the physical properties of BH in general; furthermore, some features of this theory, due to the fact that it utilizes two dimensions, open the possibility of understanding physical consequences in higher dimensional theories. As a summary, let us start with the HL-dilaton gravity in two dimensions presented in Ref. \cite{HL}, \begin{equation} S = S_{HL}+S_{\phi}, \end{equation} where, as mentioned before, the quadratic Lagrangian for the HL theory in two dimensions comes from the contribution of the terms $R$ and $a_{i}a^{i}$ \begin{equation} S_{HL} = \frac{M^{2}_{Pl}}{2}\int dtdx\sqrt{g}\left((1-\lambda)K^{2}+\eta g^{11}a_{1}a_{1} \right), \end{equation} and \begin{equation} S_{\phi} = \int dtdxN\sqrt{g}\left[\frac{1}{2N}\left(\partial_{t}\phi -N^{1}\nabla_{1}\phi\right)^{2}-\alpha(\nabla_{1}\phi)^{2}-V(\phi) -\beta \phi \nabla^{1}a_{1}-\varsigma \phi a^{1}\nabla_{1}\phi \right], \end{equation} where $\alpha$, $\beta, \eta$ and $\varsigma$ are constants. Using the fact that $K=0$ and admitting $N_{1} = 0$ together with the relativistic limit $\beta=\varsigma=0$, we are left with the action \begin{equation} S= \frac{M^2_{Pl}}{2}\int dtdx \left (-\frac{1}{2}\eta N^2a^2_1+\alpha N^2\phi'^2-V(\phi)\right). \label{eq:action} \end{equation} From now on, the prime denotes derivative with respect to the coordinate $x$. In two dimensions the extra mode $a_{i}$ is simply $a_1=\partial_{1}\ln N = \left(\ln N \right)'$. 
In Ref. \cite{HL}, a new set of BH solutions in two-dimensional HL gravity was found for action (\ref{eq:action}). The solutions are described by \begin{equation} N(x)=\sqrt{\frac{A}{\eta}x^2-2C_1x+\frac{B}{\eta x}+\frac{C}{3 \eta x^2}+2C_2}\,\,\,\,, \label{eq:solutionN} \end{equation} and \begin{equation} \phi(x)=\ln \sqrt{\frac{A}{\eta}x^2-2C_1x+\frac{B}{\eta x}+\frac{C}{3 \eta x^2}+2C_2}\,\,\,\,, \label{eq:solutionphi} \end{equation} these solutions were obtained by using the quantity \begin{equation} V_{\phi}(x) = A + \frac{B}{x^{3}}+\frac{C}{x^{4}}, \end{equation} the derivative of the scalar potential given as a function of an implicit scalar field which in turn depends on the spatial coordinate. This was done because, for generalized potentials, it is not always possible to obtain analytical solutions. We would like to focus our attention on the following two cases: $\bullet$ First case: Described by fixing the constants in the following way; $A=B=C=0$, $C_1= -M$, $C_2=-1/2$ and $\eta =1$. In this case $V_{\phi}=0$. Therefore, the metric for this solution can be written as follows \begin{equation} ds^{2} = -(2Mx -1)dt^{2} + \frac{1}{2Mx - 1}dx^{2}, \label{eq:lowest} \end{equation} where the parameter $M$ is related to the lapse function $N$. This solution was found for the first time in \cite{Mann:1991md}. $\bullet$ Second Case: Here we fix the constants as $A=\Lambda$, $B=C=0$, $C_1=-M$ and $C_2=-\frac{\epsilon }{2}$; therefore, we have $V_{\phi}=\Lambda$, and the solution is given by \begin{equation} ds^{2} = -\left[\left(\frac{\Lambda}{\eta}\right)x^{2} + 2Mx - \epsilon \right]dt^{2} + \frac{1}{\left(\frac{\Lambda}{\eta}\right)x^{2} + 2Mx - \epsilon}dx^{2}. \label{eq:sol2} \end{equation} The horizon of the black hole is located at \begin{equation} x_{\pm} = -\frac{\eta M}{\Lambda} \pm \sqrt{\frac{\eta}{\Lambda}\left(\frac{\eta M^{2}}{\Lambda} + \epsilon \right)}. 
\label{eq:sol1} \end{equation} If we define the variables $u = \sqrt{\Lambda / \eta}x + \sqrt{\eta / \Lambda}M$ and $u_{+} = \sqrt{(\eta / \Lambda)M^{2} + \epsilon}$ one gets \begin{equation} ds^{2} = -\left(u^{2} - u_{+}^{2} \right)dt^{2} + \frac{l^{2}}{\left(u^{2} - u_{+}^{2}\right)}du^{2}, \label{eq:sol3} \end{equation} which is a suitable expression to study the quasinormal modes of this black hole, and we have defined $l = (\Lambda/\eta)^{1/4}$. In this new coordinate system, the horizon of the black hole is located at $u = u_{+}$. The spacetimes, described by (\ref{eq:lowest}) and (\ref{eq:sol2}), are conformally flat \cite{Mann}. \section{Quasinormal modes} \label{sec:qnms} In order to study the QNMs, we consider a scalar field minimally coupled to gravity propagating in the background of the two-dimensional HL BH. We consider the following action for the scalar field \begin{equation} S[\varphi] = \int d^{2}x \sqrt{-g}\left(-\frac{1}{2}(\nabla \varphi)^{2} - \frac{1}{2}m^{2}\varphi^{2} \right), \end{equation} where $m$ is the mass of the scalar field. From the variation with respect to $\varphi$, the field equation is given by \begin{equation} \Box \varphi - m^{2}\varphi = 0, \label{eq:eom} \end{equation} where $\Box$ is the d'Alembertian operator. In the following sections, we will solve the Klein-Gordon equation (\ref{eq:eom}) for the spacetimes described in the previous section. \subsection{Spacetime metric $ds^{2} = -(2Mx -1)dt^{2} + \frac{1}{2Mx - 1}dx^{2}$ } \subsubsection{Massive Scalar Field} The case of a massive scalar field perturbing the background described by the metric (\ref{eq:lowest}) was discussed for the first time in \cite{Mann, Mann:1991md}. If we use $\varphi(t,x)=e^{-i\omega t}\varphi(x)$, the equation of motion (\ref{eq:eom}) is represented by \begin{equation} (2Mx-1)\frac{d^{2}\varphi(x)}{dx^{2}} + 2M \frac{d\varphi(x)}{dx} + \frac{\omega^{2}}{2Mx-1}\varphi(x)-m^2\varphi(x)= 0. 
\label{eq:nobessel0} \end{equation} For this metric and massive scalar field, the QNMs were computed in \cite{Lopez}, where the authors claimed that the solution for the QNMs is completely different from the standard case where the QNMs have a discrete spectrum. They proposed a real and continuous spectrum for the QNMs of a scalar perturbation. In the next section, we look at this situation, but instead use the confluent hypergeometric function $_{1}F_{1}(a,b;x)$ in place of the modified Bessel functions. Now, using the tortoise coordinate defined by $x_{*}=\frac{1}{2M}\ln(2Mx-1)$ we can write Eq. (\ref{eq:nobessel0}) as a Schr\"{o}dinger-type equation with effective potential \cite{Lopez} \begin{equation} V_{eff} =m^{2}e^{2Mx_{*}}, \label{eq:potential} \end{equation} This potential diverges when $x_{*}\rightarrow \infty$. If we consider the variable $z=\frac{m}{M}e^{Mx_{*}}$ \cite{Mann:1991md}, after some straightforward algebra, the equation of motion (\ref{eq:nobessel0}) can be written as the Bessel equation, \begin{equation} z^2\frac{d^2\varphi(z)}{dz^2}+z\frac{d\varphi(z)}{dz}+\left(\nu^2+z^2\right)\varphi(z)=0, \label{eq:bessel} \end{equation} where $\nu=\frac{i\omega}{M}$; this equation can be transformed into the confluent hypergeometric equation using the change $\varphi(z)=(2iz)^{\nu}e^{-2z}F(z)$ \cite{Libro}, and we find \begin{equation} z\frac{d^2F(z)}{dz^2} + (2\nu+1-2iz)\frac{dF(z)}{dz}+2i\left(\nu+\frac{1}{2}\right)F(z)=0, \label{eq:kumer} \end{equation} whose solution is given in terms of the confluent hypergeometric, or Kummer, functions \begin{equation} F(z)=A\Phi(\nu+\frac{1}{2},2\nu+1;2iz)+B(2iz)^{-2\nu}\Phi(-\nu+\frac{1}{2},1-2\nu;2iz), \label{eq:kumersolgene} \end{equation} where $A,B$ are constants and the number $\nu$ in general need not be an integer \cite{Libro}.
In the following we will consider two cases for the $\nu$ parameter since we are interested in exploring all its possible values.\\ $\bullet$ non-integer $\nu$ In order to compute the QNMs, we need to impose adequate boundary conditions that represent a purely outgoing wave at infinity and purely ingoing wave near the horizon of the BH (a condition often used in flat spacetime). Another situation occurs when the asymptotic behaviour of the spacetime is not flat, e.g. asymptotically AdS space; in these kind of spaces, the potential diverges at infinity, and we can therefore impose $\varphi=0$ (Dirichlet boundary condition) or $\frac{d\varphi}{dx}=0$ (Neumann boundary condition) at infinity. As we can see from Eq. (\ref{eq:potential}), in this case we have an asymptotically AdS space. Therefore, we need to apply boundary conditions to the QNMs over the general solution of Eq. (\ref{eq:kumer}), which is given by \begin{equation} \varphi(z)=Ae^{-2z}z^{\nu}\Phi(\nu+\frac{1}{2},2\nu+1;2iz)+Be^{-2z}z^{-\nu}\Phi(-\nu+\frac{1}{2},1-2\nu;2iz), \label{eq:kumersolgen} \end{equation} to satisfy the boundary conditions properly, we set $A=0$ in order to have only ingoing waves at the horizon ($z=0$). The asymptotic behavior of $\Phi$ at infinity is given by \cite{energy} \begin{equation} \Phi(p,q;z)\rightarrow\frac{\Gamma(q)}{\Gamma(p)}z^{p-q}e^z, \label{eq:kumerlimit} \end{equation} and therefore our solution at the infinity ($z \rightarrow \infty$) reads as follow, \begin{equation} \varphi(z)\sim Bz^{-\frac{1}{2}}e^{2(i-1)z}\frac{\Gamma(1-2\nu)}{\Gamma(-\nu+1/2)}. \label{eq:sollimit} \end{equation} We can see that the scalar field vanishes as $z \rightarrow \infty$, this confirms the absence of QNMs for this HL BH under scalar perturbations, a similar situation was found in Ref. \cite{Crisostomo:2004hj} for the QNMs of the extremal BTZ BH. The conclusion of this case was discussed in \cite{Lopez}, and represents a continuous spectrum. 
Additionally, if we impose the Neumann boundary condition, we obtain a similar asymptotic vanishing behavior for the flux \begin{equation} J_r(z)=\varphi^*(z)\frac{d\varphi(z)}{dz}-\varphi(z)\frac{d\varphi^*(z)}{dz}. \label{eq:current} \end{equation} In light of the meaning of QNMs, when a BH geometry is perturbed it produces damped oscillations; this is the so-called ringing of the BH. It is well known that the frequencies of these oscillations and their damping periods are completely fixed by the BH properties, and as such, are independent of the nature of the initial perturbation. In Ref. \cite{Lopez} it was shown that, for scalar perturbations, the oscillations have a continuous spectrum and are not discrete, as would be expected for a BH. This result is very strange and, in our opinion, devoid of physical meaning; this is because it is well known that oscillations of QNMs are similar to normal modes of a closed system. In the next section, we consider the second solution of the confluent hypergeometric equation and show that QNM oscillations have a discrete spectrum. $\bullet$ $\nu$ integer Now we consider the case where $\nu$ is an integer. For $\nu$ integer, the solution of Eq. (\ref{eq:kumer}) changes.
In the expression (\ref{eq:kumersolgen}), the function $\Phi(p,q;z)$ of the second term must be replaced by \cite{macdonal} \begin{eqnarray} W(\alpha,\gamma;z)&=&M(\alpha,\gamma;z)\left(\ln z+\psi(1-\alpha)-\psi(\gamma)+C\right)+\sum_{n=1}^{\infty}\frac{\Gamma(n+\alpha)\Gamma(\gamma)B_nz^n}{\Gamma(\alpha)\Gamma(n+\gamma)n!}+\nonumber \\ \nonumber &+&(-1)^{\gamma}\sum_{n=1}^{\infty}\frac{\Gamma(\gamma)\Gamma(n+\alpha-\gamma+1)\Gamma(\gamma-n-1)(-1)^n}{\Gamma(\alpha)n!z^{\gamma-n-1}}\label{current}\,, \end{eqnarray} where \begin{equation} \psi(\alpha)=\frac{\Gamma'(\alpha)}{\Gamma(\alpha)}, \label{eq:digamma} \end{equation} represents the digamma function, $C=0.577216...$ is Euler's constant, and \begin{equation} B_n=\left(\frac{1}{\alpha}+\frac{1}{\alpha+1}+...+\frac{1}{\alpha+n-1}\right)-\left(\frac{1}{\gamma}+\frac{1}{\gamma+1}+...+\frac{1}{\gamma+n-1}\right). \label{eq:currt} \end{equation} Then, we have \begin{equation} \varphi(z)=Ae^{-2z}z^{\nu}\Phi(\nu+\frac{1}{2},2\nu+1;2iz)+Be^{-2z}z^{-\nu}W(p,q;z). \label{eq:kumersolgen6} \end{equation} In order to have only ingoing waves at the horizon ($z=0$), we set $A=0$. The asymptotic behaviour of the $W$-function at infinity is given by \begin{equation} W(p,q;y)\rightarrow \pi \cot(\pi p)\frac{\Gamma(q)}{\Gamma(p)}y^{p-q}e^y. \label{eq:W} \end{equation} Therefore, the general solution at infinity reads as follows, \begin{equation} \varphi(z)\sim B\pi \cot\left[-\pi \left(\nu+\frac{1}{2}\right)\right]z^{-\left(4\nu+\frac{3}{2}\right)}\frac{\Gamma(2\nu+1)}{\Gamma(-\nu-1/2)} e^{2(i-1)z}. \label{eq:sollimitW} \end{equation} If we consider that $\nu + 1/2=\frac{2n+1}{2}$, where $n$ is an integer, we are able to fulfill the Dirichlet boundary condition at infinity. From this result we can obtain the frequency of the QNMs as \begin{equation} \omega=-inM, \label{eq:omegaW} \end{equation} and using the Neumann condition for a vanishing flux at infinity, we obtain the same result for the QNMs as expressed in Eq.
(\ref{eq:omegaW}). \subsubsection{Massless Case} For the metric (\ref{eq:lowest}), when $m=0$, we use the standard definition for QNMs, the Klein-Gordon equation (\ref{eq:eom}) which reads \begin{equation} (2Mx-1)\frac{d^{2}\varphi(x)}{dx^{2}} + 2M \frac{d\varphi(x)}{dx} + \frac{\omega^{2}}{2Mx-1}\varphi(x) = 0, \label{eq:nobessel} \end{equation} where we have assumed $\phi(t,x) = \varphi(x)e^{-i\omega t}$. If we define the quantity $x_{+} = 1/2M$ and the change of variable $z = 1 - x_{+}/x$ we can write equation (\ref{eq:nobessel}) as follows \begin{equation} (1-z)^{2}\frac{d^{2}\varphi}{dz^{2}}-2(1-z)\frac{d\varphi}{dz}+\frac{(1-z)}{z}\frac{d\varphi}{dz}+\left(\frac{\tilde{\omega}}{z}\right)^{2} \varphi = 0, \end{equation} where $\tilde{\omega} = x_{+}\omega$. Note that in the new coordinate $z$, the horizon of the BH is located at $z = 0$ and infinity at $z=1$. With the change $\varphi(z) = z^{\alpha}(1-z)^{\beta}F(z)$, the last equation reduces to the hypergeometric differential equation for the function $F(z)$, that is, \begin{equation} z(1-z)F''(z) + (c-(a+b+1)z)F'(z)-abF(z)=0. \label{eq:hyper} \end{equation} In this case the coefficients $a$, $b$ and $c$ are given by the relations \begin{eqnarray} c &=& 2\alpha +1, \\ a+b &=& 2\alpha + 2\beta + 1, \\ ab &=& \alpha(\alpha - 1) + \beta(\beta-1)+2\alpha + 2\beta + 2\alpha \beta, \end{eqnarray} providing the expressions for the coefficients \begin{eqnarray} a &=& \alpha +\beta,\\ b &=& 1+\alpha +\beta, \end{eqnarray} and for the exponents we obtain \begin{equation} \alpha = \beta = -ix_{+}\omega. \end{equation} Without loss of generality, we have chosen the negative signs for the exponents. The solution of the radial equation reads \begin{equation} F(z) = C_{1}F_{1}(a,b,c;z)+C_{2}z^{1-c}F_{1}(a-c+1,b-c+1,2-c;z), \end{equation} where $C_{1}$ and $C_{2}$ are arbitrary constants and $F_{1}(a,b,c;z)$ is the hypergeometric function. 
The solution for $\varphi(z)$ is given by \begin{equation} \varphi(z) = C_{1}z^{-ix_{+}\omega}(1-z)^{-ix_{+}\omega}F_{1}(a,b,c;z) + C_{2}z^{ix_{+}\omega}(1-z)^{-ix_{+}\omega}F_{1}(a-c+1,b-c+1,2-c;z). \end{equation} In the neighborhood of the horizon $z=0$, the function $\varphi(z)$ behaves as \begin{equation} \varphi(z) = C_{1}e^{-ix_{+}\omega \ln z} + C_{2}e^{ix_{+}\omega \ln z}, \end{equation} for the scalar field $\phi$ one gets \begin{equation} \phi \sim C_{1}e^{-i\omega \left(t + x_{+}\ln z\right)} + C_{2}e^{-i\omega\left(t - x_{+}\ln z\right)}. \end{equation} The first term in the last equation corresponds to an ingoing wave at the BH, while the second one represents an outgoing wave. In order to compute the QNMs, we must impose that there exist only ingoing waves at the horizon of the BH, then $C_{2} = 0$. The radial solution at the horizon is given by \begin{equation} \varphi(z) = C_{1}z^{-ix_{+}\omega}(1-z)^{-ix_{+}\omega}F_{1}(a,b,c;z). \end{equation} In order to implement the boundary conditions at infinity, $z=1$, we use the linear transformation $z\rightarrow 1-z$, and then we apply Kummer's formula \cite{Abramowitz} for the hypergeometric function, \begin{eqnarray} \varphi(z) &=& C_{1}z^{-ix_{+}\omega}(1-z)^{-ix_{+}\omega}\frac{\Gamma(c)\Gamma(c-a-b)}{\Gamma(c-a)\Gamma(c-b)}F_{1}(a,b;a+b-c+1;1-z)\nonumber \\ &+& C_{1}z^{-ix_{+}\omega}(1-z)^{ix_{+}\omega}\frac{\Gamma(c)\Gamma(a+b-c)}{\Gamma(a)\Gamma(b)}F_{1}(c-a,c-b;c-a-b+1;1-z), \end{eqnarray} This solution near the infinity, $z=1$, takes the form \begin{equation} \varphi(z) = C_{1}(1-z)^{-ix_{+}\omega}\frac{\Gamma(c)\Gamma(c-a-b)}{\Gamma(c-a)\Gamma(c-b)} + C_{1}(1-z)^{ix_{+}\omega}\frac{\Gamma(c)\Gamma(a+b-c)}{\Gamma(a)\Gamma(b)}, \end{equation} and the scalar field solution near infinity behaves as \begin{equation} \phi \sim C_{1}e^{-i\omega(t+x_{+}\ln(1-z))}\frac{\Gamma(c)\Gamma(c-a-b)}{\Gamma(c-a)\Gamma(c-b)} + C_{1}e^{-i\omega(t-x_{+}\ln(1-z))}\frac{\Gamma(c)\Gamma(a+b-c)}{\Gamma(a)\Gamma(b)}. 
\end{equation} To compute the QNMs, we also need to impose the boundary conditions on the solution of the radial equation at infinity, meaning that only purely outgoing waves are allowed there. Therefore, the second term in the last equation must vanish; this is fulfilled at the poles of $\Gamma(a)$ or $\Gamma(b)$, where the scalar field satisfies the considered boundary condition only when \begin{equation} a = -n \ \ \ \ \ \mbox{or} \ \ \ \ \ b=-n, \end{equation} where $n = 0, 1, 2,...$. These conditions determine the form of the quasinormal modes, \begin{equation} \omega = -\frac{i}{2x_{+}}\left(n + 1\right). \end{equation} \subsection{Spacetime metric $ds^{2} = \left(\left(\frac{\Lambda}{\eta}\right)x^{2} + 2Mx - \epsilon \right)dt^{2} + \frac{1}{\left(\frac{\Lambda}{\eta}\right)x^{2} + 2Mx - \epsilon}dx^{2}$} For the second metric given in Eq. (\ref{eq:sol3}), we have a spacetime that is not asymptotically flat; as such, and as mentioned before, we use a definition for the QNMs different from the one used in an asymptotically flat spacetime. The formal treatment for this kind of spacetime is discussed in \cite{Horowitz:1999jd}, where they defined QNMs to be modes with only ingoing waves near the horizon and vanishing at infinity. Thus, the Klein-Gordon equation (\ref{eq:eom}) can be written as \begin{equation} -\frac{1}{u^{2}-u_{+}^{2}}\partial_{t}\partial_{t}\phi + \frac{2}{l^{2}}u \partial_{u}\phi + \frac{1}{l^{2}}(u^{2}-u_{+}^{2})\partial_{u}\partial_{u}\phi - m^{2}\phi=0, \end{equation} Now, we consider a solution of the type $\phi(t,u) = \varphi(u)e^{-i\omega t}$ and the definition $l = (\Lambda/\eta)^{1/4}$, for which the radial equation can be written as follows \begin{equation} \frac{1}{l^{2}}(u^{2}-u_{+}^{2})\varphi''(u) + \frac{2}{l^{2}}u\varphi'(u) + \left(\frac{\omega^{2}}{u^{2}-u_{+}^{2}} - m^{2} \right)\varphi(u) = 0, \label{eq:radial} \end{equation} where the prime denotes derivatives with respect to the variable $u$.
If we define the change of variable $z=1-u_{+}^{2}/u^{2}$ \cite{Aros} and follow the procedure stated for the massless case, the equation (\ref{eq:radial}) transforms into the hypergeometric differential equation (\ref{eq:hyper}) for the function $F(z)$, where the coefficients $a,b,c$ are given by the following relations \begin{eqnarray} a + b &=& 2\alpha + 2\beta + \frac{1}{2},\\ ab &=& \alpha(\alpha-1)+\beta(\beta-1)+2\alpha \beta + \frac{3}{2}\alpha + \frac{3}{2}\beta,\\ c &=& 2\alpha + 1, \end{eqnarray} which gives \begin{eqnarray} a &=& \alpha + \beta,\\ b &=& \alpha + \beta + \frac{1}{2}, \end{eqnarray} and for the exponents $\alpha$ and $\beta$ \begin{eqnarray} \alpha &=& -i\frac{l}{u_{+}}\omega,\\ \beta &=& \frac{1}{4}\left(1 - \sqrt{1+4m^{2}l^{2}} \right), \end{eqnarray} where, without loss of generality, we have chosen the negative signs. The solution of the radial equation reads \begin{equation} F(z) = C_{1}F_{1}(a,b,c;z)+C_{2}z^{1-c}F_{1}(a-c+1,b-c+1,2-c;z), \end{equation} where $C_{1}$ and $C_{2}$ are arbitrary constants and $F_{1}(a,b,c;z)$ is the hypergeometric function. Since $\varphi(z) = z^{\alpha}(1-z)^{\beta}F(z)$, the behaviour of the scalar field near the horizon ($z=0$) is given by \begin{equation} \phi \sim C_{1}e^{-i\omega \left(t + \frac{l}{u_{+}}\ln z\right)} + C_{2}e^{-i\omega\left(t - \frac{l}{u_{+}}\ln z\right)}. \end{equation} Then, the scalar field $\phi$ is purely ingoing at the horizon for $C_{2} = 0$, and therefore the radial solution is \begin{equation} \varphi(z) = C_{1}z^{\alpha}(1-z)^{\beta}F_{1}(a,b,c;z). \end{equation} \begin{figure}[h] \begin{center} \includegraphics[width=0.6\textwidth]{Omega.eps} \end{center} \caption{In this plot we depict the behaviour of the QNMs expressed in Eq. (\ref{eq:of}), with some paremeters values, $l=1$ and $u_{+} = \sqrt{5}$. 
We can see that the BH becomes unstable for large values of the mass $m$.} \label{fig:Pot2} \end{figure} In order to implement boundary conditions at infinity ($z=1$), we use the linear transformation $z \rightarrow 1-z$ for the hypergeometric function and we obtain \begin{eqnarray} \varphi(z) &=& C_{1}z^{\alpha}(1-z)^{\beta}\frac{\Gamma(c)\Gamma(c-a-b)}{\Gamma(c-a)\Gamma(c-b)}F_{1}(a,b;a+b-c+1;1-z)\nonumber \\ &+& C_{1}z^{\alpha}(1-z)^{c-a-b+\beta}\frac{\Gamma(c)\Gamma(a+b-c)}{\Gamma(a)\Gamma(b)}F_{1}(c-a,c-b;c-a-b+1;1-z). \end{eqnarray} Using the condition of the flux \begin{eqnarray} \mathcal{F} &\sim& \varphi^{*}(z)\partial_{z}\varphi(z)-\varphi(z)\partial_{z}\varphi^{*}(z),\nonumber \\ &\sim& -2\,i{C}_{1}^{2}\frac{l\omega}{u_{+}}\, \left( \left( 1-z \right) ^{1/2\left(1-\,\sqrt {1+4\,{m}^{2}{l}^{2}}\right)}{ \Gamma_{1}} ^{2} + 2\, \Gamma_{1}\, \Gamma_{2}\sqrt{1-z}+ \left( 1-z \right) ^{1/2\left(1+\,\sqrt {1 +4\,{m}^{2}{l}^{2}}\right)}{\Gamma_{2}}^{2} \right),\nonumber \\ &\sim& -2\,i{C}_{1}^{2}\frac{l\omega}{u_{+}}\, \left( \left( 1-z \right) ^{2\beta}{ \Gamma_{1}} ^{2} + 2\, \Gamma_{1}\, \Gamma_{2}\sqrt{1-z}+ \left( 1-z \right) ^{1-2\beta}{\Gamma_{2}}^{2} \right), \label{eq:flux} \end{eqnarray} where \begin{eqnarray} \Gamma_{1} &=& \frac{\Gamma(c)\Gamma(c-a-b)}{\Gamma(c-a)\Gamma(c-b)},\\ \Gamma_{2} &=& \frac{\Gamma(c)\Gamma(a+b-c)}{\Gamma(a)\Gamma(b)}, \end{eqnarray} then, the flux (\ref{eq:flux}) has a leading term $(1-z)^{1-2\beta}$ and vanishes at infinity only if we impose that \begin{equation} a = -n \ \ \ \ \ \mbox{or} \ \ \ \ \ b = -n, \end{equation} where $n = 0, 1, 2, 3...$. These conditions lead to the determination of the quasinormal modes as follows \begin{equation} \omega = -i\frac{u_{+}}{4l}\left[4n+3-\sqrt{1+4m^{2}l^{2}}\right]. \label{eq:of} \end{equation} Our results are represented in Fig. 
(\ref{fig:Pot2}), where it is possible to see that, for a scalar field with large mass, the BH becomes unstable, while for lower values of the mass, or $m=0$, this kind of black hole is stable. \section{Final remarks} This article was devoted to studying the response of two $1+1$ BH under scalar perturbations. We focused on the BH solutions found in Ref. \cite{HL} in the context of HL gravity. The BH studied in the present paper also correspond to solutions arising from standard GR plus the dilaton field; therefore, the physical properties of these BH can be used in different contexts. We noted that in studying the QNM oscillations of the metric (\ref{eq:lowest}) with massive scalar field perturbations it is necessary to look at the solution in terms of the confluent hypergeometric, or Kummer, functions, and, as a result, found two different cases, in one case we have absent QNMs under scalar perturbations and in the second case we have a discrete spectrum. These results are different from those obtained in Ref. \cite{Lopez}, where the QNMs are a continuous spectrum. Also, we computed the frequencies of the massless scalar field as sources of perturbations, and again obtained a discrete spectrum. From these results, we conclude that this BH is stable under massive and massless scalar perturbations. On the other hand, for spacetime in which the cosmological constant does not vanish, we found, in addition to the exact quasinormal frequencies, that it is possible to see that when the mass of the scalar field is large, the geometry becomes unstable. Finally, we would like to note that the frequencies found in this article are purely imaginary, and as such represent pure damping behaviour. \section*{Acknowledgments} The authors would like to thank Samuel Lepe and Olivera Miskovic for useful comments. MC was supported by PUCV through {\it Proyecto DI Postdoctorado} 2015. MG-E acknowledges support from a PUCV doctoral scholarship.
{ "redpajama_set_name": "RedPajamaArXiv" }
5,910
\section{Introduction} Let $T$ be an infinite locally finite tree with the root $\emptyset$. By $layer_n$ we denote the set of all elements of $T$, $n$-distant from $\emptyset$ (i.e. elements of level $n$). We always assume that there is an infinite sequence $\bar{m}=m_1 ,m_2 ,...$ of natural numbers such that $T$ is of the form $T_{\bar{m}}$, the tree where all elements of $layer_n$ are of valency $m_{n+1}+1$ for $n>0$. Let $T_n = \bigcup _{i\le n} layer_i$ and $T_{\langle n \rangle}$ be the tree of the isomorphism type of $T_w$, the tree hanging from $w\in layer_n$. \parskip0pt The group $Iso(T)$ of all isometries of $T$ fixing $\emptyset$ is a profinite group with respect to canonical homomorphisms $\pi_{n}: Iso(T)\rightarrow Iso(T_{n})$. In fact the following examples represent all the main fields in the subject. Consider the group $GL_d (\mathbb{Z}_p )$ and its action on $D=(\mathbb{Z}_p )^d$. Let $T_{ad}$ be the tree of all cosets in $(\mathbb{Z}_p )^d$ with respect to all subgroups of the form $p^k (\mathbb{Z}_p )^d$. If $G$ is a closed subgroup of $GL_{d}(\mathbb{Z}_p )$, then $D$ becomes a continuous $G \mathbb{Z}_p$-module. Thus $G$ acts by isometries on $T_{ad}$ and fixes the root $D$. A very close example is the action of $PGL_2 (\mathbb{Z}_p )$ on the tree of lattices in $\mathbb{Q}_p \times \mathbb{Q}_p$ (see \cite{Serre}). \parskip0pt Completely different groups are provided by Grigorchuk-type constructions. In \cite{Grig} {\em branch groups} are introduced, which in some sense can be considered as an axiomatization of well-known examples (their profinite completions) of Burnside groups found by Grigorchuk. Are there any global (=model-theoretic) properties giving interesting dividing lines in these examples ? In this paper we concentrate on characterizations concerning diversity of elements occurring in the group. Our basic property can be described as follows. Consider the relation $\sim$ of conjugacy in the group $Iso(T)$ of all isometries of $T$. 
It is easy to see that every conjugacy class is a closed subset of $Iso(T)$. Let $G$ be a closed subgroup of $Iso(T)$. We say that $G$ has {\em a small (resp. countable) number of isometry types} if $G$ meets only $<2^{\aleph_{0}}$ (countably many) conjugacy classes of $Iso(T)$. Since $G/\sim$ is analytic, smallness is equivalent to countability. \parskip0pt We study profinite groups which can be embedded into $Iso(T)$ as closed subgroups with a small number of isometry types. It seems to us that this class natuarally arises in many situations. One of them has model-theoretic flavor and is worth mentioning here. For closed $G<Iso(T)$ consider the corresponding inverse system $G_0 \leftarrow G_1 \leftarrow ... \leftarrow G_i \leftarrow ...$ with $G_i\le Iso(T_i )$. Any automorphism of $G$ fixing all $G_i$ (as sets) is called a {\em profinite automorphism} of $G$. By $Aut^* (G)$ we denote the group of all profinite automomorphisms. \parskip0pt Assume that any profinite automorphism of $G$ is induced by an isometry of $T$ as an inner automorphism of $Iso(T)$ (by Proposition 8.1 from \cite{LN} this happens if for any $w \in T$ the point-wise stabilizer of $T\setminus T_{w}$ is level-transitive on $T_{w}$). If the {\em number of $Aut^{*}(G)$-orbits on $G$ is at most countable} then $G$ has a small number of isometry types. The former condition has been introduced by Newelski in \cite{N} as a counterpart of small theories from model theory. The main (still open) conjecture in these respects states that profinite groups with a small number of $Aut^{*}(G)$-orbits have open abelian subgroups. Some partial confirmation has been obtained by F.Wagner in \cite{Wagner}. \parskip0pt In fact Newelski's conjecture has become the first motivation for our interests to isometry groups with a small number of isometry types. 
Notice that when $G$ is an inverse limit of a system of finite groups $G_0 \leftarrow G_1 \leftarrow ...$ $\leftarrow G_i \leftarrow ...$ with kernels $N_i$ of the corresponding canonical homomorphisms $G\rightarrow G_i$, then $G$ acts by left multiplication on the tree $T_C (G)$ of left cosets of all $N_i$, $i\in\omega$ (any edge of the tree is defined by a pair of the form $gN_{i+1} \subset gN_{i}$). In this case any profinite automorphism of $G$ (which stabilises all $N_i$) becomes an isometry of $T_C (G)$. Thus if $G$ has a small number of $Aut^{*}(G)$-orbits, then $G$ has a small number of isometry types with respect to this tree action. It was very surprising to us that the smallness condition above has very strong abstract consequences concerning the structure of $G$. In particular it turns out that we can say quite a lot about $G$ when $G$ is nilpotent. We also study the related property of definability of the action in $\mathbb{Q}_p$ and show that some natural zeta functions arising in these respects are rational. We compare our approach with the one suggested by Z.Chatzidakis in \cite{zoe}. We will see that $\omega$-categoricity appearing in \cite{zoe} is very far from smallness of the number of isometry types (at least in the case of examples above). \subsection{Preliminaries} Let $T=T_{\bar{m}}$, where $\bar{m}$ is an infinite sequence of positive natural numbers. For every $n$ we define an alphabet $A_n =\{ a^n_1 ,...,a^n_{m_n}\}$ and consider $T$ as the union of all finite products $A_1 \times ...\times A_n$, $n\in\omega$, interpreted as paths of $T$ started at $\emptyset$: here we identify $layer_n$ with $A_1 \times ...\times A_{n}$. If $v$ is a vertex of $T$ and $g\in Iso(T)$, then by $g(v)$ we usually denote the result of $g$ applied to $v$. On the other hand we sometimes use the form $v^g$, because it becomes more convenient when one considers iterated wreath products.
Then, as in \cite{Grig}, any $g\in Iso(T)$ is regarded as a labelling $\{ \gamma (v)\}_{v\in T}$ of the vertices by elements of the symmetric group which acts on the edges below the vertex. The effect of $g\in Iso(T)$ on a vertex $u$ corresponding to $p_u =(a_1 ,...,a_n )$, a path from $\emptyset$ with $a_i \in A_i$, is given by the formula: $$ p^g_u = ( a^{\gamma (\emptyset )}_1 ,a^{\gamma (a_1 )}_2 , ..., a^{\gamma (a_1 ...a_{n-1})}_n ), $$ where by $a^{\gamma (v)}$ we denote the result of the permutation $\gamma (v)$ applied to the edge $a$. The rule for composing automorphisms is given by the following formulas (see \cite{Grig}): \begin{quote} Let $h=\{\delta (v)\}_{v\in T}$, $f=\{\sigma (v)\}_{v\in T}$ and $g=\{ \gamma (v) \}_{v\in T}$. \\ If $h=fg$, then $\delta (v) = \sigma (v)\gamma (v^f )$. \\ If $f=g^{-1}$, then $\sigma (v) = (\gamma (v^{g^{-1}}))^{-1}$. \end{quote} Now assume that for every $i$ we have chosen a permutation group $P_i \le Sym (A_i )$. Then the subgroup of all labellings $\{ \gamma (v)\}_{v\in T}$ with $\gamma (v)\in P_{i+1}$ for $v \in layer_i$ is called {\em the iterated wreath product} of $(P_i ,A_i )$, $i\in \{ 1,2,...\}$ and is denoted by $\wr_{\omega} (P_i ,A_i )$. With any $v\in T$ (an end $\mathbf{e}\in \partial T$ resp.) and a group $G\le Iso(T)$ we associate the stabilizer $St_G (v)$ ($St_G ({\bf e})$ resp.). The level stabilizer is defined by $St_G (n) = \bigcap \{ St_G (v): |p_v |=n\}$. It is clear that $St_G (n)= Ker \pi_n$, where $\pi_n :Iso (T) \rightarrow Iso(T_n )$ is defined as above. On the other hand $St_{G} (n)$ is naturally identified with a subgroup of $\prod \{ Iso (T_v ): v\in layer_n\}$. The {\em rigid stabilizer} of $v$ is defined to be $Rs_G (v) = \{ g\in Iso(T): g$ fixes $T\setminus T_v$ pointwise$\}$. The rigid stabilizer of the $n$-th level $Rs_G (n)$ is defined to be the group generated by $\bigcup \{ Rs_G (v): v\in layer_n \}$. It is clear that $Rs_G (n)= \oplus \{ Rs_G (v): v\in layer_n \}$.
We now define (weakly) branch groups. They have been introduced by R.Grigorchuk in \cite{Grig}. They are one of the main objects in the area. \begin{def} \label{branch} Let $T$ be a locally finite tree, $T=T_{\bar{m}}$. A closed subgroup $G<Iso(T)$ is called a branch group with respect to $T$ if \\ (i) $G$ acts transitively on each layer of the tree, and \\ (ii) for each $n\ge 1$ there exists closed $L_n \le Iso(T_{\langle n\rangle })$ such that the direct product $H_n = L^{layer_n}_n$ is normal and of finite index in $G$ (each factor of the product acts on the subtree hanging from the corresponding vertex of the $n$-th layer). \end{def} Condition (ii) of this definition is equivalent to $|G:Rs_G (n)|<\infty$ for every $n\in\omega$ \cite{Grig}. If we replace (ii) by the condition that $Rs(n)$ is always infinite, then we obtain a definition of {\em weakly branch groups}. Let $g \in Iso(T)$. By $T_g$ we denote the {\em orbit tree} of $g$, i.e. the set of all $g$-cycles $T/\langle g\rangle$ with respect to the natural adjacency. The orbit tree is labelled by natural numbers as follows: to each orbit $\langle g\rangle v$ we associate the size $|\langle g\rangle v|$. The following theorem from \cite{GNS} will be one of the main tools of the paper: \begin{quote} isometries $g$ and $h$ are conjugated in $Iso(T)$ if and only if the corresponding orbit trees $T_g$ and $T_h$ are isomorphic by a labelling-preserving isomorphism. \end{quote} It is worth noting that the class of groups having a faithful representation with a small number of isometry types is closed under direct sums: if $G_{1}$ and $G_{2}$ have appropriate realisations on trees of valencies $N_{1}$ and $N_{2}$, then $G_{1}\oplus G_{2}$ can be realised on a tree of valency $N_{1}+N_{2}$ so that the number of isometry types is small. \subsection{Main results} In Section 2 we study profinite groups which can be realized as closed subgroups of $Iso(T)$ which have a small number of isometry types.
We show that $\mathbb{Z}_p$ and the direct power $H^{\omega}$ of a finite group $H$ have such representations in $Iso(T)$ for appropriate $T$. We conjecture that nilpotent profinite groups $G$ with this property are very close to these examples. In Section 2.1 we slightly confirm this showing that only finitely many primes can divide $|G|$ and the periodic part of $G$ is of finite exponent. We show that the smallness condition is so restrictive itself that for example weakly branch groups do not have a small number of isometry types. We will also see here that the tree of cosets of kernels of canonical projections naturally arises in these respects, and the smallness of the number of isometry types implies some interesting properties of this tree (Proposition 2.2). In Section 2.2 we study pronilpotent groups $G$ which have a small number of isometry types for all faithful continuous representations in isometry groups of locally finite trees. In the case when the group $G$ is nilpotent we show that such a group must be of finite rank. This condition becomes sufficient when the group is abelian (see Theorem \ref{Z_p} ). In Section 3 we study another condition of smallness. Let $T$ be a locally finite rooted tree. For every natural $n$ we introduce the following equivalence relation on $Iso(T)$: for $g,g'\in Iso(T)$ we define $g \equiv_{n} g'$ if the restrictions of $g$ and $g'$ to $T_n$ are conjugated in $Iso(T_n )$. \parskip0pt For a subgroup $G\le Iso(T)$ we denote by $c^{G}_n$ the number of all $\equiv_{n}$-classes meeting $G$. In Section 3 we will study the question when the zeta function $\Sigma_{n\ge 0} c^{G}_{n} t^n$ is rational. In general one cannot expect, that this happens for a typical $G<Iso(T)$ For example Grigorchuk shows in \cite{Grig2} that the Hilbert-Poincar\'{e} series of some of his groups are not rational. But if we smooth up the situation (under some model theoretic assumptions) we can have very reasonable variants of the question. 
Moreover, the examples of the introduction involving $GL_d (\mathbb{Z}_p )$ become tractable under our methods. The idea looks as follows. Let $\Gamma$ be a closed subgroup of $Iso(T)$. We say that $g,g'\in Iso(T)$ have the same $\Gamma$-type if there exists $h\in \Gamma$ such that $g^{h}=g'$. It makes sense to study $\Gamma$-types of $Iso(T)$ assuming that $\Gamma$ is interpretable in some natural model-theoretic objects (not only in the case $\Gamma =Iso(T)$). In this situation we say that $G\le Iso(T)$ has a small number of $\Gamma$-types if $G$ meets $\le \aleph_0$ $\Gamma$-orbits of the conjugacy action of $\Gamma$ on $Iso(T)$. \parskip0pt For every natural $n$ we introduce the following equivalence relation on $Iso(T)$: for $g,g'\in Iso(T)$ we define $g \equiv^{\Gamma}_{n} g'$ if the restrictions of $g$ and $g'$ to $T_n$ are conjugated by an element of the projection of $\Gamma$ to $T_n$. Now for the subgroup $G\le Iso(T)$ we denote by $c^{G}_n$ the number of all $\equiv^{\Gamma}_{n}$-classes meeting $G$. We will in fact study the question of when the zeta function $\Sigma_{n\ge 0} c^{G}_{n} t^n$ is rational for some $\Gamma$ interpretable in the field $\mathbb{Q}_p$. In Section 3 we discuss a number of examples. In particular we study a point-stabilizer of the action of $SL_2 (\mathbb{Q}_p )$ on the tree of lattices \cite{Serre}. In fact our material here is based on a nice paper of E.Hrushovski and B.Martin \cite{hrma}. \parskip0pt We must mention that in the eighties Z.Chatzidakis introduced some language for profinite groups and showed that model-theoretic investigations of the corresponding structures (denoted by $S(G)$) give very strong consequences in purely algebraic questions about profinite groups and field extensions. Moreover structures $S(G)$ arising in this approach have very nice model-theoretic properties. In some typical situations they are $\omega$-categorical, i.e. small.
It should be natural to start model-theoretic investigations of closed subgroups of $Iso(T)$ with these aspects. We fill this gap in Section 4. The answer which we obtain here, shows that the majority of our examples represent the simplest case of this classification. In fact we show that the dividing line of $\omega$-categoricity of structures $S(G)$ is situated inside the class of branch groups. We give here some interesting examples. In particular the profinite completion of the famous example of Grigorchuk is $\omega$-categorical in this sense. These results show why the condition of smallness which comes from the Newelski's approach is more appropriate for our examples. \section{Groups with a small number of isometry types} \subsection{Groups having representations with a small number of isometry types} We now consider groups which can be realized as closed subgroups of $Iso(T)$ with a small number of isometry types. Let $T$ be a locally finite tree and $G$ be a closed subgroup of $Iso(T)$. By $\pi_n$ we denote the canonical homomorphism from $G$ to the group $G_n$ of all restrictions of $G$ to $T_n$, $n\in \omega$. \begin{lem}\label{trans} (1) If there is some $n\in\omega$ such that $ker(\pi_n )\setminus \{ 1\}$ consists of isometries of the same type, then the group $G$ has an open subgroup which is a locally finite pro-$p$-group for some prime $p$. (2) If there is $n\in\omega$ such that the set of non-diagonal pairs of $ker(\pi_n )$ is contained in the same orbit of $Iso(T)$, then $ker(\pi_n)$ is isomorphic to the power $(\mathbb{Z}(p))^{\omega}$. \end{lem} {\em Proof.} Under the assumptions of the lemma we easily see that for every $l\ge n$ all non-trivial elements of the finite group $ker(\pi_n )/ker(\pi_l )$ are of the same order. Thus all of them are of order $p$, where $p$ is prime. This shows that $ker(\pi_n )$ is a pro-$p$-group of exponent $p$. By a theorem of Zelmanov from \cite{zelmanov} it is locally finite. 
\parskip0pt The second statement follows from the fact that every $ker(\pi_n )/ker(\pi_l )$ is a nilpotent group and has non-trivial centre. The assumption says that it must be isomorphic to a finite power of $\mathbb{Z}(p)$. The rest is obvious. $\Box$ \begin{prop} \label{proptrans} Let $T$ be a locally finite tree and $G$ be a closed subgroup of $Iso(T)$ with a small number of isometry types. Then the following statements hold. (1) For every $m\in\omega$ and every $h\in G$ there are some $n\in\omega$ and $g\in G\setminus ker(\pi_n )$ such that $g\cdot ker(\pi_n)\subseteq h\cdot ker(\pi_m)$ and $g\cdot ker(\pi_n)$ consists of isometries of the same type. (2) If $g\in G\setminus ker(\pi_n )$, $n\in\omega$, and the set of all non-diagonal pairs of $g\cdot ker(\pi_n )$ is contained in the same orbit of $Iso(T)$, then $G$ has an open subgroup which is a locally finite pro-$p$-group for some prime $p$. If moreover for every $g_1, g_2 ,g'_1 ,g'_2 \in g\cdot ker(\pi_n )$ with $g_1 \not= g_2$, $g'_1 \not= g'_2$ one of the pairs $(g_1 ,g_2 )$ and $(g'_1 ,g'_2 )$ can be mapped to the other by an isometry fixing some element of $g\cdot ker(\pi_n )$ then $ker(\pi_n )$ is isomorphic to the power $(\mathbb{Z}(p))^{\omega}$, where $p$ is prime. \end{prop} {\em Proof.} (1) By the Baire category theorem, $Iso(T)$-conjugacy classes which are open in $G$ form a dense subset of $G$. By the definition of the profinite topology we see the existence of $g\cdot ker(\pi_n )$ as in the formulation. \parskip0pt (2) If the set of all non-diagonal pairs of $g\cdot ker(\pi_n )$ is contained in the same orbit of $Iso(T)$, then each element of $ker(\pi_n )$ can be mapped to any other element of $ker(\pi_n )$ by an isometry. By Lemma \ref{trans}, $G$ has an open subgroup which is a locally finite pro-$p$-group for some prime $p$. \parskip0pt Assuming the last condition of the formulation consider any $ker(\pi_n )/ker(\pi_l )$ with $l\ge n$. As we already know this is a group of exponent $p$. 
Let $h_1 \cdot ker(\pi_l)$ belong to the centre of $ker(\pi_n )/ker(\pi_l )$, and $h_2 \not= h_3 \in ker(\pi_n )$. Let an isometry $\mu$ map $gh_1$ to $gh_2$ and fix the pair $g, gh_3$ (we may assume so). Then we have modulo $ker(\pi_l )$: $gh_1 \cdot gh_3 = ggh_3 (h_1 )^{g} = g(h_3 )^{g^{-1}}\cdot g(h_1 )^{g}$ and $gg(h_2 )^{g}h_3 = gh_2 gh_3 =\mu (gh_1 )\mu (gh_3)=\mu (gh_1\cdot gh_3)$ $=g(h_3 )^{g^{-1}}g(h_2 )^{g}= ggh_3(h_2 )^{g}$. Thus $h_2 ker(\pi_l )$ belongs to the centre of $ker(\pi_n )/ker(\pi_l )$. The rest is obvious. $\Box$ \bigskip \begin{cor} \label{8} Let $T$ be a locally finite tree and $G$ be a closed subgroup of $Iso(T)$ with a small number of isometry types. Then there is no infinite sequence of vertices $v_{1},v_{2},...$ such that $T_{v_{i}}\cap T_{v_{j}}=\emptyset$ for $i\not=j$, and for every $i$ the point-wise stabilizer of $T \setminus T_{v_{i}}$ in $G$ has a non-trivial action on $T_{v_{i}}$. In particular a weakly branch group does not have a small number of isometry types. \end{cor} {\em Proof.} Assume the contrary. Since $T$ is locally finite, we easily see that $G$ does not satisfy Proposition \ref{proptrans} (1). The rest is obvious. $\Box$ \bigskip {\bf Example.} One can build a tree action of the wreath product $\mathbb{Z}(p)wr\mathbb{Z}_p$ with a large number of isometry types. The following example concerns the case of a tree of valency three. \bigskip {\bf Example.} Let $T=\{ 0,1\}^{<{\bf N}}$ be a tree with the root $\emptyset$ (${\bf N}= \omega\setminus \{ 0\}$; the elements of the tree are presented by finite sequences). Let $g\in Iso(T)$ be defined as follows: $g(0) = 1$, $g(1) = 0$. We define $g$ so that at every level $g$ has a unique cycle: If $v$ consists of $n$ $0$-s (the beginning of the cycle) then let $g^{i}(v0)=g^{i}(v)0$, $i < 2^{n}$, and $g^{i}(v0)=g^{i}(v)1$, $2^{n}\le i < 2^{n+1}$. 
It is clear that $g$ realises the {\em adding machine}: the result of the application of $g$ to a branch $(a_{1},a_{2},...)$ is the 2-adic sum $(a_{1},a_{2},...) + (1,0,...)$. Let $G = cl(\langle g\rangle )$; then $G\cong {\bf Z}_{2}$, the additive group of 2-adic integers. \parskip0pt If $g'\in G\cap (Ker\pi_{n}\setminus Ker\pi_{n+1})$ then for every level $m>n$ there exist exactly $2^{n}$ $g'$-cycles of length $2^{m-n}$. This follows from the fact that $g'$ can be interpreted as a 2-adic number $c_{n+1}2^{n+1}+c_{n+2}2^{n+2}+ ...$ with $c_{n+1} =1$. (Thus $c_{n+1}2^{n+i+1}+c_{n+2}2^{n+i+2}+ ...$ corresponds to $(g')^{2^i}$.) \parskip0pt By the description of conjugacy in $Iso(T)$ given in \cite{GNS} we see that $g_{1}$ and $g_{2}$ from $G$ have the same isometry type if and only if there is a number $n\in \omega$ such that $g_{1},g_{2} \in Ker\pi_{n}\setminus Ker\pi_{n+1}$. This obviously implies that the group $G$ has a small number of isometry types. $\Box$ \bigskip \begin{thm} \label{nilp} Let $T$ be a locally finite rooted tree. (1) If a pronilpotent group $G$ has a faithful representation in $Iso(T)$ with a small number of isometry types, then the number of primes dividing $|G|$ is finite. \parskip0pt (2) If a nilpotent group $G$ has a faithful representation in $Iso(T)$ with a small number of isometry types, then the periodic part of $G$ is of finite exponent. \end{thm} {\em Proof.} (1) Let $G$ be the inverse limit of the sequence $G_{1} \leftarrow G_{2} \leftarrow G_{3} \leftarrow...$ corresponding to an appropriate representation on a tree. Let $P$ be the set of primes dividing $|G|$. Since $G$ is pronilpotent, for every $X\subseteq P$ there exists $g_{X}\in G$ such that a prime number $p$ divides $|g_{X}|$ if and only if $p\in X$. If $P$ is infinite there are continuum many elements of $G$ having pairwise distinct orders in $Iso(T)$. Any two such elements cannot be conjugated in $Iso(T)$. \parskip0pt (2) Assume the contrary. 
Taking a subgroup if necessary we may assume that $G$ is nilpotent and periodic. Note that if each factor of the upper central series is of finite exponent then $G$ is of finite exponent. Thus we can find a characteristic subgroup $G^{-} <G$ of finite exponent such that the center $C(G/G^{-} )$ is infinite and is not of bounded exponent. Let $\pi_n$ be the standard projection $G\rightarrow G_n$ corresponding to the restriction to $T_n$. We now build a sequence of natural numbers $n_1 < n_2 <...<n_i ,...$ and a sequence $g_1 ,..., g_i ,... \in G\setminus G^{-}$ with $g_i G^{-} \in C(G/G^{-})$ such that for all $i\in\omega \setminus \{ 0\}$ the following properties hold: \begin{quote} (a) $\prod_{j\le i}|g_j | < |g_{i+1} |$ ; \\ (b) $|\pi_{n_i} (g_i )| = |g_i |$ ; \\ (c) $g_{i+1} \in Ker (\pi_{n_i})$. \end{quote} This can be done by induction as follows. Using the fact that $C(G/G^{-})$ is infinite and is not of finite exponent we choose $g_{i+1}$ such that conditions (a) and (c) are satisfied. Since $|g_{i+1}|<\infty$, we can find $n_{i+1}$ such that (b) is satisfied. \parskip0pt Now for every subset $Y\subset \omega$ take $g_Y := \prod \{ g_i : i\in Y\}$. It is clear that $g_Y$ is the limit of all products $\prod \{ g_j : j\le i, j\in Y\}$. If $Y\not= Y'$ and $i$ is the least natural number from $Y\Delta Y'$, then the restrictions of $g_Y$ and $g_{Y'}$ to $T_{n_i}$ have distinct orders and cannot be conjugated by an isometry. This contradicts the assumption that $G$ has a small number of isometry types. $\Box$ \bigskip Note that it is not very difficult to realize the situation of Theorem \ref{nilp}(2). The following observation gives a wide class of examples of representations with small numbers of isometry types. \begin{prop} \label{product} Let $H$ be a finite group. Then the product $G= H^{\aleph_{0}}$ has a faithful action on a rooted tree with countably many isometry types. 
\end{prop} {\em Proof.} Consider the inverse system of projections $$ H \leftarrow H\oplus H \leftarrow H\oplus H\oplus H \leftarrow...$$ where the groups consist of sequences $$ H=\{ h_{1},h_{2},...,h_{n}\} \mbox{ , } H\oplus H=\{ h_{1}h_{1},h_{1}h_{2},...,h_{n-1}h_{n},h_{n}h_{n}\} $$ $$,...,H\oplus ...\oplus H= \{h_{1}...h_{1},...,h_{n}...h_{n}\},... \mbox{ . } $$ The same sequences form an $n$-ary tree where the groups act by multiplication: $h_{i}w\cdot h_{l}=h_{j}w$ for $h_{i}\cdot h_{l}= h_{j}$, $h_{i}h_{m}w\cdot h_{k}h_{l}=h_{j}h_{n}w$ for $(h_{i},h_{m})\cdot (h_{k},h_{l})= (h_{j},h_{n})$,... . It is easy to see that if $g\in G$ and $|g|=s$ then there are sequences $n_{1},n_{2},...,n_{t}$ and $s_{1},...,s_{t}=s$ such that $|\pi_{n_{1}-1}(g)|=1$, $|\pi_{n_{1}}(g)|=s_{1}=|\pi_{n_{2}-1}(g)|$, $|\pi_{n_{2}}(g)|=s_{2}=|\pi_{n_{3}-1}(g)|,...$ and $|\pi_{n_{t}}(g)|=s_{t}= |\pi_{m}(g)|$ for all $m\ge n_{t}$. In that case for every level $n_{j}\le m< n_{j+1}$ all $g$-cycles are of length $s_{j}$ and for every level $m\ge n_{t}$ all $g$-cycles are of length $s$. By the description of conjugacy in $Iso(T)$ given in \cite{GNS} we see that $g_{1}$ and $g_{2}$ from $G$ have the same isometry type if and only if the corresponding sequences for them are the same. This obviously implies that the group $G$ has a countable number of isometry types. $\Box$ \bigskip We now see that any group of the form $$ \mathbb{Z}(n_{1})^{\omega}\oplus...\oplus \mathbb{Z}(n_{k})^{\omega} \oplus \mathbb{Z}_{p_{1}}^{k_1}\oplus... \oplus \mathbb{Z}_{p_{l}}^{k_l} $$ has a faithful representation with a small number of isometry types. {\bf Conjecture.} An abelian profinite group $A$ has a faithful representation with a small number of isometry types if and only if $$ A\cong \mathbb{Z}(n_{1})^{\omega}\oplus...\oplus \mathbb{Z}(n_{k})^{\omega} \oplus \mathbb{Z}_{p_{1}}^{k_1}\oplus... \oplus \mathbb{Z}_{p_{l}}^{k_l}. 
$$ \bigskip Proposition \ref{proptrans} motivates the question of description of subgroups $G$ of $Iso (T)$ having a small number of isometry types in $G\times G$. We have a small remark concerning this case. \begin{lem} If $G<Iso(T)$ has a small number of isometry types in $G\times G$, then $G$ does not have an element of infinite order. \end{lem} {\em Proof.} Indeed, if $|g|=\infty$, then for any $z_1 ,z_2 \in \mathbb{Z}_p$ with $z_1 \not=z_2$ we have $g^{z_1} \not=g^{z_2}$. If an isometry $h\in Iso(T)$ centralizes $g$, then it must centralize any $g^{z}$, $z\in\mathbb{Z}_p$. Thus all pairs $(g,g^{z})$, $z\in \mathbb{Z}_p$, have pairwise distinct isometry types. $\Box$ \bigskip {\bf Remark.} The group $A= \prod_{\omega}\langle g_{i}\rangle$ with $\langle g_{i}\rangle\cong \mathbb{Z}(p)$, is an example of a small group in the sense of Newelski (this was observed by G.Bezulski in his master thesis). It is worth noting that the action of $A$ on a binary tree as in Proposition \ref{product}, has a small number of isometry types. We thus consider the inverse system of projections $$ \mathbb{Z}(2) \leftarrow \mathbb{Z}(2)\oplus \mathbb{Z}(2) \leftarrow \mathbb{Z}(2)\oplus \mathbb{Z}(2)\oplus \mathbb{Z}(2) \leftarrow... $$ where the groups consist of sequences $0,1, 00,01,10,11, 000,...,111,...$ . The same sequences form a binary tree where the groups act as follows: $0w+0=0w$, $0w+1= 1w$, $00w+00=00w$, ..., $01w+11= 10w$, $11w+11= 00w$,... . It is easy to see that if $g'\in A\cap (Ker\pi_{n}\setminus Ker\pi_{n+1})$ then for every level $m>n$ there exist exactly $2^{m-n-1}$ $g'$-cycles of length $2$. By the description of conjugacy in $Iso(T)$ given in \cite{GNS} we see that $g_{1}$ and $g_{2}$ from $A$ have the same isometry type if and only if there is a number $n\in \omega$ such that $g_{1},g_{2} \in Ker\pi_{n}\setminus Ker\pi_{n+1}$. This obviously implies that the group $A$ has a small number of isometry types. 
\parskip0pt On the other hand by Lemma \ref{8} the group $A$ does not have a small number of isometry types under the following presentation in $T= p^{{\bf N}}$: for any $i$ the element $g_{i}$ stabilizes $T\setminus T_{1...10}$ and $(1...10jw)g_{i}= 1...10(j+1)w$. $\Box$ \bigskip \subsection{Isometry groups having a small number of isometry types for all isometry representations} \begin{lem} \label{lemnilp} (1) Let a profinite group $G$ and a sequence $g_i\in G$, $i\in\omega$, satisfy the condition that for every $i\in\omega$ there is a continuous epimorphism $\rho_i$ from $G$ to a finite group such that $\rho_i (g_i )\not= 1$, but all $g_j$ with $j\not= i$ belong to the kernel of $\rho_i$. Then there is a locally finite rooted tree $T$ such that $G$ has a faithful representation in $Iso (T)$ with continuum many isometry types. \parskip0pt (2) If $G$ is a pro-$p$-group as above, then there is a faithful representation of $G$ in $Iso (T)$ with continuum many isometry types, where $T$ is the rooted tree of valency $p+1$. \end{lem} {\em Proof.} (1) We define $T$ as a composition of two parts. To define the first one assume that $G$ can be presented as an inverse limit of a system $G_0\leftarrow G_1 \leftarrow ...\leftarrow G_i \leftarrow ...$ of finite groups. Let $N_i$, $i\in \omega$, be the kernels of the corresponding canonical homomorphisms $G\rightarrow G_i$. Then $G$ acts by left multiplication on the tree $T_C (G)$ of left cosets of all $N_i$, $i\in\omega$. \parskip0pt The second part $T_S (G)$ of the tree consists of all elements of $\bigcup\{ (\bigoplus^{j}_{i=0} \rho_i (G)) : j\in\omega \}$, where the ordering of the tree is defined by extension. We define an action of $G$ on this tree as follows. For every $g\in G$ and every vertex of the form $(1,...,1,\rho_{i-1}(h),\rho_{i}(h'),\rho_{i+1}(h''),...)$ with $h\not=1$, define the $g$-image of this vertex as $(1,...,1,\rho_{i-1}(h),\rho_{i}(gh'),\rho_{i+1}(h''),...)$. 
\parskip0pt Adding to the disjoint union $T_C (G)\cup T_S (G)$ an additional root $\emptyset$ together with edges to the roots of these trees (denoted by $\emptyset_C$ and $\emptyset_S$) we obtain the required tree $T$. Since the $G$-set $T_C (G)$ is equivariantly embedded into $T$ the latter is a faithful $G$-set. \parskip0pt For any subset $J=\{ j_1 ,j_2 ,...\}\subset \{ 1,2,...\}$ consider the sequence $g_{j_1}\cdot g_{j_2}\cdot ...\cdot g_{j_k}$, $k\in\omega$. Since $G$ is compact, there is the limit $g_J$ of this sequence. By the definition of the action of $G$ on $T_S (G)$ we easily see that for $J\not= K$ the elements $g_{J}$ and $g_{K}$ have distinct isometry types. (2) The proof of this statement is similar to the proof above. We modify it as follows. Replace $T_C (G)$ above by any faithful representation of $G$ on the rooted tree $T(p)$ of valency $p+1$ (see Proposition 2 of \cite{Grig}). It is well-known that any finite $p$-group can be embedded into the isometry group of a finite part of $T(p)$ (consisting of several levels). Thus every $\rho_i (G)$ can be presented as an isometry group of some $T(p)_{k_i}$ (consisting of $k_i$ levels). We now replace $T_S (G)$ above by the tree $T(p)$. We define an action of $G$ on this tree as follows. For every $g\in G$ and every vertex of the form $(0^{k_1},...,0^{k_{i-1}},1 ,t_1,t_2 ,...,t_{k_i} ,...)$ define the $g$-image of this vertex as $(0^{k_1},...,0^{k_{i-1}},1, \rho_{i}(g)t_1 ,\rho_{i}(g)t_2 ,...,\rho_{i}(g)t_{k_i},...)$ (we define the action of $g$ identically on other elements). The rest is obvious. $\Box$ \begin{thm} \label{nilp2} Let $G$ be a nilpotent profinite group. If all representations of $G$ in isometry groups of locally finite trees have small numbers of isometry types, then $G$ is a group of finite rank. \end{thm} {\em Proof.} Assume that the rank of $G$ is infinite. By Theorem \ref{nilp} (1) there is a prime number $p$ such that the Sylow $p$-subgroup of $G$ has infinite rank. 
Assume that $G$ is a pro-$p$-group. By composing trees as in the proof of Lemma \ref{lemnilp} we can reduce the situation to this case. \parskip0pt Take the upper central series $1=G_0 <G_1 <...<G_n =G$. It is clear that all $G_i$ are closed. Since $rk(G)=\infty$ we have some $i$ with infinite $rk(G_{i+1}/G_i )$. Find the least $i\le n$ such that all $rk(G_{i+j+1}/G_{i+j} )$ are finite. We denote this number by $m$. Thus $rk(G/G_m )$ is finite and the group $G_m /G_{m-1}$ is a $\mathbb{Z}_p$-module which does not have a finite generating set. \parskip0pt Let cosets $g'_1 G_m,...,g'_t G_m$ generate topologically $G/G_m$ and $K$ be the intersection of $G_m$ with the closure of the subgroup $\langle g'_1 ,...,g'_t\rangle$. Note that the closure of $\langle g'_1 ,...,g'_t \rangle G_{m-1}$ is a normal subgroup of $G$ such that the corresponding quotient is isomorphic to $G_m /KG_{m-1}$ ($=cl(\langle g'_1 ,...,g'_t\rangle )G_m /cl(\langle g'_1 ,...,g'_t\rangle )G_{m-1}$). Thus to prove the statement it suffices to build a sequence $g_1 ,g_2 ,..., g_i ,...\in G\setminus G_{m-1}$ representing pairwise distinct cosets of $G_m /KG_{m-1}$ which satisfy the conditions of Lemma \ref{lemnilp}(1) with respect to this group. \parskip0pt Assume that $g_1 ,g_2 ,...,g_l$ and the corresponding $\rho_i$ are already defined. Since $G_m /KG_{m-1}$ is a $\mathbb{Z}_p$-module which does not have a finite generating set, there is an open normal subgroup $L$ of $G_m$ containing $KG_{m-1}$ such that $L/KG_{m-1} <(\bigcap^{l}_{i=1} ker(\rho_i ))$ and $\bigcap^{l}_{i=1} ker(\rho_i )/L \not\le \langle g_1 ,...,g_l \rangle L$. Let $g_{l+1}$ represent an $L$-coset of $\bigcap^{l}_{i=1} ker(\rho_i )/L$ which does not belong to $\langle g_1 ,...,g_l \rangle L$ and let $\rho_{l+1}$ be the natural homomorphism onto $$ (\bigcap^{l}_{i=1} ker(\rho_i ))/(\bigcap^{l}_{i=1} ker(\rho_i )\cap \langle g_1 ,...,g_l \rangle L). $$ It is easy to see that this construction produces a required sequence. 
$\Box$ \bigskip \begin{thm} \label{Z_p} (1) Let $\Delta$ be a set of prime numbers. For each $p_i\in\Delta$ let $G_i$ be a pro-$p_i$-group such that all faithful representations of $G_i$ in isometry groups of locally finite rooted trees have a small number of isometry types. The set $\Delta$ is finite exactly when every faithful representation of $\oplus_{p_i\in\Delta}G_{i}$ in the isometry group of a locally finite rooted tree has a small number of isometry types. (2) An abelian profinite group $G$ has a small number of isometry types for all faithful representations of $G$ in isometry groups of locally finite rooted trees if and only if the set of prime numbers dividing $|G|$ is finite and $G$ is of finite rank. \end{thm} {\em Proof.} (1) Necessity of the statement follows from Theorem \ref{nilp}. Consider the case of a representation of $H \oplus G_s$, where $H$ is a pro-$\Delta$-group where $\Delta$ is finite and $p_s \not\in\Delta$. Assume that all representations of $H$ as closed subgroups of isometry groups of locally finite rooted trees have a small number of isometry types. We want to prove that so does the group $H \oplus G_s$. \parskip0pt Let $H \oplus G_s$ act on $T$ as a closed subgroup of $Iso(T)$. Let $g \in G_s$. Consider the tree of $g$-cycles $T/\langle g\rangle$ induced by $g$ on $T$. To each vertex $\langle g\rangle t$ of $T/\langle g \rangle$ we assign two numbers: $v(t)$, the valency of $t$, and $m(t) =|\langle g \rangle t|$. Since $G_s$ has a small number of isometry types, there is a small number of such trees corresponding to elements of $G_s$ (up to label preserving isomorphism). Note that $m(t)$ is a power of $p_s$. \parskip0pt Since $[H ,G_s ]=1$ the action of $H$ on $T$ induces an action of $H$ on $T/\langle g\rangle$. If $h\in H$ defines a $h$-cycle $\langle h \rangle t$ in $T$, then let $l_{h}(t )$ be the length of the corresponding $h$-cycle in $T/\langle g\rangle$. 
Since $p_s$ does not divide $|\langle h\rangle t|$, we see that $l_h (t)=|\langle h\rangle t|$. Moreover we also see that the length of the $g h$-cycle of $t$ is equal to $m(t) l_h (t)$. Now it is straightforward that the isometry type of $h$ on $T/\langle g \rangle$ determines the isometry type of $g h$ on $T$ (even the isometry type of the pair $(g ,h)$). Since the number of isometry types of elements of $H$ on $T/\langle g\rangle$ is less than continuum we easily obtain the statement of the proposition. (2) Let $\Delta$ be a set of prime numbers. Let $G= \prod_{p\in\Delta}\mathbb{Z}^{l_p}_{p}$ where all $l_p$ are finite. It is enough to prove that if $\Delta$ is finite then every faithful representation of $G$ in $Iso(T)$ has a small number of isometry types. \parskip0pt By part (1) it suffices to consider the case when $\Delta$ is a singleton. Let $l_p =l\ge 1$. Let $T$ be a tree as in the formulation and $\mathbb{Z}^{l}_{p}$ act as a closed subgroup of $Iso(T)$. By $\pi_n$ we denote the canonical homomorphism from $\mathbb{Z}^{l}_{p}$ to the group of all restrictions of $\mathbb{Z}^{l}_{p}$ to $T_n$, $n\in \omega$. Thus for every $g\in \mathbb{Z}^{l}_{p}$ every $\pi_n$ maps the group $g^{\mathbb{Z}_p}$ to a cyclic $p$-group. \parskip0pt We fix a tuple of free topological generators of $\mathbb{Z}^{l}_{p}$: $g_1 ,...,g_l$ (so that $\mathbb{Z}^{l}_{p}=g^{\mathbb{Z}_{p}}_{1}\times ...\times g^{\mathbb{Z}_{p}}_{l}$). To an element $g^{z_1}_1 \cdot ...\cdot g^{z_l}_{l}$ we associate the tuple $(v_p (z_1 ) ,...,v_p (z_l ))$ of the corresponding values with respect to the $p$-adic valuation. We claim that given an action of $\mathbb{Z}^{l}_{p}$ on $T$ {\em the tuple $(v_p (z_1 ) ,...,v_p (z_l ) )$ determines the size of the $g^{z_1}_1 \cdot ...\cdot g^{z_l}_{l}$-cycle of any $t\in T$.} In particular this tuple determines the isometry type of $g^{z_1}_1 \cdot ...\cdot g^{z_l}_{l}$ (thus the action has a small number of isometry types). 
We prove this by induction on $l$. \parskip0pt Let $l=1$. For any $t\in T_n$ the $g^{z_1}_1$-cycle of $t$ is determined by $\pi_n (g^{z_1}_{1})$. Thus to find $|g^{z_1}_1 t|$ we may assume that $z_1$ belongs to $\mathbb{Z}$. Let $v_p (z_1 ) = s$, i.e. $z_1 = p^{s}k$ with $(k,p)=1$. Since the $g^{k}_{1}$-cycle of $t$ coincides with the $g_1$-cycle of $t$, the length of the $g^{z_1}_1$-cycle of $t$ equals the first integer not less than $|\langle g_1 \rangle t|/ p^{s}$. The rest of this case is an obvious application of \cite{GNS}. \parskip0pt Now consider the case of an action of $H\times g^{\mathbb{Z}_p}_l$, where $H= g^{\mathbb{Z}_p}_1 \times ...\times g^{\mathbb{Z}_p}_{l-1}$. Assume that for any isometric action of $H$ on a locally finite rooted tree $T'$ and any $t'\in T'$ the number $|\langle h \rangle t'|$, $h\in H$, is determined by the corresponding tuple of $p$-adic values of $h$. In particular the isometry type of $h$ is determined by this tuple of $p$-adic values. We want to prove these statements for the action on $T$ and any $g^{z_1}_1 \cdot ...\cdot g^{z_l}_l$. \parskip0pt As above we assume that $t\in T_n$ and $z_l =p^{s}k$ with $(k,p)=1$. Then $g_l$ and $g^{k}_l$ have the same cycles of $t$. Let $h= g^{z_1}_1 \cdot ...\cdot g^{z_{l-1}}_{l-1}$. Consider the tree of $h$-cycles $T/\langle h\rangle$ induced by $h$ on $T$. To each vertex $\langle h\rangle t'$ of $T/\langle h \rangle$ we assign two numbers: $v(t')$, the valency of $t'$, and $m(t') =|\langle h \rangle t'|$. Note that $m(t')$ is a power of $p$ and is determined by the corresponding tuple of $p$-adic values of $h$. \parskip0pt Since $[H ,g_l ]=1$ the action of $g_l$ on $T$ induces an action of $g_l$ on $T/\langle h\rangle$. Then $g_l$ and $g^{k}_l$ have the same cycles of $\langle h\rangle t$. 
\parskip0pt As a result we see that $|\langle g^{z_l}_l\rangle t|$ equals the first integer not less than $|\langle g_l\rangle t|/p^s$, and the length of the $g^{z_l}_l$-cycle of $\langle h \rangle t$ in $T/\langle h\rangle$ equals the first integer not less than $|\langle g_l\rangle (\langle h\rangle t)|/p^s$. Dividing $|\langle g^{z_l}_l \rangle t|$ by the length of the $g^{z_l}_l$-cycle of $\langle h \rangle t$ we find $|(\langle h\rangle t) \cap (\langle g^{z_l}_l \rangle t)|$. This number uniquely determines all equalities $h^i (t) = g^{jz_l}_l (t)$ for $i\le |\langle h\rangle t|$, $j\le |\langle g^{z_l}_l \rangle t|$. In particular it uniquely determines the length of the $hg^{z_l}_l$-cycle of $t$. $\Box$ \bigskip \begin{quote} {\bf Conjecture.} Let $G$ be a closed subgroup of $GL_d (\mathbb{Z}_p )$. Then $G$ has a small number of isometry types for all faithful actions on locally finite rooted trees. \end{quote} In the case of abelian groups the conjecture holds by Theorem \ref{Z_p} (2). By Lemma \ref{lemnilp} we see that groups of the form $H^{\omega}$ always have a representation with continuum of isometry types. On the other hand it is easy to see that this lemma cannot be applied to closed subgroups of $GL_d (\mathbb{Z}_p )$. Thus our conjecture looks reasonable. Another interesting question is to prove the converse of Theorem \ref{nilp2}. \section{Rooted groups interpretable in $\mathbb{Q}_p$} One of the main obstacles to deeper study of isometry groups of rooted trees is the fact that there is no interesting first-order structure where these groups can be interpreted. To remedy this situation one can choose a rich algebraic object and consider isometry groups interpretable in it. Then the general case can be somehow approximated by information obtained in this way. We now describe one of possible realizations of this idea. \parskip0pt Let $T$ be a locally finite rooted tree. Let $\Gamma$ be a closed subgroup of $Iso(T)$. 
We say that $g,g'\in Iso(T)$ have the same $\Gamma$-type if there exists $h\in \Gamma$ such that $g^{h}=g'$. It makes sense to study $\Gamma$-types of $Iso(T)$ assuming that $\Gamma$ is interpretable in some natural model-theoretic objects. In this situation we say that $G\le Iso(T)$ has a small number of $\Gamma$-types if $G$ meets $\le \aleph_0$ $\Gamma$-orbits of the conjugacy action of $\Gamma$ on $Iso(T)$. \parskip0pt In this section we concentrate on trees $T$ and groups $\Gamma \le Iso(T)$ which are interpretable in the theory of the $p$-adics $\mathbb{Q}_p$. This provides new tools and, moreover, gives a possibility of extension of the matter of the previous sections by some additional questions. \parskip0pt For every natural $n$ we introduce the following equivalence relation on $Iso(T)$: for $g,g'\in Iso(T)$ we define $g \equiv^{\Gamma}_{n} g'$ if the restrictions of $g$ and $g'$ to $T_n$ are conjugated by an element of the projection of $\Gamma$ to $T_n$. For a subgroup $G\le Iso(T)$ we denote by $c^{G}_n$ the number of all $\equiv^{\Gamma}_{n}$-classes meeting $G$. We will also study the question when the zeta function $\Sigma_{n\ge 0} c^{G}_{n} t^n$ is rational. Applying the main results of \cite{hrma} we give several positive examples. \bigskip We start with the group $GL_d (\mathbb{Z}_p )$ and the corresponding tree $T_{ad}$, i.e. the tree of cosets in $(\mathbb{Z}_p )^d$ with respect to all subgroups of the form $p^k (\mathbb{Z}_p )^d$. \parskip0pt Let $\Gamma$ be a closed subgroup of $GL_{d}(\mathbb{Z}_p )$. Then the group $D=(\mathbb{Z}_p )^d$ becomes a continuous $\Gamma \mathbb{Z}_p$-module. Thus $\Gamma$ acts by isometries on $T_{ad}$. We now give some examples which show that typical subgroups of $GL_d (\mathbb{Z}_p )$ have continuum $\Gamma$-types with respect to their standard action on $T_{ad}$. {\bf Example 1.} Let $\Gamma =GL_d (\mathbb{Z}_p )=G$ with $d>1$. 
Since $|\mathbb{Z}_p |=2^{\aleph_0}$, one can produce continuum many Jordan matrices over $\mathbb{Z}_p$. Assuming that $d=2$ and $G$ is the group $SD_2 (\mathbb{Z}_p )$ of all diagonal matrices over $\mathbb{Z}_p$ with determinant 1 we obtain an abelian group of isometries of the tree $T_{ad}$ having continuum $\Gamma$-types where $\Gamma =GL_d (\mathbb{Z}_p )$. On the other hand extending this idea we can prove the following statement. \begin{prop} \label{titsalt} Let $\Gamma =GL_d (\mathbb{Z}_p )$ and $G\le GL_d (\mathbb{Z}_p )$. If $G$ is a closed subgroup of $Iso(T_{ad})$ which does not have a soluble subgroup of finite index, then $G$ meets $2^{\aleph_0}$ $\Gamma$-types. \end{prop} {\em Proof.} By Tits alternative $G$ has a free subgroup. The proof of \cite{tits} shows that $G$ contains a semisimple element $g$ of infinite order. Then no two distinct powers from $g^{\mathbb{Z}_p}$ can be conjugated in $\Gamma$. $\Box$ \bigskip {\bf Example 2.} Let $\Gamma =SL_2 (\mathbb{Z}_p )$ and $G=UT_2 (\mathbb{Z}_p )$ be the abelian group of all unitriangular matrices over $\mathbb{Z}_p$, $p>2$. Straightforward computations show that matrices $(a_{11},a_{12},a_{21},a_{22})$ and $(b_{11},b_{12},b_{21},b_{22})$ from $G$ (written as vectors, i.e. $a_{21}=b_{21}=0$ and $a_{11}=b_{11}=a_{22}=b_{22}=1$) are conjugated by $(x_{11},x_{12},x_{21},x_{22})\in \Gamma$ if and only if $x_{21}=0$ and $b_{12}=x^2_{11}a_{12}$. It is worth noting that since $(\mathbb{Z}^{\times}_p )^2 =\mathbb{Z}^{\times}_p$, any two elements $\bar{a},\bar{b}\in G$ as above with $\nu (a_{12}) =\nu(b_{12})$, are $\Gamma$-conjugated (by a diagonal matrix). In this case $G$ has a small (i.e. countable) number of $\Gamma$-types. \bigskip {\bf Remark.} It is natural to ask if the actions above have a small number of isometry types with respect to the corresponding tree $T$ and $Iso(T)$. 
It is clear that if for some $\Gamma <Iso(T)$, $G$ has a small number of $\Gamma$-types then $G$ has a small number of isometry types. Thus the case when $G=UT_2 (\mathbb{Z}_p )$ acts on the corresponding tree $T_{ad}$ is clear. Assume that $G$ is the group $SD_2 (\mathbb{Z}_p )$ of all diagonal matrices over $\mathbb{Z}_p$ with determinant 1. Then \begin{quote} {\em the natural isometric action of $G$ on the tree $T_{ad}$ has a small number of isometry types.} \end{quote} Indeed identify $G$ with the set of diagonals $\{ (u,u^{-1}): u\in \mathbb{Z}^{\times}_p \}$. Then $G$ acts on the module $\mathbb{Z}_p \times\mathbb{Z}_p$ by the action: $$ (u,u^{-1})\cdot (z_1 ,z_2 ) = (uz_1 ,u^{-1}z_2 ). $$ This action naturally induces the action of $G$ on $T_{ad}$. It is clear that $G\cong \mathbb{Z}^{\times}_p$. On the other hand it is known that the group $\mathbb{Z}^{\times}_p$ can be decomposed into a direct sum isomorphic to $\mathbb{Z}(p) \oplus \mathbb{Z}_p$, where the factor isomorphic to $\mathbb{Z}(p)$ is represented by the cyclic subgroup $\{ \varepsilon_1 ,...,\varepsilon_{p-1}\}$ of $p-1$-th roots of unity. The factor $\mathbb{Z}_p$ is represented by all elements of $\mathbb{Z}^{\times}_p$ which are congruent to 1 mod $p$ (see Chapter 18 in \cite{fuchs}). It only remains to apply Theorem \ref{Z_p}. \parskip0pt In fact the latter factor contains $l$ such that all other elements are represented by $l^{z}$, where $z\in p\mathbb{Z}_p$. To find this $l$ let $t$ be a primitive root mod $p$ which is a primitive root modulo every power of $p$ and let $l= t^{p-1}$ (see \cite{fuchs}, p.316). As a result the action of $(u,u^{-1})=(l^{z},l^{-z})$ on a vertex $(z_1 ,z_2 )+p^{k}\mathbb{Z}_p$ is defined as follows. 
Assuming that $z_1 ,z_2 \in \omega$ and $z_1 ,z_2 <p^k$ we represent $(z_1 ,z_2 )=(p^{s_1}\varepsilon_i l^{t_1}, p^{s_2}\varepsilon_j l^{t_2})$ with $s_1 ,s_2 <k$ and $t_1 < k-s_1 -1$ and $t_2 <k-s_2 -1$ (this is possible because $l^i \equiv 1$ mod $p^k$ if and only if $p^{k-1}|i$). Then the action $(u,u^{-1})\cdot ((z_1 ,z_2 )+p^{k}\mathbb{Z}_p )$ is defined by the representative $(p^{s_1}\varepsilon_i l^{z+t_1}, p^{s_2}\varepsilon_j l^{-z+t_2})$. \bigskip According to Proposition \ref{titsalt}, when $\Gamma$ and $T_{ad}$ are as in this proposition, groups with a small number of $\Gamma$-types must be almost soluble. According to Theorem 1 of \cite{Grig2} the Hilbert-Poincar\'{e} series of such a group $G<GL_d (\mathbb{Z}_p )$ is $\mathbb{Q}$-rational. To show that zeta-functions of the numbers of conjugacy classes are rational for these examples we now formulate some general theorem. We consider the $p$-adics $\mathbb{Q}_p$ with the standard valuation $\nu :\mathbb{Q}^{*}_{p}\rightarrow \mathbb{Z}$ (and the value group), the valuation ring $\mathcal{O}=\mathbb{Z}_p$, the maximal ideal $M=\mathbb{Z}_p \setminus \mathbb{Z}^{\times}_p$ and the corresponding residue field. We remind the reader that $\nu$ is a homomorphism such that $\nu (x+y)\ge inf (\nu (x),\nu (y))$ and $\nu (p)=1$. Then $\mathbb{Z}_p =\{ x\in \mathbb{Q}_p : \nu (x)\ge 0\}$ is the valuation ring of $\mathbb{Q}_p$ and $\mathbb{Z}/p\mathbb{Z}$ is the corresponding residue field. \parskip0pt For each $n\in \omega$ we add the sort $GL_n (\mathbb{Q}_p )$ and the set of lattices $S_n (\mathbb{Q}_p )=GL_n (\mathbb{Q}_p )/GL_n (\mathbb{Z}_p ) $. We also add the natural map $GL_n (\mathbb{Q}_p )\rightarrow S_n (\mathbb{Q}_p )$. Structures of this form were introduced in \cite{hahrma} and \cite{hrma}, where sorts of these structures were called geometric. It is proved in \cite{hrma} that $\mathbb{Q}_p$ in this language admits elimination of imaginaries. 
The following theorem from \cite{hrma} (Theorem 6.2) plays the central role in this section. Let $R=(R_l )_{l\in \omega^{r}}$ be a definable family of subsets of $\mathbb{Q}^{N}_p$ (i.e. a definable subset of $\mathbb{Q}^{N}_p \times \mathbb{Z}^r$, where $\mathbb{Z}$ is the value group and $\omega \subset \mathbb{Z}$). Let $E=(E_l )_{l\in\omega^{r}}$ be a definable family of equivalence relations on $R$ (i.e. every equivalence class is contained in an $l$-fibre of $R$ for some $l\in\omega^{r}$). Suppose that for each $l\in\omega^{r}$ the set of equivalence classes $R_l /E_l$ is finite. Let $a_l =|R_l /E_l |$. Then the power series $\sum_{l\in\omega^{r}} a_l t^l\in \mathbb{Q}[[t_1 ,...,t_r ]]$ is $\mathbb{Q}$-rational. We now describe how it can be applied in our situation. \begin{prop} \label{interpretation} Let $\Gamma$ and $G$ be subgroups of $GL_d (\mathbb{Z}_p )$ definable in $\mathbb{Q}_p$. Let $c^{G}_n$ be the number of all $\equiv^{\Gamma}_{n}$-classes meeting $G$ defined with respect to $T_{ad}$. Then the zeta function $\Sigma_{n\ge 0} c^{G}_{n} t^n$ is $\mathbb{Q}$-rational. \end{prop} {\em Proof.} We interpret $T_{ad}$ in $\mathbb{Q}_p$ as the set of equivalence classes on $\mathbb{Z}^d_p \times \omega$ (where $\omega \subset \mathbb{Z}$ is the non-negative part of the value group) with respect to the following equivalence relation. Let \begin{quote} $(\bar{z}_1 ,c_1 )\sim (\bar{z}_2 ,c_2 )$ if $(c_1 = c_2 )\wedge (\nu (\bar{z}_1 -\bar{z}_2 )\ge c_1 )$. \end{quote} It is clear that this defines the set of cosets of all groups of the form $p^c (\mathbb{Z}_p )^d$. The inclusion relation of cosets (which is the order relation of $T_{ad}$) is defined by representatives: $$ ( \bar{z}_1 ,c_1 )\le (\bar{z}_2 ,c_2 ) \Leftrightarrow (c_1 \ge c_2 )\wedge (\nu (\bar{z}_1 -\bar{z}_2 )\ge c_1 ). $$ Let $R= (R_i )_{i\in\omega} = G\times \omega$. 
We define a family $E=(E_i )$ of equivalence relations on $R$ as follows: $$ ((g_1 ,c_1 ),(g_2 ,c_2 ))\in E \Leftrightarrow (c_1 =c_2 )\wedge (\exists \gamma \in \Gamma ) $$ $$ (\forall \bar{z}\in (\mathbb{Z}_p )^d )(\forall c \le c_1 ) (\gamma (g_1 ([(\bar{z},c)]_{\sim} ))= g_2 (\gamma ([(\bar{z},c)]_{\sim} ))). $$ The rest follows by Theorem 6.2 of \cite{hrma}. $\Box$ \bigskip \begin{prop} Let $G$ be a finitely generated, torsion free, nilpotent pro-$p$-group. Then for some natural $d$ there is an embedding of $G$ into $GL_{d}(\mathbb{Z}_p )$ such that the corresponding action $(G,T_{ad})$ is definable in $\mathbb{Q}_p$. The zeta function $\Sigma_{n\ge 0} c^{G}_{n} t^n$ of this action with respect to $\Gamma =GL_d (\mathbb{Z}_p )$ is $\mathbb{Q}$-rational. \end{prop} {\em Proof.} Let $a_1 ,...,a_n$ be a Malcev basis of $G$ (i.e. any element of $G$ can be written uniquely in the form $a^{\lambda_1}_1 \cdot ...\cdot a^{\lambda_n}_n$, $\lambda_i \in \mathbb{Z}_p$). A well-known theorem (see \cite{merzlakov}) states that group multiplication and inversion in $G$ are given by polynomials in $\lambda_i$ with coefficients in $\mathbb{Q}$ as is the map $G\times \mathbb{Z}_p \rightarrow G$, $(g,\lambda )\rightarrow g^{\lambda}$. This gives an interpretation of $G$ in $\mathbb{Q}_p$ as $\mathbb{Z}^{n}_p$. \parskip0pt By Theorem 59.2.1 from \cite{merzlakov} there is a natural number $d$ and a polynomial map $\phi :\mathbb{Z}^{n}_p \rightarrow UT_d (\mathbb{Z}_p )$ giving an isomorphic representation of $G$ on $\mathbb{Q}_p$. Moreover the converse map $\phi^{-1}$ is linear. This clearly provides a definable action $(G,T_{ad})$. By Proposition \ref{interpretation} we have the last statement of our proposition. $\Box$ \bigskip One of the main examples of $G$-trees in geometric group theory is the construction of $SL_2 (\mathbb{Q}_p )$-tree of lattices of Serre and Tits. 
We now show that the actions studied in the proposition above (and in Proposition \ref{interpretation}) arise as point-stabilizers in this example. The following well-known construction is taken from \cite{Serre}). A {\em lattice} in $\mathbb{Q}_p \times\mathbb{Q}_p$ is a two-generated $\mathbb{Z}_p$-submodule of $\mathbb{Q}_p \times\mathbb{Q}_p$ which generates the $\mathbb{Q}_p$-vector space $\mathbb{Q}_p \times\mathbb{Q}_p$. By $\mathcal{X}$ we denote the set of equivalence classes of lattices with respect to the equivalence $$ L\sim L' \leftrightarrow (\exists z\in \mathbb{Q}^{*}_p )(L'=Lz ). $$ The distance $d([L]^{\sim} ,[L']^{\sim} )$ between the corresponding classes is defined as follows. Find $L''\sim L'$ such that $L''=\langle e_1 p^{a}, e_2 p^{b}\rangle < \langle e_1 ,e_2 \rangle =L$. Then the number $|a-b|$ does not depend on the choice of the basis $e_1 ,e_2$ and the representative $L''$; so it is taken as the corresponding distance. \parskip0pt The metric space $(\mathcal{X},d)$ is a simplicial tree of valency $p+1$, and $PGL_2 (\mathbb{Q}_p )$ has an isometric action on $\mathcal{X}$. Moreover the group $PSL_2 (\mathbb{Q}_p )$ acts on $\mathcal{X}$ without inversions. To interpret this $G$-space in $Th(\mathbb{Q}_p )$, the set of lattices is usually identified with the left coset space $S_2 = GL_2 (\mathbb{Q}_p )/GL_2 (\mathbb{Z}_p )$ (as above, see \cite{hahrma}). Then $\mathcal{X}$ can be considered as the coset space $GL_2 (\mathbb{Q}_p )/(\mathbb{Q}^{*}_p\cdot GL_2 (\mathbb{Z}_p ))$, where $PGL_2 (\mathbb{Q}_p )$ ($PSL_2 (\mathbb{Q}_p )$ resp.) acts from the left and $\mathbb{Q}^{*}_p$ now stands for the subgroup of scalar matrices. 
Formally we consider $\mathcal{X}$ as $GL_2 (\mathbb{Q}_p )$ with respect to the following equivalence relation: $$ (a_{ij})_{i\le 2,j\le 2} \sim (b_{ij})_{i\le 2,j\le 2} \Leftrightarrow (\exists (c_{ij})_{i\le 2,j\le 2},(d_{ij})_{i\le 2,j\le 2} \in GL_2 (\mathbb{Q}_p )) (\exists q\in \mathbb{Q}^{*}_p ) $$ $$ [\nu (c_{ij})\ge 0 \wedge \nu (d_{ij})\ge 0 \wedge ((c_{ij})_{i\le 2,j\le 2}\cdot (d_{ij})_{i\le 2,j\le 2} = E )\wedge $$ $$ ((a_{ij})_{i\le 2,j\le 2} = (b_{ij})_{i\le 2,j\le 2}\cdot (q\cdot c_{ij})_{i\le 2,j\le 2})]. $$ The group $PGL_2 (\mathbb{Q}_p )$ ($PSL_2 (\mathbb{Q}_p )$ resp.) is considered as $GL_2 (\mathbb{Q}_p )$ ($SL_2 (\mathbb{Q}_p )$) with respect to the equivalence relation induced by the center. The action of $PGL_2 (\mathbb{Q}_p )$ on $\mathcal{X}$ is defined by left multiplication of the corresponding representatives. \parskip0pt To define the distance $d$ on $\mathcal{X}$ we use the following formula: $$ d((a_{ij})_{i\le 2,j\le 2}, (b_{ij})_{i\le 2,j\le 2}) =k \Leftrightarrow (\exists t\in \mathbb{Q}^{*}_p )(\exists (c_{ij})_{i\le 2,j\le 2}\in GL_2 (\mathbb{Z}_p )) $$ $$ (\exists q,r \in \mathbb{Z}_p )[k= |\nu (q)-\nu (r)| \wedge [(qa_{11}, ra_{12}, qa_{21},ra_{22}) = (b_{ij})_{i\le 2,j\le 2}\cdot (t\cdot c_{ij})_{i\le 2,j\le 2}]]. $$ Let $L_0 =\langle (1,0),(0,1)\rangle$. The stabilizer of the point $[L_0 ]^{\sim}$ is exactly the subgroup of $PGL_2 (\mathbb{Q}_{p})$ consisting of all cosets of the form $g\mathbb{Q}^{*}_p$ where $g\in GL_2 (\mathbb{Z}_p)$. Each vertex of $\mathcal{X}$ is represented by a unique lattice $L\subseteq L_0$ such that $L_0 /L \cong \mathbb{Z}_p /p^{n}\mathbb{Z}_p$, where $n$ is the distance between $L_0$ and $L$. In this case $L/p^n L_0$ is a direct factor of rank 1 of the $\mathbb{Z}_p /p^n \mathbb{Z}_p$-module $L_0 /p^{n}L_0$. When $L = \langle u,vp^{n}\rangle$ for some $u$ and $v$ generating $L_0$, we identify $L$ with the $\mathbb{Z}_p /p^n \mathbb{Z}_p$-module generated by $u+p^n L_0$. 
Formally we present the set of these modules as follows. In the rooted tree $\mathcal{L}$ of all cosets of $L_0 = \mathbb{Z}_p \times \mathbb{Z}_p$ of the form $u + p^n (\mathbb{Z}_p \times \mathbb{Z}_p )$ with $\nu (u)=0$ we introduce the following equivalence relation: $$ u+p^n L_0 \approx u' +p^l L_0 \Leftrightarrow (n=l) \wedge (\exists x\in \mathbb{Z}^{\times}_p )(xu-u' \in p^n L_0 ). $$ It is easy to see that the action of any $g\mathbb{Q}^{*}_p$ with $g\in GL_2 (\mathbb{Z}_p )$ on the set of elements of $\mathcal{X}$ which are $n$-distant from $L_0$, corresponds to the action of $g$ on the set of these classes. Moreover if $g$ and $g'\in GL_2 (\mathbb{Z}_p )$ determine the same element of $PGL_2 (\mathbb{Z}_p )$, then $g(u+p^n L_0 )\approx g'(u +p^n L_0 )$, where $u$ is as above. Thus the stabilizer of the element $L_0$ in $\mathcal{X}$ is identified with $PGL_2 (\mathbb{Z}_p )$, which is considered with respect to its action on $\mathcal{L}/\approx$. \parskip0pt Note that the rooted tree $\mathcal{L}/\approx$ is divided into levels represented by cosets $u+p^n L_0$ with the same $p^n$. Moreover the vertices $u+p^n L_0$ and $u'+p^{n+1} L_0$ are adjacent if and only if $(\mathbb{Z}_p /p^n \mathbb{Z}_p )(u+p^n L_0 )=(\mathbb{Z}_p /p^n \mathbb{Z}_p )(u' +p^n L_0 )$ (by the definition of $\mathcal{X}$). \begin{thm} \label{PGL} In the notation above let $\Gamma =G= PGL_2 (\mathbb{Z}_p )$. (1) Let $c^{G}_n$ be the number of all $\equiv^{\Gamma}_{n}$-classes meeting $G$ defined with respect to the rooted tree $\mathcal{L}/\approx$. Then the zeta function $\Sigma_{n\ge 0} c^{G}_{n} t^n$ is $\mathbb{Q}$-rational. (2) The group $G$ has a small number of isometry types with respect to the action on $\mathcal{L}/\approx$ and there are continuum many $\Gamma$-types meeting $G$. 
\end{thm} {\em Proof.} (1) Repeating the proof of Proposition \ref{interpretation} we interpret $\mathcal{L}/\approx$ in $\mathbb{Q}_p$ as the set of equivalence classes on a definable subset of $\mathbb{Z}^2_p \times \omega$ (where $\omega \subset \mathbb{Z}$ is the non-negative part of the value group). Then we consider $R= (R_i )_{i\in\omega} = GL_2 (\mathbb{Z}_p )\times \omega$ with respect to the family $E=(E_i )$ of equivalence relations on $R$ as it was described above. The rest follows by Theorem 6.2 of \cite{hrma}. (2) By obvious arguments from linear algebra we may assume that elements of $PGL_2 (\mathbb{Z}_p )$ are presented by upper triangular matrices. Let $(a_{11}, a_{12}, a_{22})$ be a tuple of non-zero entries of such a matrix $g\in G$. Then multiplying $g^n$ with $u=(z_1 ,z_2 )\in \mathbb{Z}_p \times\mathbb{Z}_p$ we obtain a representative of the projective line corresponding to $$ (z_1 +\frac{a_{12}z_2 }{a_{11}-a_{22}}(1-(\frac{a_{22}}{a_{11}})^n), (\frac{a_{22}}{a_{11}})^n z_2 ). $$ Decompose the group $\mathbb{Z}^{\times}_p$ into a direct sum isomorphic to $\mathbb{Z}(p) \oplus \mathbb{Z}_p$, where the factor isomorphic to $\mathbb{Z}(p)$ is represented by the cyclic subgroup $\{ \varepsilon_1 ,...,\varepsilon_{p-1}\}$ of $(p-1)$-th roots of unity. The factor $\mathbb{Z}_p$ is represented by all elements of $\mathbb{Z}^{\times}_p$ which are congruent to 1 mod $p$ (see Chapter 18 in \cite{fuchs}). As above we find $l$ such that all elements of this factor are represented by $l^{z}$, where $z\in p\mathbb{Z}_p$ (see \cite{fuchs}, p.316). Assuming that $a_{22}/a_{11} = \varepsilon_j l^{z}$ we see that $(\frac{a_{22}}{a_{11}})^n \equiv 1$ mod $p^k$ if and only if $\varepsilon^n_j =1$ and $p^{k-1}|nz$. Let $(a'_{11}, a'_{12}, a'_{22})$ be a tuple of non-zero entries of a matrix $g'\in G$ such that $\nu (a_{12})= \nu (a'_{12})$, $a'_{22}/a'_{11} = \varepsilon_j l^{z'}$ and $\nu (z) = \nu (z')$. 
Then for every pair $(z_1 ,z_2 )\in \mathbb{Z}_p \times \mathbb{Z}_p$ the elements $g$ and $g'$ have cycles of the same length at the element of $\mathcal{X}$ represented by $(z_1 ,z_2 )+ p^m L_0$. This proves the statement. $\Box$ \bigskip {\bf Remark.} There is another version of Proposition \ref{interpretation} which does not use definability in $\mathbb{Q}_p$ and which can also be applied to examples of this kind. \begin{prop} \label{interpretation2} Let $\Gamma$ and $G$ be closed subgroups of $GL_d (\mathbb{Z}_p )$. Let $c^{G}_n$ be the number of all $\equiv^{\Gamma}_{n}$-classes meeting $G$ defined with respect to $T_{ad}$. Then the zeta function $\Sigma_{n\ge 0} c^{G}_{n} t^n$ is $\mathbb{Q}$-rational. \end{prop} To prove this proposition we must apply a more involved argument. In fact we must repeat the proof of Theorem 1.2 from \cite{dS} with some slight changes. Since this theorem is outside the main theme of the paper we just mention that our adaptation requires that formula (1.1) from \cite{dS} (counting the number of conjugacy classes in a finite group) must be replaced by the formula $$ c^{G}_n = |\Gamma_n |^{-1} \sum\{ |C_{\Gamma_n}(g)|: g\in GL_d (\mathbb{Z}_p /p^n \mathbb{Z}_p ) $$ $$ \wedge \exists g'\in G_n (g\equiv^{\Gamma}_n g' )\} , $$ where $G_n$ and $\Gamma_n$ are the natural projections of $G$ and $\Gamma$ with respect to $GL_d (\mathbb{Z}_p /p^n \mathbb{Z}_p )$. Since any closed subgroup of $GL_d (\mathbb{Z}_p )$ is compact $p$-adic analytic, the arguments of Section 1 of \cite{dS} show that $G$, $\Gamma$ and $\equiv^{\Gamma}_n$ are definable in the theory of analytic functions from \cite{DvdD}. This allows us to adapt the proof from \cite{dS} to our case. \bigskip \section{Branch groups. Model theoretic aspects. } As we have noted above, branch groups have a large number of isometry types. Can they be defined (in $\mathbb{Q}_p$) as closed subgroups of matrices over $\mathbb{Z}_p$? This is not true. 
Indeed, let $G$ be the profinite completion of Grigorchuk's 3-generated 2-group from \cite{Grig1}. In fact it is shown in \cite{Grig2} that the coefficients $a_n$ of the Hilbert-Poincar\'{e} series of $G$ do not have a polynomial growth. By Theorem 1 of \cite{Grig2} (or Interlude A of \cite{DdSMS}) the group $G$ is not a closed subgroup of matrices over $\mathbb{Z}_p$. \parskip0pt How do branch groups look with respect to other model theoretic properties of profinite groups studied so far? In this section we concentrate on $\omega$-categoricity of branch groups in the language introduced by Z.Chatzidakis in \cite{zoe}. Let $G$ be a profinite group. As in \cite{zoe} to $G$ we associate a structure $S(G)$ of the language $L=(\le ,\sim ,C^2 ,P^3 ,1)$ as follows. The structure $S(G)$ is defined on the set of all cosets $gN$ for all open normal subgroups $N<G$. The symbol $P$ is interpreted as follows: $$ S(G) \models P(g_1 N_1 ,g_2 N_2 , g_3 N_3 )\Leftrightarrow (N_1 =N_2 =N_3 )\wedge (g_1 g_2 N_1 = g_3 N_1 ). $$ The symbol $C$ corresponds to inclusion: $$ S(G)\models C(gN, hM) \Leftrightarrow (N\subseteq M )\wedge (gM=hM). $$ The relation $gN\le hM$ means $N\subseteq M$ and we define $gN\sim hM\Leftrightarrow (gN\le hM )\wedge (hM\le gN)$. The constant $1$ corresponds to $G$. It is convenient to view $S(G)$ as an $\omega$-sorted structure with respect to sorts $S_n =\{ gN :|G:N|=n\}$, $n\in \omega$. In this language the class of structures $S(G)$ becomes elementary. In Section 1.5 of \cite{zoe} some natural axioms of this class are given: it is shown that for any structure $S$ satisfying these axioms the $\sim$-classes naturally form a projective system of finite groups such that $S$ is $S(G)$ for the projective limit $G$ of this system. \parskip0pt We say that $A\subseteq S(G)$ {\em forms a substructure} if $(\forall x,y\in S(G))((x\le y)\wedge (x\in A)\rightarrow y\in A)$ and $(\forall x,y\in A)(\exists z\in A)((z\le x)\wedge (z\le y))$. 
It is straightforward that any substructure $A\subseteq S(G)$ defines a profinite group $H$ with $A=S(H)$ such that $H$ is a continuous homomorphic image of $G$. On the other hand if $\phi :G\rightarrow H$ is a continuous epimorphism of profinite groups, then the map $S(H)\rightarrow S(G)$: $gN \rightarrow \phi^{-1}(gN)$ defines an embedding of $S(H)$ into $S(G)$ as a substructure. The following property can be taken as the definition of $\omega$-categoricity (see Section 1.6 of \cite{zoe}). \begin{definicja} We say that a countable structure $S(G)$ is $\omega$-categorical if for all $n, j_1 ,...,j_k \in \omega$ the group $Aut(S(G))$ has finitely many orbits on $(S_{j_1} \cup ...\cup S_{j_k})^{n}$ (i.e. $Aut(S(G))$ is {\em oligomorphic}). \end{definicja} It is easy to see that \begin{quote} for every finitely generated profinite group $G$ each sort $S_i (G)$ is finite (see \cite{DdSMS}, Proposition 1.6); i.e. $S(G)$ is $\omega$-categorical. \end{quote} Z.Chatzidakis has noticed in \cite{zoe} (Theorem 2.3) that when $G$ has the {\em Iwasawa Property} (IP), the structure $S(G)$ is $\omega$-categorical. We remind the reader that $G$ has (IP) if for every epimorphism $\theta :H\rightarrow K$ of finite groups with $H\in Im(G)$, and for every epimorphism $\phi :G\rightarrow K$ there is an epimorphism $\psi :G\rightarrow H$ such that $\phi =\theta \cdot \psi$. Note that (IP) is very close to projectivity (which in the case of pro-$p$-groups (IP) is equivalent to $p$-freeness, see \cite{FJ}, Chapter 20). It is worth noting that all closed subgroups of $GL_n (\mathbb{Z}_p )$ have $\omega$-categorical structures $S(G)$. This follows from the fact that they have a finite number of open subgroups of index $k$, $k\in \omega$ (moreover the corresponding function counting the number of subgroups is polynomial \cite{DdSMS}). Thus groups considered in the previous sections usually had $\omega$-categorical structures $S(G)$ (excluding $\mathbb{Z}(p)^{\omega}$). 
In the following proposition we consider {\em just infinite profinite groups}. This means that each proper continuous homomorphic image of the group is finite. Just infinite branch groups are described in Theorem 4 of \cite{Grig}. It states that a profinite group with a branch structure $\{ L_i ,H_i :i\in \omega\}$ is just infinite if and only if the commutator subgroup $[L_i ,L_i ]$ is of finite index in $L_i$ for all $i$. \begin{prop} \label{justinf} Let $G<Iso(T)$ be a weakly branch profinite group such that the restricted stabilizer $Rs(n)$ is level-transitive on every subtree of level number $n$. (1) Then $S(G)$ is $\omega$-categorical if and only if every sort $S_k$ is finite. (2) If $G$ is additionally a branch group which is just infinite, then $S(G)$ is $\omega$-categorical. \end{prop} {\em Proof.} (1) The first statement of the proposition is a straightforward consequence of Theorem 7.5 and Proposition 8.1 of \cite{LN}. Indeed each automorphism $\phi$ of $S(G)$ naturally defines a continuous automorphism of the group $G$. By \cite{LN} under our assumptions, each automorphism of $G$ is induced by conjugation of an isometry $h$ of $T$ such that $G^h = G$. This means that if an open subgroup $N<G$ is the pointwise stabilizer in $G$ of the first $l$ levels of $T$, then $\phi (N)=N$. Since each open subgroup of $G$ contains such a subgroup $N$ we easily see that all $Aut(S(G))$-orbits on $S(G)$ are finite. (2) Let us show that any open normal subgroup $N<G$ has a finite orbit with respect to automorphisms of $G$ induced by conjugation by isometries of $T$. Let groups $L_i$ and $H_i$ be defined as in the definition of branch groups (see Section 1.1). Since $G$ is just infinite, by Theorem 4 of \cite{Grig} the commutator subgroup $[L_i ,L_i ]$ is of finite index in $L_i$, and thus $[H_i ,H_i]$ is of finite index in $G$. 
On the other hand for every $n$ there is a number $l=l(n)$ such that every subgroup of $G$ of index $n$ contains an element which does not fix the subtree $T_l$ pointwise. \parskip0pt Assume $|G:N|=n$ and $N$ is normal in $G$. Find an element $g\in N$ which does not fix the subtree $T_l$ pointwise for $l=l(n)$. By the proof of Theorem 4 the group $N$ contains the commutator subgroup $[H_{i+1} ,H_{i+1}]$. Since the index $|G:[H_{i+1} ,H_{i+1} ]|$ is finite the number of normal subgroups of $G$ of index $n$ is finite. $\Box$ \bigskip {\bf Example.} Let $G$ be the profinite completion of Grigorchuk's 3-generated 2-group from \cite{Grig1}. Since $G$ is a finitely generated profinite group, $S(G)$ is $\omega$-categorical. It is worth noting that it is proved in \cite{Grig} that $G$ is a just infinite branch group. \bigskip In the remained part of the section we study $\omega$-categoricity of {\em wreath branch groups} (see \cite{LN}). Since it can happen that such a group is not finitely generated and not just infinite, Proposition \ref{justinf} does not work very well here. Let $A_i$, $i\in \omega$, be the sequence of alphabets defining $T$ as in Introduction. Assume that for every $n$ we have chosen a permutation group $P_i \le Sym (A_i )$. Then the subgroup of all labellings $\{ \gamma (v)\}_{v\in T}$ with $\gamma (v)\in P_{i+1}$ for $v \in layer_i$ is {\em the iterated wreath product} of $(P_i ,A_i )$, $i\in \{ 1,2,...\}$ and is denoted by $\wr_{\omega} (P_i ,A_i )$. It is noticed in \cite{LN} (Theorem 8.2) that this group is a branch group. Thus it is called in \cite{LN} a wreath branch group. \begin{prop} (1) Assume that all groups $(P_i ,A_i )$ are simple transitive permutation groups. Then the group $G= \wr_{\omega} (P_i ,A_i )$ has the Iwasawa property (IP) and $S(G)$ is $\omega$-categorical. (2) The group $Iso(T)$ (i.e. $\wr_{\omega} (Sym (A_i ),A_i )$ ) does not have the Iwasawa property (IP) and $S(G)$ is not $\omega$-categorical. 
\end{prop} {\em Proof.} (1) This statement follows from the observation that every closed normal subgroup of $G$ is of the form $Ker \pi_n$ for appropriate $n$. We think that this fact is folklore, but we give a sketch of it. Let $K$ be a closed normal subgroup of $G$. Let $n$ be the least natural number such that there is $g\in K\setminus \{ 1\}$ of {\em depth} $n$, i.e. $n$ is the minimal number such that in the corresponding labelling $\{ \gamma (v)\}_{v\in T}$ there is $v\in T_n$ with $\gamma (v)\not= 1$. Then we claim that $K= Ker\pi_{n}$. To see this it is enough to show that for every $l\ge n$ every isometry of the form $\{ \delta (v)\}_{v\in T}$ such that $T_l$ has a unique non-trivial $\delta (w)$ and $w\in layer_l$, is contained in $K$. Let $g$ and $n$ be as above and $v_0\in T_n$ witness that $g$ is of depth $n$. We start with the observation that for every $l\ge n$ there is $h\in K$ of depth $l$. This $h$ can be chosen as an appropriate commutator $[g,f]$ where $f\in G$ is of depth $l$. For example, if for all $w\in T_l$ which are below $v$ we have $\gamma (w)=1$, then we just arrange that the labelling of $f$ is non-trivial only at two points $w ,w^g \in T_l$ below $v$ and the corresponding $(\sigma (w))^{-1}$ is not equal to $\sigma (w^g )$. To finish the proof of our claim take an isometry $g_{\delta}\in K$ defined by $\{ \delta (v)\}_{v\in T}$ as in the previous paragraph. Choose $w\in layer_l$ with non-trivial $\delta (w)$. Conjugating $g_{\delta}$ by elements representing labellings with the unique non-trivial element at $w$ we generate a subgroup of $K$ having an element $g'$ such that the corresponding labelling is non-trivial at $w$ but trivial at all points of $T_l \setminus \{ w\}$. By simplicity of $(P_{l+1} ,A_{l+1} )$, the point $w$ in our $g'$ can be labelled by any element of $P_{l+1}$. It is also clear that conjugating $g'$ by elements of $G$ we can move $w$ to any element of $layer_l$. This finishes the proof. 
(2) Here we use implicitly the characterization of closed normal subgroups of the group $Iso(T)= \wr_{\omega} (Sym (A_i ),A_i )$ from \cite{susz}. For every natural $l$ consider the subgroup (denoted by $N_l$) of all labellings $\{ \gamma (v)\}_{v\in T}$ such that the product $\prod \{ sgn(\gamma (v)):v \in layer_l \}$ is $0$. By $sgn(\sigma )$ we denote the parity of the permutation $\sigma$ (i.e. it equals 1 when it is odd). It is easy to see that $N_l$ is a normal closed subgroup of $Iso(T)$, $l\in \omega$, and $|Iso(T):N_l |=2$. Thus the sort of $S(Iso(T))$ consisting of normal open subgroups of index 2, is infinite. Since $Iso(T)$ is a branch group by Proposition \ref{justinf} we see that $S(Iso(T))$ is not $\omega$-categorical. Thus it does not satisfy (IP). $\Box$ \bigskip Note that the Iwasawa property is slightly casual in the context of branch groups. \begin{prop} Let $T=T_{\bar{m}}$, where 8 divides some $m_1\cdot ...\cdot m_i$. Let $G<Iso(T)$ be a profinite branch group with the corresponding system $\{ H_i ,L_i :i\in\omega\}$. Then there is a profinite branch group $G^{*}$ which does not satisfy (IP), such that for some natural numbers $k$ and $k'$ the branch system of $G^{*}$ with indexes above $k$ becomes $\{ H_i ,L_i :i\ge k'\}$. Moreover $S(G^* )$ is $\omega$-categorical if and only if $S(G)$ is $\omega$-categorical. \end{prop} {\em Proof.} To prove this find $k$ such that the number $l=|layer_k |$ is of the form $8 l'$. Consider the tree $T'=T_{\bar{m}'}$ with $m'_1 =l$ and $m'_i = |layer_{k+i-1}|$ for $i\ge 2$. The group $G^{*}$ acts on $T'$ as follows. Let the kernel $Ker \pi_1 (G^{*} )$ be $H_k$ with respect to the action copied from the action of $H_k$ on the $k$-th layer of $T$. Define $\pi_1 (G^{*})$ to be a regular action of $\mathbb{Z}(2)\oplus \mathbb{Z}(4 l')$ on the first layer of $T'$. Then let $G^{*}$ act on $T'$ as the wreath product of these groups. 
\parskip0pt To see that $G^*$ does not satisfy (IP) consider the resulting homomorphism $\phi$ from $$ G^* \rightarrow \pi_1 (G^*)\rightarrow \mathbb{Z}(2), $$ where the second one is the projection to $\mathbb{Z}(2)$. As an epimorphism $\theta$ of finite groups $H\rightarrow K$ take $\mathbb{Z}(4l') \rightarrow \mathbb{Z}(2)$ induced by $2l'$-th exponentiation. If $\theta:G^* \rightarrow \mathbb{Z}(4l')$ makes this diagram commutative, then the generator of $\mathbb{Z}(2)$ in the decomposition $\mathbb{Z}(2)\oplus \mathbb{Z}(4l')$ must have a $2l'$-root. This is a contradiction. The second statement of the proposition follows from the fact that for any profinite group $H$ and an open subgroup $P$ the structure $S(H)$ is $\omega$-categorical if and only if $S(P)$ is $\omega$-categorical. This follows from the fact that the map $K\rightarrow P\cap K$ for open subgroups $K<H$ is finite-to-one. $\Box$ \bigskip As we already noted a finitely generated profinite group $G$ has finitely many open subgroups of index $n$. Thus the structure $S(G)$ for any closed subgroup $G<GL_d (\mathbb{Z}_p )$ or $PGL_d (\mathbb{Z}_p )$, realizes the simplest case. This shows that if any of the groups which we mentioned in the introduction, has $\omega$-categorical $S(G)$, then it becomes very simple. When it is not $\omega$-categorical, then it becomes extremely complicated branch group (very close to the whole $Iso(T)$). This shows that the properties studied in Sections 2 and 3 correspond to our example much better than $\omega$-categoricity of $S(G)$.
{ "redpajama_set_name": "RedPajamaArXiv" }
3,508
Żabia Wola (gromada w powiecie bychawskim) (lubelski/bychawski) Żabia Wola (gromada w powiecie grodziskomazowieckim)
{ "redpajama_set_name": "RedPajamaWikipedia" }
8,791
The Rugby Championship The Power of the Pacific Teams & Tickets Results & Fixtures NSW Waratahs Nemani Nadolo signs with NSW Waratahs The NSW Waratahs can today confirm the signing of Fijian International Nemani Nadolo on a one-year deal for the 2023 Super Rugby Pacific season. One of world rugby's box office players, Nadolo joins the Waratahs with an illustrious Test and club resume, adding a wealth of knowledge, size, and experience to the Waratahs' backline. Born in Sigatoka, Fiji, Nadolo moved to Brisbane at three months of age before attending Nudgee College and representing the Queensland Schoolboys team. The big ball-carrying winger made his Test debut for Fiji in 2010, where Waratahs Head Coach Darren Coleman was the Assistant Coach, against Australia and has scored 22 tries in 32 Tests and starred for the Flying Fijians in the 2015 Rugby World Cup. The Fijian flyer is no stranger to the Waratahs as he was originally contracted to the team in 2008. Limited opportunities saw Nadolo head abroad, with stints in France, England, and Japan before returning to Super Rugby with the Crusaders. Nadolo was one of the standout players for the Crusaders in 2014, ending the season as the competition's equal leading try-scorer with 12 tries, including scoring in their Super Rugby Final loss to the Waratahs. Stints at Montpellier and the Leicester Tigers followed, with Nadolo finishing the 2021-22 season with ten tries from 21 matches in his most recent season for the Tigers in Premiership Rugby. After a stellar club career spanning Europe and New Zealand, the Fijian winger was excited to return to Australia and play for the Waratahs. Nemani Nadolo scoring for Fiji against Georgia in 2020 Autumn Nations Cup Nemani Nadolo said, "Who would've thought after leaving these doors 12 years ago I'd get another opportunity to be part of the club again. "I'm grateful to Darren and the board for having faith in me and giving me another opportunity. 
"NSW are heading in the right direction and I'm glad I can be a part of their journey." Waratahs Head Coach Darren Coleman was thrilled to add another experienced Test player with a unique point of difference to his roster. "We've all seen what Nemani has done in World Rugby in the last ten years and it's incredibly exciting to have him in a sky-blue jersey," Coleman said. "One of the things we identified from our season review was the need to add some size and power to our roster, and Nemani brings a lot of size and physicality which will add an extra dimension to our team. "He's motivated to get back to Sydney have another crack at Super Rugby and push his claims to get in the Fijian Test team for their 2023 Rugby World Cup campaign," Nadolo will join the squad on December 1 for the Waratahs' pre-season. The NSW Waratahs are also pleased to announce the signing of loosehead prop Tom Lambert on a two-year deal, bolstering the team's front-row stocks ahead of their 2023 Super Rugby Pacific campaign. A product of the NSW pathways, born in Sydney and schooled at Trinity Grammar School, Lambert represented Australian Schools and was part of the Waratahs Academy before heading overseas to Scotland in 2020. Lambert represented Scotland's under-20s side during the 2020 Six Nations and signed with Glasgow Warriors on an academy contract before graduating to the main team in 2021 The Australia-born prop has spent two seasons with the Warriors and brings invaluable European front-row experience to the Waratahs ahead of their 2023 campaign. 
SuperRugby Broadcasters Sign up for your digital programme Match Judiciary Super Rugby News Final loss still hurts for Papalii 50 Days Until Melbourne Super Round SUPER RUGBY PACIFIC LOCKED IN UNTIL 2030 Rebels looking to be 'fast and fearless' in 2023 NZ SUPER RUGBY PACIFIC 2023 SQUADS ANNOUNCED Super Rugby Pacific 2023 Match Schedule Super Round Melbourne Returns in 2023 NSW Waratahs Sign New Forwards Trio Fijian Drua Appoint New CEO Melbourne Rebels Revamp Their High Performance Team Melbourne Rebels Sanzaar: The home of rugby in the southern hemisphere About Sanzaar Contact Sanzaar License footage from Teams & Players © 2023 SANZAAR Super Rugby Site by webqem
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
7,578
Q: How to rewrite Headless Autodesk Forge Viewer in ES6 I'm trying to write the ES6 version of the Headless Forge Viewer implemented in this link: https://forge.autodesk.com/en/docs/viewer/v2/tutorials/headless/ I've installed the npm package for the Forge Viewer, and I succeeded in doing most of the code until I reached these two lines: var modelNodes = viewerApp.bubble.search(av.BubbleNode.MODEL_NODE); // 3D designs var sheetNodes = viewerApp.bubble.search(av.BubbleNode.SHEET_NODE); // 2D designs I don't know how to define the "av" in TypeScript. Can anyone help please? A: I just figured out the answer: just replace the av with Autodesk.Viewing and everything works like a charm :)
{ "redpajama_set_name": "RedPajamaStackExchange" }
3,205
\section{Introduction} \noindent Let $(M^{2n},\omega)$ be a symplectic manifold. The notion of \emph{symplectically harmonic form} was introduced by Brylinski in \cite{Br} as a closed form $\alpha$ such that its symplectic star is also closed, i.e. $d\alpha=0=d*\alpha$. Mathieu~\cite{Ma} proved (see also \cite{Yan} for a different proof) that every de Rham cohomology class has a symplectically harmonic representative if and only if $(M^{2n},\omega)$ satisfies the Hard Lefschetz Condition (HLC for short), i.e. the homomorphisms $L^k\colon H^{n-k}(M)\longrightarrow H^{n+k}(M)$ are surjective for every $1\leq k\leq n$. Here $H^q(M)$ denotes the $q$-th de Rham cohomology group of $M$ and $L^k$ is the homomorphism given by the cup product by the class $[\omega^k]\in H^{2k}(M)$. As there exist many symplectic manifolds which do not satisfy the HLC, one has that the quotient $H^q_{\rm hr}(M)=\Omega^q_{\rm hr}(M)/(\Omega^q_{\rm hr}(M)\cap \text{im}\, d)$, $\Omega^q_{\rm hr}(M)$ being the space of symplectically harmonic $q$-forms, measures the de Rham cohomology classes in $H^q(M)$ containing harmonic representative. Additional symplectic invariants of cohomological type were introduced by Bouch\'e \cite{B} as follows. A differential form $\alpha$ is called \emph{coeffective} if it annihilates $\omega$, i.e. $\alpha\wedge\omega=0$. The space of coeffective forms with the (restriction of the) exterior derivative provides a subcomplex of the de Rham complex that is elliptic in any degree $q\not= n$. It turns out~\cite{B} that for compact K\"ahler manifolds $(M^{2n},\omega)$ and for every $q\geq n+1$, the $q$-th coeffective cohomology group, that we will denote here by $H^q_{(1)}(M)$, is isomorphic to the \emph{$[\omega]$-truncated $q$-th de Rham group}. However, this is no longer true for arbitrary compact symplectic manifolds~\cite{FIL1}. 
On the other hand, Tseng and Yau have developed in \cite{TY1,TY2} a symplectic Hodge theory by considering various cohomologies where the \emph{primitive} cohomologies $PH_{d+d^\Lambda}(M), PH_{dd^\Lambda}(M), PH_{\partial_+}(M)$ and $PH_{\partial_-}(M)$ play a central role. Recently, Eastwood~\cite{E} has introduced an extension of the coeffective complex which is elliptic in any degree and such that the corresponding cohomology groups are isomorphic to the primitive cohomology groups. The symplectically harmonic cohomology and the coeffective cohomology, to our knowledge, have been studied separately in the literature. Our first goal in this paper is to obtain some relations between both cohomologies by considering a natural generalization of the coeffective cohomology, which in addition will allow us to provide a coeffective version of the \emph{filtered} cohomologies. The latter have recently been introduced by Tsai, Tseng and Yau \cite{TY3}, and extend the primitive cohomologies \cite{TY1,TY2}. Another aspect in the study of the symplectic harmonicity is the notion of \emph{flexibility} \cite{IRTU1,Yan}. This is motivated by the following question, which is related to some problems of group-theoretical hydrodynamics~\cite{AK}, posed by Khesin and McDuff (see \cite{Yan}): which closed manifolds $M$ possess a continuous family $\omega_t$ of symplectic forms such that the dimension of $H^q_{\rm hr}(M,\omega_t)$ varies with respect to $t$? In~\cite{Yan} Yan proved the existence of a 4-dimensional flexible manifold, whereas in \cite{IRTU1} several 6-dimensional nilmanifolds satisfying such property were found. Our second goal in this paper is to relate harmonic flexibility to corresponding notions of flexibility for the generalized coeffective and filtered cohomologies, as well as to construct closed manifolds which are flexible with respect to all these symplectic cohomologies. In greater detail, the paper is structured as follows. 
In Section~\ref{gen-coef-cohom} we introduce and study the generalized coeffective cohomologies of a symplectic manifold $(M^{2n},\omega)$. For each integer $k$, $1\leq k\leq n$, we consider the complex of $k$-coeffective differential forms as the subcomplex of de Rham one constituted by all the forms that annihilate $\omega^k$. The associated cohomology groups are denoted by $H^q_{(k)}(M)$. This complex is elliptic in any degree $q\not= n-k+1$, however one can define in a natural way a quotient $\hat H^{n-k+1}(M)$ of $H^{n-k+1}_{(k)}(M)$ which shares the same properties as the cohomology groups $H^q_{(k)}(M)$, $q\geq n-k+2$ (see Propositions~\ref{propiedades-finitas} and~\ref{propiedades-finitas-para-c-sombrero}). The spaces $\hat H^{1}(M), \ldots, \hat H^{n}(M)$ play an important role in this paper since they will allow us to relate the different symplectic cohomologies involved. We will refer to the collection \begin{equation}\label{todos-los-gen-coef-groups} \hat H^{n-k+1}(M),\ H^{n-k+2}_{(k)}(M),\ldots,\ H^{2n}_{(k)}(M),\quad\ 1\leq k\leq n, \end{equation} as the \emph{generalized coeffective} cohomology groups of the symplectic manifold $(M^{2n},\omega)$. It turns out that these spaces are symplectic invariants that only depend on the de Rham class $[\omega^k] \in H^{2k}(M)$ (see Remark~\ref{coef-inv-cohom-class} and Lemma~\ref{iso-equiv-coef}). When $M$ is of finite type, in Proposition~\ref{invtopologicos} we prove that, for each $1\leq k\leq n$, the alternating sum $\chi^{(k)}(M)$ of the dimensions of the generalized $k$-coeffective cohomology groups only depends on the topology of the manifold $M$. Eastwood~\cite{E} has introduced an elliptic extension of the usual coeffective complex (i.e. $k=1$) such that the corresponding cohomology groups are isomorphic to primitive cohomology groups defined by Tseng and Yau~\cite{TY1,TY2}. 
In Section~\ref{exten-gen-coef-cohom}, for any $1\leq k\leq n$, we consider an extension of the $k$-coeffective complex, which is also elliptic, whose cohomology groups $\check{H}^q_{(k)}(M)$ $(0\leq q\leq 2n+2k-1)$ are isomorphic to the filtered cohomology groups introduced by Tsai, Tseng and Yau in \cite{TY3} (see Remark~\ref{comparison with filtered} for details); in particular, $$ \check{H}^{q}_{(1)}(M) \cong P H^q_{\partial_+}(M), \quad\ \check{H}^{2n-q+1}_{(1)}(M) \cong H^{2n-q}_{(1)}(M) \cong P H^q_{\partial_-}(M), \quad\ 0\leq q\leq n-1, $$ and $$ \check{H}^{n+k-1}_{(k)}(M) \cong P H^{n-k+1}_{dd^\Lambda}(M), \quad\ \check{H}^{n+k}_{(k)}(M) \cong P H^{n-k+1}_{d+d^\Lambda}(M), \quad\ 1\leq k\leq n. $$ In Proposition~\ref{propiedades-finitas-suc-check} we show that these extended cohomologies also satisfy the main properties of the generalized coeffective cohomology groups. When $M$ is of finite type, we consider $\check{\chi}^{(k)}_+(M)$ as the alternating sum of the dimensions of the cohomology groups of the first half of the extended complex, and in Corollary~\ref{HLC-caracterizacion} we prove the following characterization of the HLC: $(M^{2n},\omega)$ satisfies the HLC if and only if $\check{\chi}^{(k)}_+(M)=\chi^{(k)}(M)$ for every $1\leq k\leq n$. In Section~\ref{relacion-con-armonica} we obtain some relations of the generalized coeffective cohomologies (and therefore also of the filtered cohomologies) with the symplectically harmonic cohomology. Concretely, using the description of $H^q_{\rm hr}(M)$ obtained in \cite{IRTU1,Ym,Yan} we prove that the generalized coeffective cohomologies measure the differences between the harmonic cohomology groups in the following sense: if $(M^{2n},\omega)$ is a symplectic manifold of finite type, then $$ \dim H^{n-k+1}_{\rm hr}(M) - \dim H^{n+k+1}_{\rm hr}(M) = \dim \hat H^{n-k+1}(M) $$ for every $k=1,\ldots,n$ (see Theorem~\ref{rel_armonica_coef}). 
As a consequence, we find the relation between the dimension of the primitive cohomology group $P H^q_{d+d^\Lambda}(M)$ and the harmonic cohomology for $q=1,2,3$. We introduce in Section~\ref{flex} the notion of generalized coeffective flexibility and filtered flexibility, as an analogous notion of the concept of harmonic flexibility. We say that a closed smooth manifold $M^{2n}$ is \texttt{c}-\emph{flexible} (resp. \texttt{f}-\emph{flexible} or \texttt{h}-\emph{flexible}) if $M$ possesses a continuous family of symplectic forms $\omega_t$ such that the dimension of some generalized coeffective (resp. filtered or symplectically harmonic) cohomology group varies with $t$. We prove in Theorem~\ref{dim4} that in four dimensions $M$ is never \emph{\texttt{c}}-flexible, and that $M$ is \emph{\texttt{f}}-flexible if and only if it is \emph{\texttt{h}}-flexible. This result allows us to prove, for each $n\geq 2$, the existence of $2n$-dimensional \emph{\texttt{f}}-flexible closed manifolds having a continuous family of symplectic forms $\omega_t$ such that the dimension of the primitive cohomology group $P H^{2}_{d+d^\Lambda}(M,\omega_t)$ varies with respect to $t$ (see Theorem~\ref{existencia-f-flexible-en-cualquier-dimension}). In Theorem~\ref{dim6} and Proposition~\ref{dim2n} we study flexibility in higher dimensions; in particular, it turns out that in dimension $2n\geq 6$, if $M$ is \emph{\texttt{c}}-flexible then $M$ is \emph{\texttt{f}}-flexible or \emph{\texttt{h}}-flexible. This shows that coeffective flexibility is a stronger condition than the other flexibilities. All the cohomology groups can be computed explicitly for symplectic solvmanifolds satisfying the Mostow condition, in particular for any symplectic nilmanifold. In Section~\ref{sec_ex} we consider the class of 6-dimensional nilmanifolds and compute the dimensions of all the cohomology groups for any symplectic form. 
This extends the previous study for the symplectically harmonic cohomology given in \cite{IRTU1,IRTU2}. As a consequence, we identify all the 6-dimensional nilmanifolds which are \emph{\texttt{c}}-flexible, \emph{\texttt{f}}-flexible or \emph{\texttt{h}}-flexible (see Table~1). \section{Generalized coeffective cohomologies}\label{gen-coef-cohom} \noindent Let $(M^{2n},\omega)$ be a symplectic manifold and let $k$ be an integer such that $1\leq k\leq n$. Next we introduce the notion of $k$-coeffective forms. \begin{definition}\label{def-gen-coef} {\rm A $q$-form $\alpha$ on $M$ is said to be \emph{$k$-coeffective} if $\alpha$ annihilates the form $\omega^k$, i.e. $\alpha\wedge \omega^k=0$. The space of $k$-coeffective forms of degree $q$ will be denoted by ${\mathcal A}^q_{(k)}(M,\omega)$, or simply ${\mathcal A}^q_{(k)}(M)$. } \end{definition} \begin{remark}\label{limit-cases} {\rm The above definition has also sense in the ``limit'' case $k=n+1$ because $\omega^{n+1}=0$ and then ${\mathcal A}^*_{(n+1)}(M)=\Omega^*(M)$. Also the case $k=0$ has sense if we consider $\omega^0$ as the constant function 1, i.e. ${\mathcal A}^*_{(0)}(M)=\{0\}$. Thus, there exists the following strictly increasing sequence of differential ideals $$ \{0\}={\mathcal A}^*_{(0)}(M) \subset {\mathcal A}^*_{(1)}(M) \subset \cdots \subset {\mathcal A}^*_{(n)}(M) \subset {\mathcal A}^*_{(n+1)}(M) =\Omega^*(M). $$ } \end{remark} Since for each $k$ the space ${\mathcal A}^*_{(k)}(M)$ is closed by $d$, we can consider the \emph{$k$-coeffective complex} \begin{equation}\label{complejo-k-coef} \xymatrix{\cdots\ar [r]^-d &{\mathcal A}^{q-1}_{(k)}(M)\ar[r]^d &{\mathcal A}^{q}_{(k)}(M)\ar[r]^d &{\mathcal A}^{q+1}_{(k)}(M)\ar[r]^-d&\cdots}, \end{equation} which is a subcomplex of the standard de Rham complex $(\Omega^*(M),d)$. 
\begin{definition}\label{def-gen-coef-cohom} {\rm The \emph{$q$-th $k$-coeffective cohomology group} will be denoted by $$H^q_{(k)}(M)=\frac{\ker\,\{d:{\mathcal A}^q_{(k)}(M)\longrightarrow {\mathcal A}^{q+1}_{(k)}(M)\}} {\textrm{im}\,\{d:{\mathcal A}^{q-1}_{(k)}(M)\longrightarrow {\mathcal A}^{q}_{(k)}(M)\}}. $$ } \end{definition} It is clear that the $k$-coeffective cohomology groups are invariant by symplectomorphism. Moreover, we will show below that, for each $k$, they are invariants of the de Rham class $[\omega^k]\in H^{2k}(M)$. Let $L_{\omega}^k\colon\Omega^*(M)\longrightarrow \Omega^{*}(M)$ be given by $L_{\omega}^k(\alpha)=\alpha\wedge \omega^{k}$. Since ${\mathcal A}^q_{(k)}(M) =\ker \{L_{\omega}^k:\Omega^q(M)\longrightarrow \Omega^{q+2k}(M)\}$ and the map $L_{\omega}^k:\Omega^q(M)\longrightarrow\Omega^{q+2k}(M)$ is injective for any $q\leq n-k$ and surjective for any $q\geq n-k$, one has that $H^q_{(k)}(M)=0$ for $q\leq n-k$ and $H^q_{(k)}(M)\cong H^q(M)$ for every $q\geq 2n-2k+2$. 
The short exact sequence $$\xymatrix{0 \ar[r] &{\mathcal A}^*_{(k)}(M)\ar[r]^-i &\Omega^*(M)\ar[r]^-{L_{\omega}^k} &L_{\omega}^k(\Omega^{*}(M)) \ar[r] &0,}$$ where $i$ denotes the inclusion, provides the following short exact sequence of complexes \xymatrixcolsep{2pc}\xymatrixrowsep{1.5pc} $$\xymatrix{ &0 &0 &0 \\ \cdots\ar[r]^-d &L_{\omega}^k(\Omega^{q-1}(M))\ar[r]^d\ar[u] &L_{\omega}^k(\Omega^{q}(M))\ar[r]^d\ar[u] &L_{\omega}^k(\Omega^{q+1}(M))\ar[u]\ar[r]^-d &\cdots \\ \cdots\ar[r]^-d &\Omega^{q-1}(M)\ar[r]^d\ar[u]^{L_{\omega}^k} &\Omega^{q}(M)\ar[r]^d\ar[u]^{L_{\omega}^k} &\Omega^{q+1}(M)\ar[u]^{L_{\omega}^k}\ar[r]^d &\cdots\\ \cdots\ar[r]^-d&{\mathcal A}^{q-1}_{(k)}(M)\ar[r]^d\ar[u]^-i &{\mathcal A}^q_{(k)}(M)\ar[r]^d\ar[u]^-i &{\mathcal A}^{q+1}_{(k)}(M)\ar[u]^-i\ar[r]^d &\cdots \\ &0\ar[u] &0\ar[u] &0\ar[u] }$$ Now, since $L_{\omega}^k(\Omega^{q-2k}(M))=\Omega^q(M)$ for any $q\geq n+k$ we have that $H^{q}(L_{\omega}^k(\Omega^{*}(M)))=H^q(M)$ for $q\geq n+k+1$, and therefore the associated long exact sequence in cohomology is \xymatrixcolsep{3pc}\xymatrixrowsep{0.8pc} $$\xymatrix{ 0\ar[r]& H^{n-k}(M)\ar[r]^-{L^k} &H^{n+k}(L_{\omega}^k(\Omega^{*}(M))) \ar[r]^-{f_{n-k+1}} &H^{n-k+1}_{(k)}(M)&}$$ \begin{equation}\label{selarga} \xymatrixcolsep{3.5pc}\xymatrix{ \ar[r]^-{H(i)}& H^{n-k+1}(M)\ar[r]^-{L^k} &H^{n+k+1}(M)\ar[r]^-{f_{n-k+2}}& H^{n-k+2}_{(k)}(M)&} \end{equation} \xymatrixcolsep{3pc}$$ \xymatrix{\ar[r]^-{H(i)}& H^{n-k+2}(M)\ar[r]^-{L^k} &H^{n+k+2}(M)\ar[r]^-{f_{n-k+3}}&H^{n-k+3}_{(k)}(M)\ \cdots\, ,&} $$ where $H(i)$ and $L^k$ are the homomorphisms in cohomology naturally induced by $i$ and $L_{\omega}^k$, respectively, and $f_{q}$ is the connecting homomorphism. Recall that $f_{q}$ is defined by $f_{q}([\alpha])=[d\beta]$, where $\beta\in\Omega^{q-1}(M)$ is any $(q-1)$-form satisfying $L_{\omega}^k(\beta)=\alpha$. 
\begin{definition}\label{def-gen-coef-numbers} {\rm When the group $H^q_{(k)}(M)$ has finite dimension, we will denote it by $c_q^{(k)}(M)$ and we shall refer to it as the \emph{$q$-th $k$-coeffective number} of $(M^{2n},\omega)$. } \end{definition} Notice that $c_q^{(k)}(M)=0$ for any $q\leq n-k$, and $c_q^{(k)}(M)= b_q(M)$ for any $q\geq 2n-2k+2$. \medskip In what follows, by a manifold of \emph{finite type} we mean a manifold, not necessarily compact, such that its Betti numbers $b_q(M)=\dim H^q(M)$ are all finite. \begin{proposition}\label{propiedades-finitas} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type and let $1\leq k\leq n$. Then, for every $q\geq n-k+2$, the following properties hold: \begin{enumerate} \item[{\rm (i)}] \emph{Finiteness and bounds for the coeffective numbers:} the group $H^q_{(k)}(M)$ is finite dimensional and its dimension $c_q^{(k)}(M)$ satisfies the inequalities \begin{equation}\label{des} b_q(M)-b_{q+2k}(M)\leq c_q^{(k)}(M)\leq b_q(M)+b_{q+2k-1}(M). \end{equation} \item[{\rm (ii)}] \emph{Symplectic manifolds satisfying the HLC:} if $(M^{2n},\omega)$ satisfies the HLC then the lower bound in \eqref{des} is attained, i.e. $$ c_q^{(k)}(M)=b_q(M) - b_{q+2k}(M). $$ \item[{\rm (iii)}] \emph{Exact symplectic manifolds:} if $\omega$ is exact then the upper bound in \eqref{des} is attained, i.e. $$ c_q^{(k)}(M)=b_q(M)+b_{q+2k-1}(M). $$ \item[{\rm (iv)}] \emph{Poincar\'e lemma:} if $U$ is the open unit disk in $\mathbb{R}^{2n}$ with the standard symplectic form $\omega=\sum_{i=1}^n dx^i\wedge dx^{n+i}$, then $c_q^{(k)}(U)=0$. \end{enumerate} \end{proposition} \begin{proof} From the long exact sequence \eqref{selarga}, one has for any $q\geq n-k+2$ the five-terms exact sequence \xymatrixcolsep{1.6pc} \begin{equation}\label{5terminos} \xymatrix{0\ar[r]&\text{im}\,f_{q}\, \hookrightarrow\, H^q_{(k)}(M)\ar[r]^-{H(i)}& H^q(M)\ar[r]^-{L^k}&H^{q+2k}(M)\ar[r]^-{f_{q+1}\,\,}&\,\text{im}\,f_{q+1}\ar[r]&0}. 
\end{equation} If the manifold is of finite type then it is clear that $H^q_{(k)}(M)$ has finite dimension for any $q\geq n-k+2$. Moreover, taking dimensions in \eqref{5terminos} $$ \xymatrix{c_q^{(k)}(M) =\text{dim (im}\,f_{q})+b_q(M) -b_{q+2k}(M)+\text{dim (im}\,f_{q+1})}\!, $$ which implies the inequalities \eqref{des}. This completes the proof of {\rm (i)}. Property {\rm (ii)} is a direct consequence of \eqref{5terminos} taking into account that HLC implies that $L^k\colon H^{q-1}(M)\longrightarrow H^{q+2k-1}(M)$ are surjective and then the connecting homomorphisms $f_{q}$ vanish for every $q\geq n-k+2$. Property {\rm (iii)} is a consequence of \eqref{5terminos} since $L^k\colon H^{q-1}(M)\longrightarrow H^{q+2k-1}(M)$ are identically zero because~$\omega$ is exact, and then the connecting homomorphisms $f_{q}$ are injective for every $q\geq n-k+2$. Finally {\rm (iv)} is a direct consequence of {\rm (iii)} since $\omega$ is exact on $U$. \end{proof} Notice that for $k=1$ the previous proposition was proved by Fern\'andez, Ib\'a\~nez and de Le\'on in \cite{FIL2}. It is easy to check (see \cite{B} for $k=1$) that, for each $1\leq k\leq n$, the $k$-coeffective complex \eqref{complejo-k-coef} is elliptic in any degree $q\not= n-k+1$. The coeffective group $H^{n-k+1}_{(k)}(M)$ can be infinite dimensional, however in view of the sequence \eqref{selarga} there is a natural quotient of this coeffective group by considering the (in general also infinite dimensional) space $H^{n+k}(L_{\omega}^k(\Omega^{*}(M)))$. We will see below that such quotient has finite dimension on symplectic manifolds of finite type. \begin{definition} Let us consider the space $$\hat H^{n-k+1}(M):= \frac{H^{n-k+1}_{(k)}(M)}{H^{n+k}(L_{\omega}^k(\Omega^{*}(M)))/H^{n-k}(M)}.$$ If its dimension is finite, then we will denote it by $\hat c_{n-k+1}(M)$. 
\end{definition} Hence, we have an additional collection of $n$ symplectic invariants given by $\hat H^{n-k+1}(M)$ for $k=1,\ldots,n$, that is, $$\hat H^{n}(M),\ \hat H^{n-1}(M), \ldots, \hat H^{2}(M),\ \hat H^{1}(M).$$ From now on, we will refer to the collection \eqref{todos-los-gen-coef-groups} as the \emph{generalized coeffective cohomology groups} of the symplectic manifold $(M^{2n},\omega)$. \medskip Consider the short exact sequence \xymatrixcolsep{2.35pc} $$\xymatrix{ 0\ar[r]&\hat H^{n-k+1}(M)\ar[r]^{\hat{\!\emph{\i}}}&H^{n-k+1}(M)\ar[r]^-{L^k}& H^{n+k+1}(M)\ar[r]^-{f_{n-k+2}\,\,}& \text{im}\,f_{n-k+2}\ar[r]&0}, $$ where $\hat{\!\emph{\i}}$ is the homomorphism naturally induced by $H(i)$ in \eqref{selarga}. Since $\hat{\!\emph{\i}}$ is injective, it is clear that $\hat H^{n-k+1}(M)$ is finite dimensional whenever $H^{n-k+1}(M)$ is, and in such case we have $$ \hat c_{n-k+1}(M)=b_{n-k+1}(M)-b_{n+k+1}(M)+ \dim (\text{im}\,f_{n-k+2}). $$ Therefore, the properties obtained in Proposition~\ref{propiedades-finitas} extend to the space $\hat H^{n-k+1}(M)$ as follows: \begin{proposition}\label{propiedades-finitas-para-c-sombrero} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type and let $1\leq k\leq n$. The following properties hold for $\hat c_{n-k+1}(M)$: \begin{enumerate} \item[{\rm (i)}] \emph{Finiteness and bounds for the coeffective number $\hat c_{n-k+1}(M)$:} the space $\hat H^{n-k+1}(M)$ is finite dimensional and its dimension satisfies the inequalities \begin{equation}\label{des-para-c-sombrero} b_{n-k+1}(M) - b_{n+k+1}(M)\leq \hat c_{n-k+1}(M)\leq b_{n-k+1}(M). \end{equation} \item[{\rm (ii)}] \emph{Symplectic manifolds satisfying the HLC:} if $(M^{2n},\omega)$ satisfies the HLC then the lower bound in \eqref{des-para-c-sombrero} is attained, i.e. $$ \hat c_{n-k+1}(M)=b_{n-k+1}(M)-b_{n+k+1}(M). $$ \item[{\rm (iii)}] \emph{Exact symplectic manifolds:} if $\omega$ is exact then the upper bound in \eqref{des-para-c-sombrero} is attained, i.e. 
$$ \hat c_{n-k+1}(M)=b_{n-k+1}(M). $$ \item[{\rm (iv)}] \emph{Poincar\'e lemma:} if $U$ is the open unit disk in $\mathbb{R}^{2n}$ with the standard symplectic form $\omega=\sum_{i=1}^n dx^i\wedge dx^{n+i}$, then $\hat c_{n-k+1}(U)=0$. \end{enumerate} \end{proposition} Inspired by the definition of the Euler characteristic of a manifold, we define the following invariants: \begin{definition} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type. For each $1\leq k\leq n$, we define $$\chi^{(k)}(M) = (-1)^{n-k+1} \hat c_{n-k+1}(M)+ \sum^{2n}_{i=n-k+2}(-1)^i \, c_i^{(k)}(M).$$ \end{definition} The next proposition shows that each $\chi^{(k)}(M)$ is actually a topological invariant of the manifold. \begin{proposition}\label{invtopologicos} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type. For any $1\leq k\leq n$, $$\chi^{(k)}(M)=\sum_{r=n-k+1}^{n+k}(-1)^{r} \, b_{r}(M).$$ \end{proposition} \begin{proof} The long exact sequence \eqref{selarga} implies $$ 0=\hat c_{n-k+1}-b_{n-k+1}+b_{n+k+1}+\sum_{j=2}^{n+k}(-1)^{j-1} (c_{n-k+j}^{(k)}-b_{n-k+j}+b_{n+k+j}). $$ Writing this sum in terms of $\chi^{(k)}$ we get $$ (-1)^{n-k+1} \chi^{(k)} + \sum_{j=1}^{n+k}(-1)^j b_{n-k+j} - \sum_{j=1}^{n+k}(-1)^j b_{n+k+j}=0. $$ Since $b_i=0$ for $i\geq 2n+1$, the previous equality reduces to: \begin{eqnarray*} 0&=&(-1)^{n-k+1} \chi^{(k)} + \sum_{j=1}^{n+k}(-1)^j b_{n-k+j} - \sum_{j=1}^{n-k}(-1)^j b_{n+k+j}\\ &=& (-1)^{n-k+1} \chi^{(k)} + \sum_{r=n-k+1}^{2n}(-1)^{r-n+k}\, b_{r} - \sum_{r=n+k+1}^{2n}(-1)^{r-n-k}\, b_{r}. \end{eqnarray*} Equivalently, \begin{eqnarray*} \chi^{(k)}&=&\sum_{r=n-k+1}^{2n}(-1)^{r} b_{r} - \sum_{r=n+k+1}^{2n}(-1)^{r} b_{r} = \sum_{r=n-k+1}^{n+k}(-1)^{r} b_{r}. \end{eqnarray*} \end{proof} Observe that the Euler characteristic of $M$ is recovered if we allow $k=n+1$ (see Remark~\ref{limit-cases}). 
\medskip \begin{remark}\label{coef-inv-cohom-class} {\rm It is clear from the long exact sequence \eqref{selarga} that, for each $k$, the generalized $k$-coeffective cohomology groups \eqref{todos-los-gen-coef-groups} are invariants of the de Rham cohomology class $[\omega^k]$ given by the cap product of $[\omega]$ by itself $k$ times. Even more, if $[\omega^k]\not=0$ and we denote by $[[\omega^k]]$ the corresponding element in $\mathbb{P}(H^{2k}(M))$, then all the generalized $k$-coeffective groups are invariants of $[[\omega^k]]$. In conclusion, if $(M^{2n},\omega)$ is a symplectic manifold and $\omega^k$ are not exact, then the generalized coeffective cohomologies only depend on the element $[[\omega]]\in \mathbb{P}(H^{2}(M))$. } \end{remark} From this remark it follows \begin{lemma}\label{iso-equiv-coef} Let $F\colon (M,\omega)\longrightarrow (M',\omega')$ be a diffeomorphism such that $F^*[\omega']=\lambda [\omega]$ for some non-zero $\lambda\in \mathbb{R}$. Then, for any $1\leq k\leq n$, $\hat H^{n-k+1}(M')\cong \hat H^{n-k+1}(M)$ and $H^{q}_{(k)}(M')\cong H^{q}_{(k)}(M)$ for every $q\geq n-k+2$. \end{lemma} \medskip Notice that it suffices to know the de Rham cohomology of $M$ together with the action of $L^k$ on it, in order to know the generalized $k$-coeffective cohomology. This can be applied in particular to solvmanifolds satisfying the Mostow condition \cite{Mostow}, that is to say, to compact quotients $G/\Gamma$ of solvable Lie groups $G$ by a lattice $\Gamma$ satisfying that the algebraic closures $\mathcal{A}(\rm{Ad}_G(G))$ and $\mathcal{A}(\rm{Ad}_G(\Gamma))$ are equal. In fact, under this condition one has that the natural map $(\bigwedge^* {\frak g}^*,d) \hookrightarrow (\Omega^* (M),d)$ from the Chevalley-Eilenberg complex of the Lie algebra ${\frak g}$ of $G$ to the de Rham complex of the solvmanifold $M=G/\Gamma$ is a quasi-isomorphism, i.e. $H^q(M)\cong H^q({\frak g})$ for any $0\leq q\leq \dim M$. 
The following result is straightforward from the long exact sequence in cohomology: \begin{proposition}\label{calculo-coef-solvariedades} Let $(M=G/\Gamma,\omega)$ be a $2n$-dimensional symplectic solvmanifold satisfying the Mostow condition. Let ${\frak g}$ be the Lie algebra of $G$ and let $\omega'\in \bigwedge^2 {\frak g}^*$ be a left-invariant symplectic form representing the de Rham class $[\lambda \omega]\in H^2(M)$ for some $\lambda\not=0$. Then, for any $1\leq k\leq n$, the inclusion $\bigwedge^* {\frak g}^* \hookrightarrow \Omega^* (M)$ induces isomorphisms $\hat H^{n-k+1}(M)\cong \hat H^{n-k+1}({\frak g})$ and $H^{q}_{(k)}(M)\cong H^{q}_{(k)}({\frak g})$ for every $q\geq n-k+2$. \end{proposition} In particular, the previous result holds for nilmanifolds \cite{Nomizu} and in the completely solvable case \cite{Hattori}, i.e. when the adjoint representation ${\rm ad}_X$ has only real eigenvalues for all $X \in {\frak g}$. Note that for the usual coeffective cohomology, i.e. $k=1$ and $q\geq n+1$, this result was proved in \cite{FIL1} (see also \cite{FIL2}). \medskip \begin{remark}\label{cohom-mas-en-general} {\rm For other results on the de Rham cohomology of compact solvmanifolds $G/\Gamma$, even in the case that the solvable Lie group $G$ and the lattice $\Gamma$ do not satisfy the Mostow condition, see \cite{CF,Guan}. Notice that for infra-solvmanifolds Baues proved in \cite{Baues} an analogous result to Nomizu's theorem about the isomorphism of its cohomology and that of a certain complex of left-invariant forms, result that is used in \cite{Kasuya} to study the $1$-coeffective cohomology of certain symplectic aspherical manifolds. } \end{remark} \medskip \begin{remark} {\rm For general symplectic manifolds (not necessarily of finite type) one has for every $q\geq n-k+2$ the following isomorphism $$ \frac{H^{q}_{(k)}(M)}{H^{q+2k-1}(M)/L^k(H^{q-1}(M))} \cong \ker \{L^k\colon H^{q}(M)\longrightarrow H^{q+2k}(M) \}. 
$$ In particular, if the HLC is satisfied then $H^{q}_{(k)}(M) \cong \ker \{L^k\colon H^{q}(M)\longrightarrow H^{q+2k}(M) \}$. Since any compact K\"ahler manifold satisfies the HLC, we conclude that, for any $q\geq n-k+2$, the $k$-coeffective group $H^{q}_{(k)}(M)$ is isomorphic to the space of de Rham cohomology classes that annihilate the class $[\omega^k]$. For $k=1$ this result was proved by Bouch\'{e} in \cite{B}, where he refers to the latter groups as the \emph{truncated de Rham groups}. In \cite{FIL1,Kasuya} the relation of the 1-coeffective cohomology with the truncated de Rham cohomology is also investigated. } \end{remark} \section{Extension of the generalized coeffective complexes}\label{exten-gen-coef-cohom} \noindent In this section, for any $1\leq k\leq n$, we consider an extension of the $k$-coeffective complex, which is elliptic and whose cohomology groups will be isomorphic to the \emph{filtered} cohomology groups introduced by Tsai, Tseng and Yau in \cite{TY3}. For $k=1$ such extension was constructed by Eastwood in~\cite{E}. Let us fix $k$ such that $1\leq k\leq n$. For each $q$, let us consider the quotient space $\check{\Omega}^q_{(k)}(M)=\frac{\Omega^{q}(M)}{L_{\omega}^k(\Omega^{q-2k}(M))}$. We denote by $\check{d}\colon \check{\Omega}^q_{(k)}(M)\longrightarrow \check{\Omega}^{q+1}_{(k)}(M)$ the natural map induced by the exterior differential, i.e. $\check{d}(\check{\alpha})=(d\alpha)\!\check{\phantom{i}}=d\alpha+L_{\omega}^k(\Omega^{q-2k+1}(M))$, for any $\check{\alpha} \in \check{\Omega}^q_{(k)}(M)$. 
Then, we have the following complex \xymatrixcolsep{1.4pc}\xymatrixrowsep{2pc} \begin{equation}\label{complejo_nuevo_k} \xymatrix{ 0\ar[r]&\ar[r]\Omega^0\ar[r]^-{d}&\Omega^1\ar[r]^-{d}& \cdots\ar[r]^-{d}&\Omega^{2k-1}\ar[r]^-{\check{d}}& \check{\Omega}^{2k}_{(k)}\ar[r]^-{\check{d}}&\cdots\ar[r]^-{\check{d}} & \check{\Omega}^{n+k-2}_{(k)}\ar[r]^-{\check{d}} &\check{\Omega}^{n+k-1}_{(k)}\ar[d]^{D}\\ 0 &\Omega^{2n}\ar[l]&\Omega^{2n-1}\ar[l]_-{\ d} &\cdots\ar[l]_-{\ d}&\Omega^{2n-2k+1}\ar[l]_-{\ d} & {\mathcal A}^{2n-2k}_{(k)}\ar[l]_-{\ d} & \cdots\ar[l]_-{\ d} &{\mathcal A}^{n-k+2}_{(k)}\ar[l]_-{\ d} &{\mathcal A}^{n-k+1}_{(k)}\ar[l]_-{\ d}} \end{equation} where $D$ is a second-order differential operator defined as $D(\check{\alpha}) = d\gamma$, being $\gamma$ the unique $(n-k)$-form satisfying $d\alpha=L_{\omega}^k(\gamma)$. It can be checked that this complex is elliptic in any degree, however we will not use this fact in what follows since the main properties of its cohomology groups will be derived from a long exact sequence as in Section~\ref{gen-coef-cohom}. Let us denote by $\check{H}^q_{(k)}(M)$ the cohomology groups associated to the complex~\eqref{complejo_nuevo_k} for $0\leq q\leq 2n+2k-1$. Notice that $\check{H}^q_{(k)}(M)=H^q(M)$ for any $q\leq 2k-2$ and $\check{H}^q_{(k)}(M)=H^{q-2k+1}_{(k)}(M)$ for any $q\geq n+k+1$. 
Now, the sequences of complexes \xymatrixcolsep{1.5pc}\xymatrixrowsep{1.5pc} $$\xymatrix{ &&\Omega^{n+k-1}\ar[r]^-d&\Omega^{n+k}\ar[r]^-d&\Omega^{n+k+1}\ar[r]^-d& \Omega^{n+k+2}\ar[r]^-d& \Omega^{n+k+3}\ar[r]^-d&\cdots\\ &&\Omega^{n-k-1}\ar[r]^-d\ar[u]^-{L_{\omega}^k}&\Omega^{n-k}\ar[r]^-d\ar[u]^-{L_{\omega}^k}&\Omega^{n-k+1}\ar[r]^-d\ar[u]^-{L_{\omega}^k} & \Omega^{n-k+2}\ar[r]^-d\ar[u]^-{L_{\omega}^k}& \Omega^{n-k+3}\ar[r]^-d\ar[u]^-{L_{\omega}^k}&\cdots\\ \cdots\ar[r]^-{\check{d}}& \check{\Omega}^{n+k-3}_{(k)}\ar[r]^-{\check{d}}& \check{\Omega}^{n+k-2}_{(k)}\ar[r]^-{\check{d}}&\check{\Omega}^{n+k-1}_{(k)}\ar[r]^-D &{\mathcal A}^{n-k+1}_{(k)}\ar[u]^-i\ar[r]^-d& {\mathcal A}^{n-k+2}_{(k)}\ar[u]^-i\ar[r]^-d& {\mathcal A}^{n-k+3}_{(k)}\ar[u]^-i\ar[r]^-d&\cdots\\ \cdots\ar[r]^-{d}& \Omega^{n+k-3}\ar[u]^-p\ar[r]^-d& \Omega^{n+k-2}\ar[u]^-p\ar[r]^-d&\Omega^{n+k-1}\ar[u]^-p\ar[r]^-d&\Omega^{n+k}\ar[r]^-d&\Omega^{n+k+1}&&\\ \cdots\ar[r]^-{d}& \Omega^{n-k-3}\ar[u]^-{L_{\omega}^k}\ar[r]^-d& \Omega^{n-k-2}\ar[u]^-{L_{\omega}^k}\ar[r]^-d&\Omega^{n-k-1}\ar[u]^-{L_{\omega}^k}\ar[r]^-d &\Omega^{n-k}\ar[u]^-{L_{\omega}^k}\ar[r]^-d&\Omega^{n-k+1}\ar[u]^-{L_{\omega}^k}&&\\ }$$ where $i$ denotes the inclusion and $p$ the natural projection, give rise to the following long exact sequence in cohomology: \xymatrixcolsep{2.75pc}\xymatrixrowsep{0.8pc} $$\xymatrix{ \cdots\ar[r]^-{\check{f}_{n-k-2}} & H^{n-k-2}(M)\ar[r]^-{L^k} & H^{n+k-2}(M)\ar[r]^-{H(p)} & \check{H}^{n+k-2}_{(k)}(M) &\\ \ar[r]^-{\check{f}_{n-k-1}} & H^{n-k-1}(M)\ar[r]^-{L^k} & H^{n+k-1}(M)\ar[r]^-{H(p)} & \check{H}^{n+k-1}_{(k)}(M) &}$$ \xymatrixcolsep{3.3pc}\begin{equation}\label{sexl-E_k}\xymatrix{ \ar[r]^-{\check{f}_{n-k}} & H^{n-k}(M)\ar[r]^-{L^k} & H^{n+k}(M)\ar[r]^-{\check{f}_{n-k+1}} & \check{H}^{n+k}_{(k)}(M) &} \end{equation} \xymatrixcolsep{2.75pc}$$\xymatrix{ \ar[r]^-{H(i)} & H^{n-k+1}(M)\ar[r]^-{L^k} & H^{n+k+1}(M)\ar[r]^-{\check{f}_{n-k+2}} & \check{H}^{n+k+1}_{(k)}(M)&\\ \ar[r]^-{H(i)} & 
H^{n-k+2}(M)\ar[r]^-{L^k} & H^{n+k+2}(M)\ar[r] ^-{\check{f}_{n-k+3}} & \check{H}^{n+k+2}_{(k)}(M)&\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\cdots. } $$ Here $H(i)$ and $H(p)$ are the homomorphisms induced in cohomology by $i$ and $p$, respectively, and $\check{f}_q$ are the connecting homomorphisms, which are given as follows: $\bullet$ for any $j\leq n+k-1$ and $[\alpha]\in\check H^j_{(k)}(M)$: $\check f_{j-2k+1}([\alpha]) = [\beta]$, where $d\alpha = L^k_{\omega}(\beta)$; $\bullet$ for any $j\geq n+k$ and $[\alpha]\in H^j(M)$: $\check f_{j-2k+1}([\alpha]) = [d\beta]$, where $\alpha = L^k_{\omega}(\beta)$. \medskip Let $\check{c}_q^{(k)}(M)$ be the dimension of $\check{H}^q_{(k)}(M)$ when it is finite. As in Section~\ref{gen-coef-cohom}, using five-terms exact sequences from \eqref{sexl-E_k}, we arrive at the following result, that provides an extension of Proposition~\ref{propiedades-finitas}. \begin{proposition}\label{propiedades-finitas-suc-check} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type and let $1\leq k\leq n$. Then, for every $0\leq q\leq 2n+2k-1$, the following properties hold: \begin{enumerate} \item[{\rm (i)}] \emph{Finiteness and bounds for the numbers $\check{c}_q^{(k)}(M)$:} the group $\check{H}^q_{(k)}(M)$ is finite dimensional and its dimension $\check{c}_q^{(k)}(M)$ satisfies the inequalities \begin{equation}\label{des-check} b_{q-2k+1}(M)-b_{q+1}(M)\leq \check{c}_q^{(k)}(M)\leq b_{q-2k+1}(M)+b_{q}(M). \end{equation} \item[{\rm (ii)}] \emph{Symplectic manifolds satisfying the HLC:} if $(M^{2n},\omega)$ satisfies the HLC then the lower bound in \eqref{des-check} is attained for every $q\geq n+k$, i.e. $$ \check{c}_q^{(k)}(M)=b_{q-2k+1}(M)-b_{q+1}(M), \quad\ q\geq n+k. $$ \item[{\rm (iii)}] \emph{Exact symplectic manifolds:} if $\omega$ is exact then the upper bound in \eqref{des-check} is attained, i.e. $$ \check{c}_q^{(k)}(M)=b_{q-2k+1}(M)+b_{q}(M). 
$$ \item[{\rm (iv)}] \emph{Poincar\'e lemma:} if $U$ is the open unit disk in $\mathbb{R}^{2n}$ with the standard symplectic form $\omega=\sum_{i=1}^n dx^i\wedge dx^{n+i}$, then $\check{c}_0^{(k)}(U)=1=\check{c}_{2k-1}^{(k)}(U)$ and $\check{c}_{q}^{(k)}(U)=0$ for any other value of $q$. \end{enumerate} \end{proposition} \begin{remark}\label{case ii for Lk injective} {\rm By (ii) the lower bound in \eqref{des-check} is attained for every $q\geq n+k$ for symplectic manifolds satisfying the HLC. Similarly, it can be proved from \eqref{sexl-E_k} that if $(M^{2n},\omega)$ satisfies that all the maps $L^k\colon H^{n-k}(M)\longrightarrow H^{n+k}(M)$ are \emph{injective} then $\check{c}_q^{(k)}(M)=b_{q}(M)-b_{q-2k}(M)$ for every $q\leq n+k-1$. In conclusion, if $L^k\colon H^{n-k}(M)\longrightarrow H^{n+k}(M)$ is an isomorphism for any $1\leq k\leq n$ (for instance, if $(M^{2n},\omega)$ is a \emph{closed} symplectic manifold satisfying the HLC) then the following equalities hold: \begin{eqnarray*} && \check{c}_q^{(k)}(M)=b_{q}(M)-b_{q-2k}(M),\quad 0\leq q\leq n+k-1;\\[4pt] && \check{c}_q^{(k)}(M)=b_{q-2k+1}(M)-b_{q+1}(M),\quad n+k\leq q\leq 2n+2k-1. \end{eqnarray*} } \end{remark} \begin{example}\label{R2n} {\rm By Proposition~\ref{propiedades-finitas-suc-check}~(iv) we have $\check{c}_{2k-1}^{(k)}(U)=1$. Next we show the non-zero cohomology class generating $\check{H}^{2k-1}_{(k)}(U)$. Let $\alpha=\sum_{i=1}^n x^i\wedge dx^{n+i}$. The $(2k-1)$-form $\beta=\alpha\wedge\omega^{k-1}$ is $\check{d}$-closed because $d\alpha=\omega$ and hence $d\beta=\omega^{k} \in L_{\omega}^k(\Omega^{0}(U))$. Clearly, $\beta$ is not $\check{d}$-exact, because it is not $d$-exact. In conclusion, $[\beta]$ defines a non-zero cohomology class and $\check{H}^{2k-1}_{(k)}(U)=\langle [\beta] \rangle$. 
} \end{example} \begin{remark}\label{comparison of indexes} {\rm Notice that the generalized coeffective space $\hat H^{n-k+1}(M)$ is isomorphic to a quotient of $\check{H}^{n+k}_{(k)}(M)$; concretely, \begin{equation}\label{rel-hat-check} \hat H^{n-k+1}(M)\cong \frac{\check{H}^{n+k}_{(k)}(M)}{H^{n+k}(M)/L^k(H^{n-k}(M))}. \end{equation} } \end{remark} \medskip Let $(M^{2n},\omega)$ be a symplectic manifold of finite type. For every $1\leq k\leq n$, we define $$\check{\chi}^{(k)}(M) = \sum^{2n+2k-1}_{i=0}(-1)^i \, \check{c}_i^{(k)}(M).$$ Let us write $\check{\chi}^{(k)}(M) = \check{\chi}^{(k)}_+(M) + \check{\chi}^{(k)}_-(M)$, where $$\check{\chi}^{(k)}_+(M)=\sum^{n+k-1}_{i=0}(-1)^i \, \check{c}_i^{(k)}(M),\quad \mbox{ and }\quad \check{\chi}^{(k)}_-(M)=\sum^{2n+2k-1}_{i=n+k}(-1)^i\, \check{c}_i^{(k)}(M).$$ \begin{proposition}\label{chis-filtered} Let $(M^{2n},\omega)$ be of finite type. For every $1\leq k\leq n$: \begin{enumerate} \item[{\rm (i)}] $\check{\chi}^{(k)}(M) = 0$; consequently, $\check{\chi}^{(k)}_-(M)=-\check{\chi}^{(k)}_+(M)$. \item[{\rm (ii)}] $\check{\chi}^{(k)}_+(M)=(-1)^{n+k+1}(\check{c}^{(k)}_{n+k}(M)-\hat{c}_{n-k+1}(M)) + \chi^{(k)}(M)$. \end{enumerate} \end{proposition} \begin{proof} Property (i) follows from~\eqref{sexl-E_k} arguing similarly to the proof of Proposition~\ref{invtopologicos}. For the proof of (ii), taking into account that $(-1)^{n+k+s}\check{c}^{(k)}_{n+k+s}(M)=-(-1)^{n-k+s+1}c^{(k)}_{n-k+s+1}(M)$ for $s\geq 1$, by Proposition~\ref{invtopologicos} we get $\chi^{(k)}(M)+\check{\chi}^{(k)}_-(M)= (-1)^{n+k}(\check{c}^{(k)}_{n+k}(M)-\hat{c}_{n-k+1}(M))$. Since $\check{\chi}^{(k)}_+(M)=-\check{\chi}^{(k)}_-(M)$, relation (ii) follows. \end{proof} Equality (ii) in the above proposition means that the behaviour of the symplectic invariant $\check{\chi}^{(k)}_+(M)$ only depends on $\check{c}^{(k)}_{n+k}(M)-\hat{c}_{n-k+1}(M)$, because $\chi^{(k)}(M)$ is a topological invariant by Proposition~\ref{invtopologicos}. 
Moreover, one has the following characterization of the HLC in terms of $\check{\chi}^{(k)}_+(M)$, which in particular implies that the HLC is determined by the cohomology of the first half of the complexes \eqref{complejo_nuevo_k}. \begin{corollary}\label{HLC-caracterizacion} A symplectic manifold $(M^{2n},\omega)$ of finite type satisfies the HLC if and only if $\check{\chi}^{(k)}_+(M)=\chi^{(k)}(M)$ for every $1\leq k\leq n$. \end{corollary} \begin{proof} By \eqref{rel-hat-check}, a symplectic manifold satisfies the HLC if and only if $\hat H^{n-k+1}(M) \cong \check{H}^{n+k}_{(k)}(M)$ for every $1\leq k\leq n$. Therefore, if $M$ is of finite type then, $(M^{2n},\omega)$ satisfies the HLC if and only if $\hat{c}_{n-k+1}(M)=\check{c}^{(k)}_{n+k}(M)$ for every $1\leq k\leq n$. By Proposition~\ref{chis-filtered}~(ii), this is equivalent to $\check{\chi}^{(k)}_+(M)=\chi^{(k)}(M)$ for every $1\leq k\leq n$. \end{proof} \begin{remark}\label{comparison with filtered} {\rm In \cite[Theorem 3.1]{TY3} Tsai, Tseng and Yau have introduced elliptic differential complexes of filtered forms that extend the complex of primitive forms \cite[Proposition 2.8]{TY2}. Complexes \eqref{complejo_nuevo_k} may be thought as a coeffective version of such filtered complexes. Moreover, in \cite[Theorem 4.2]{TY3} they obtain long exact sequences that provide a resolution of the Lefschetz maps $L^k$. Comparing with \eqref{sexl-E_k}, which also gives a resolution of the same Lefschetz maps, one immediately concludes that the cohomology $\check{H}^{*}_{(k)}(M)$ is isomorphic to the $(k-1)$-filtered cohomology as follows: \medskip $\bullet$ $\check{H}^{n+k-s}_{(k)}(M) \cong F^{k-1}H^{n+k-s}_+(M)$,\, for $s=1,\ldots,n+k$, \medskip $\bullet$ $\check{H}^{n+k+s}_{(k)}(M) \cong F^{k-1}H^{n+k-s-1}_-(M)$,\, for $s=0,1,\ldots,n+k-1$. 
\medskip \noindent In particular, for any $k\geq 1$ one has the following isomorphism between the $(k-1)$-filtered cohomology group and the $k$-coeffective cohomology group \begin{equation}\label{rel-otra} F^{k-1}H^{n+k-s-1}_-(M)\cong H^{n-k+s+1}_{(k)}(M) \cong \check{H}^{n+k+s}_{(k)}(M), \quad 1\leq s\leq n+k-1. \end{equation} \noindent For $k=1$ we recover the isomorphisms proved in \cite{E} between the extended coeffective cohomology of Eastwood and the primitive cohomology $PH=F^0H$ of Tseng-Yau \cite{TY1,TY2}. More generally, the isomorphism for any primitive cohomology group is as follows: $$ \begin{array}{l} P H^q_{\partial_+}(M) \cong F^{0}H^{q}_+(M) \cong \check{H}^{q}_{(1)}(M), \quad\ 0\leq q\leq n-1;\\[7pt] P H^q_{\partial_-}(M) \cong F^{0}H^{q}_-(M) \cong \check{H}^{2n-q+1}_{(1)}(M) \cong H^{2n-q}_{(1)}(M), \quad\ 0\leq q\leq n-1;\\[7pt] P H^{n-k+1}_{dd^\Lambda}(M) \cong F^{k-1}H^{n+k-1}_+(M) \cong \check{H}^{n+k-1}_{(k)}(M), \quad\ 1\leq k\leq n;\\[7pt] P H^{n-k+1}_{d+d^\Lambda}(M) \cong F^{k-1}H^{n+k-1}_-(M) \cong \check{H}^{n+k}_{(k)}(M), \quad\ 1\leq k\leq n. \end{array} $$ From now on, due to the above identifications, we will refer to the cohomology groups $\check{H}^{q}_{(k)}(M)$ as the filtered cohomology groups of $(M^{2n},\omega)$. } \end{remark} \medskip Notice that an analogous observation as Remark~\ref{coef-inv-cohom-class} is also valid for the filtered cohomologies. Hence, a similar result to Lemma~\ref{iso-equiv-coef} holds: \begin{lemma}\label{iso-equiv-filtered} Let $F\colon (M,\omega)\longrightarrow (M',\omega')$ be a diffeomorphism such that $F^*[\omega']=\lambda [\omega]$ for some non-zero $\lambda\in \mathbb{R}$. Then, for any $1\leq k\leq n$, $\check{H}^q_{(k)}(M') \cong \check{H}^q_{(k)}(M)$ for every $0\leq q\leq 2n+2k-1$. 
\end{lemma} A similar result to Proposition~\ref{calculo-coef-solvariedades} for computation of the filtered cohomologies of certain solvmanifolds is also available: \begin{proposition}\label{calculo-filtered-solvariedades} Let $(M=G/\Gamma,\omega)$ be a $2n$-dimensional symplectic solvmanifold satisfying the Mostow condition. Let ${\frak g}$ be the Lie algebra of $G$ and let $\omega'\in \bigwedge^2 {\frak g}^*$ be a left-invariant symplectic form representing the de Rham class $[\lambda \omega]\in H^2(M)$ for some $\lambda\not=0$. Then, for any $1\leq k\leq n$, the inclusion $\bigwedge^* {\frak g}^* \hookrightarrow \Omega^* (M)$ induces isomorphisms $\check{H}^q_{(k)}(M) \cong \check{H}^q_{(k)}({\frak g})$ for every $0\leq q\leq 2n+2k-1$. \end{proposition} \begin{remark}\label{BCyA} {\rm In \cite{TY1} Tseng and Yau introduced and studied more generally Bott-Chern and Aeppli type cohomologies using $d$ and $d^\Lambda$ for a symplectic manifold. A characterization of the HLC in the compact case from an \emph{\`a la Fr\"olicher} inequality is given in \cite{Angella-T}. Note that for the Bott-Chern and Aeppli type symplectic cohomologies, a similar result to Proposition~\ref{calculo-filtered-solvariedades} is obtained in \cite[Theorem 3]{Macri} (see also \cite[Theorem 2.31]{Angella-K}) by using another argument. } \end{remark} \section{Relations with the symplectically harmonic cohomology}\label{relacion-con-armonica} \noindent In this section we relate the symplectically harmonic cohomology with the cohomologies studied in the previous sections. Let $(M^{2n},\omega)$ be a symplectic manifold of dimension $2n$. The \emph{symplectic star} operator $ *\colon \Omega^q(M) \longrightarrow \Omega^{2n-q}(M) $ is defined by $$ \alpha \wedge (*\beta )=\Lambda^q(\Pi)(\alpha, \beta)\frac{\omega^n}{n!}, $$ for every $q$-forms $\alpha$ and $\beta$, where $\Pi$ is the bivector field dual to $\omega$, i.e. the natural Poisson structure associated to $\omega$. 
Let $\delta\colon \Omega^q(M)\longrightarrow \Omega^{q-1}(M)$ be the operator given by $\delta\alpha=(-1)^{q+1}*d*\alpha$, for every $q$-form $\alpha$. Brylinski proved that $\delta=[i(\Pi),d]$, where $i(\cdot)$ denotes the interior product. \begin{definition}\cite{Br} {\rm A form $\alpha$ is called \emph{symplectically harmonic} if $d\alpha =0=\delta \alpha$. } \end{definition} We denote by $\Omega^q_{\rm hr}(M)$ the linear space of symplectically harmonic $q$-forms. Unlike the Hodge theory, there are non-zero exact symplectically harmonic forms. Now, following Brylinski \cite{Br}, one defines the \emph{symplectically harmonic cohomology} $$ H^q_{\rm hr}(M)=\frac{\Omega^q_{\rm hr}(M)}{\Omega^q_{\rm hr}(M)\cap \text{im}\, d}, $$ for $0\leq q\leq 2n$. Hence, $H^q_{\rm hr}(M)$ is the subspace of the $q$-th de Rham cohomology group consisting of all the de Rham cohomology classes of degree $q$ containing a symplectically harmonic representative. By analogy with the Hodge theory, Brylinski~\cite{Br} conjectured that any de Rham cohomology class admits a symplectically harmonic representative. Mathieu~\cite{Ma} (and independently Yan~\cite{Yan}) proved that Brylinski conjecture holds, namely $H^{q}_{\rm hr}(M,\omega) = H^q(M)$ for every $0\leq q\leq 2n$, if and only if $(M^{2n},\omega)$ satisfies the~HLC. An important result is that for any symplectic manifold every de Rham cohomology class up to degree~2 admits a symplectically harmonic representative~\cite{Yan} (see also \cite{IRTU1} for more general results), that is, $H^q_{\rm hr}(M)=H^q(M)$ for $q=0,1,2$. For every $q \leq n$, if we set $$ P^q(M)=\{[\alpha]\in H^q(M)\,|\,L^{n-q+1}[\alpha]=0\}, $$ then $P^{q}(M) \subset H^{q}_{\rm hr}(M)$ \cite{Yan}. Moreover, the following result (proved in \cite[Corollary 2.4]{IRTU1} and \cite[Lemma 4.3]{Ym}) gives a description of the spaces $H^q_{\rm hr}(M)$: \begin{theorem}\label{harm} Let $(M^{2n},\omega)$ be a symplectic manifold of dimension $2n$. 
Then, \begin{eqnarray} &&H^{q}_{\rm hr}(M) = P^{q}(M,\omega)+L(H^{q-2}_{\rm hr}(M)), \quad \mbox{ for } 0\leq q \leq n;\nonumber\\[5pt] &&H^{q}_{\rm hr}(M) = {\rm Im}\,\{L^{q-n}\colon H^{2n-q}_{\rm hr}(M) \longrightarrow H^{q}(M)\}, \quad \mbox{ for } n+1\leq q \leq 2n.\nonumber \end{eqnarray} \end{theorem} Next we suppose that $(M^{2n},\omega)$ is of finite type and denote by $h_q(M)$ the dimension of $H^q_{\rm hr}(M)$. \begin{example}\label{TM} {\rm Let $M^{n}$ be a manifold of dimension $n$ and of finite type, and let $(T^*M,\omega_0)$ be the \emph{cotangent bundle} endowed with the \emph{standard} symplectic form. Since $\omega_0$ is exact, the homomorphisms $L^{k}$ are identically zero and by Theorem~\ref{harm} we have $$h_{q}(T^*M,\omega_0)=b_q(M), \ \mbox{ for } q\leq n, \quad \mbox{ and }\quad h_{q}(T^*M,\omega_0)=0, \ \mbox{ for } n+1\leq q\leq 2n.$$ For the generalized coeffective cohomology, from Propositions~\ref{propiedades-finitas}~(iii) and~\ref{propiedades-finitas-para-c-sombrero}~(iii) it follows that $$\hat c_{n-k+1}(T^*M,\omega_0)=b_{n-k+1}(M)$$ and $$c_q^{(k)}(T^*M,\omega_0)=b_q(M)+b_{q+2k-1}(M), \ \mbox{ for } n-k+2\leq q\leq 2n.$$ Furthermore, from Proposition~\ref{propiedades-finitas-suc-check}~(iii) we get $$\check{c}_q^{(k)}(T^*M,\omega_0)=b_{q-2k+1}(M)+b_{q}(M), \ \mbox{ for } q\leq 2n+2k-1.$$ } \end{example} \medskip In the following result we relate the generalized coeffective cohomology with the harmonic cohomology via the coeffective groups $\hat H^{1}(M),\ldots, \hat H^{n}(M)$. \begin{theorem}\label{rel_armonica_coef} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type. The following relation holds for every $k=1,\ldots,n$: $$ h_{n-k+1}(M) - h_{n+k+1}(M) = \hat c_{n-k+1}(M). $$ \end{theorem} \begin{proof} By Theorem~\ref{harm}, $H^{n-k+1}_{{\rm hr}}(M)=P^{n-k+1}(M)+L(H^{n-k-1}_{{\rm hr}}(M))$. Hence, $$ h_{n-k+1}(M) = \dim P^{n-k+1}(M) + \dim L(H^{n-k-1}_{{\rm hr}}(M)) - \dim (P^{n-k+1}(M)\cap L(H^{n-k-1}_{{\rm hr}}(M))). 
$$ It follows from \eqref{selarga} that $P^{n-k+1}(M)$ is isomorphic to the space $\hat H^{n-k+1}(M)$, and therefore, $\dim P^{n-k+1}(M)=\hat c_{n-k+1}(M)$. On the other hand, $$ P^{n-k+1}(M)\cap L(H^{n-k-1}_{{\rm hr}}(M)) = \ker L^{k}\big\vert_{L(H^{n-k-1}_{{\rm hr}}(M))}. $$ Now, \begin{eqnarray*}h_{n-k+1}(M) \!&\!\!=\!\!& \hat c_{n-k+1}(M) + \dim L(H^{n-k-1}_{{\rm hr}}(M)) - \dim \left( \ker L^{k}\big\vert_{L(H^{n-k-1}_{{\rm hr}}(M))} \right)\\ \!&\!\!=\!\!& \hat c_{n-k+1}(M) + \dim (L^{k+1}(H^{n-k-1}_{{\rm hr}}(M)))\\ \!&\!\!=\!\!& \hat c_{n-k+1}(M) + h_{n+k+1}(M). \end{eqnarray*} \end{proof} From Proposition~\ref{propiedades-finitas-para-c-sombrero} we get directly upper and lower bounds for the difference $h_{n-k+1}(M) - h_{n+k+1}(M)$. Moreover, the previous theorem, together with Proposition~\ref{invtopologicos} and \eqref{rel-otra}, provides further relations between the harmonic and the filtered cohomologies. Next we derive some concrete relations of the harmonic cohomology with the groups $\check{H}^{q}_{(k)}(M)$. \begin{proposition}\label{rel_armonica_coef-2} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type. For every $1\leq k\leq n$ we have $$ 0\leq \check{c}_{n+k}^{(k)}(M)-\hat c_{n-k+1}(M) \leq b_{n+k}(M) - h_{n+k}(M), $$ where the latter equality holds if and only if $L^k(H^{n-k}_{{\rm hr}}(M))=L^k(H^{n-k}(M))$. \end{proposition} \begin{proof} It follows from \eqref{rel-hat-check} that $\hat c_{n-k+1}(M)= \check{c}_{n+k}^{(k)}(M) - b_{n+k}(M) + \dim L^k(H^{n-k}(M))$. Taking into account that $\dim L^k(H^{n-k}(M))\geq h_{n+k}(M)$ by Theorem~\ref{harm}, we conclude the relation. \end{proof} The following consequence will be useful later for symplectic manifolds of low dimension. \begin{corollary}\label{rel_armonica_coef-2-cor} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type. 
Then: \begin{eqnarray} &&\check{c}_{2n}^{(n)}(M) = b_1(M) + b_{2n}(M) - h_{2n}(M),\nonumber\\[3pt] &&\check{c}_{2n-1}^{(n-1)}(M) = b_2(M) + b_{2n-1}(M) - h_{2n-1}(M) - h_{2n}(M),\nonumber\\[3pt] &&\check{c}_{2n-2}^{(n-2)}(M) = b_{2n-2}(M) + h_3(M) - h_{2n-2}(M) -h_{2n-1}(M).\nonumber \end{eqnarray} \end{corollary} \begin{proof} Notice that $L^k(H^{n-k}_{{\rm hr}}(M))=L^k(H^{n-k}(M))$ is satisfied for $k=n, n-1$ and $n-2$ because $H^q_{\rm hr}(M)=H^q(M)$ for $q=0,1,2$. Hence, it suffices to apply Proposition~\ref{rel_armonica_coef-2} and use Theorem~\ref{rel_armonica_coef} to relate the coeffective numbers with the harmonic cohomology. \end{proof} Note that from Theorem~\ref{harm} it follows directly that an analogous result to Lemmas~\ref{iso-equiv-coef} and~\ref{iso-equiv-filtered} also holds for the symplectically harmonic cohomology \cite[Proposition~1]{Ym}, and in the case of solvmanifolds satisfying the Mostow condition, a result similar to Propositions~\ref{calculo-coef-solvariedades} and~\ref{calculo-filtered-solvariedades} is also valid (this was first observed in \cite[Proposition~2]{Ym}, see also \cite{IRTU1,IRTU2}, for the class of symplectic nilmanifolds). \section{Symplectic flexibility of closed manifolds}\label{flex} \noindent In this section we focus on closed symplectic manifolds, for which we introduce a notion of flexibility for the generalized coeffective and filtered cohomologies, and study relations with the harmonic flexibility. First we show some general properties that we will use later in this section. \begin{lemma}\label{igualdades-general} Let $(M^{2n},\omega)$ be a symplectic manifold of finite type. Then: \begin{enumerate} \item[{\rm (i)}] $\hat c_1(M)=b_1(M)$, and $c_q^{(n)}(M)=b_q(M)$ for every $2\leq q\leq 2n$. \item[{\rm (ii)}] For any $1\leq k\leq n$, $\check{c}_q^{(k)}(M)=c_{q-2k+1}^{(k)}(M)$ for every $n+k+1\leq q\leq 2n+2k-1$. 
\end{enumerate} \end{lemma} \begin{proof} (i) is clear from the definition of the generalized coeffective cohomology for $k=n$ and from the long exact sequence \eqref{selarga}. Equalities (ii) are direct from the definition of $\check{H}^q_{(k)}(M)$. \end{proof} For closed manifolds one has additional relations. For instance, the numbers $\check c_q^{(k)}(M)$ satisfy certain duality (see \cite[Proposition 4.8]{TY3} for the corresponding duality for the filtered cohomology groups), whereas the harmonic number $h_{2n-1}(M)$ is always even \cite[Lemma 1.14]{IRTU1}. The proof of these facts follows from the existence of the usual non-singular pairing $p([\alpha],[\beta])=\int_M \alpha\wedge\beta$, for $[\alpha]\in H^q(M)$ and $[\beta]\in H^{m-q}(M)$, valid on any closed $m$-dimensional manifold $M$. In the following proposition we collect these results together with other properties: \begin{proposition}\label{igualdades} Let $(M^{2n},\omega)$ be a closed symplectic manifold. Then: \begin{enumerate} \item[{\rm (i)}] For any $1\leq k\leq n-1$, $c_q^{(k)}(M)=b_q(M)$ for every $2n-2k+1\leq q\leq 2n$. \item[{\rm (ii)}] For any $1\leq k\leq n$, $\check c_q^{(k)}(M)=\check c_{2n+2k-q-1}^{(k)}(M)$ for every $0\leq q\leq n+k-1$. \item[{\rm (iii)}] $h_{2n}(M)=b_{2n}(M)$, and $h_{2n-1}(M)$ is always even. \item[{\rm (iv)}] $h_{n-1}(M) - (\check c_{n+1}^{(1)}(M)-\hat c_n(M)) \leq h_{n+1}(M)\leq h_{n-1}(M)$. \end{enumerate} \end{proposition} \begin{proof} By definition of the coeffective cohomology one always has that $c_q^{(k)}(M)=b_q(M)$ for any $1\leq k\leq n-1$ and for every $q\geq 2n-2k+2$. Moreover, since $M$ is closed, $H^{2n}(M)=\langle [\omega^n] \rangle$ and $L^k\colon H^{2n-2k}(M)\longrightarrow H^{2n}(M)$ is surjective, so the long exact sequence \eqref{selarga} implies $c_{2n-2k+1}^{(k)}(M)=b_{2n-2k+1}(M)$. This proves~(i). Property (ii) follows from \cite[Proposition 4.8]{TY3} taking into account the identifications given in Remark~\ref{comparison with filtered}. 
The proof of (iii) is a consequence of the fact that the rank of $L^{n-1}\colon H^1(M)\longrightarrow H^{2n-1}(M)$ is always an even number \cite[Lemma 1.14]{IRTU1}. To prove (iv), since $H^{n+1}_{{\rm hr}}(M) = L(H^{n-1}_{{\rm hr}}(M))$, we have $$ h_{n-1}(M) = h_{n+1}(M) + \dim \left(\ker \{L\colon H^{n-1}_{{\rm hr}}(M) \to H^{n+1}(M) \}\right). $$ Therefore, \begin{eqnarray*} h_{n-1}(M) - h_{n+1}(M) &=& \dim \left(\ker \{L\colon H^{n-1}_{{\rm hr}}(M) \to H^{n+1}(M) \}\right) \\ &\leq& \dim \left(\ker \{L\colon H^{n-1}(M) \to H^{n+1}(M) \}\right)\\ &=& b_{n-1}(M) - \dim \left(\text{im}\,\{L\colon H^{n-1}(M)\to H^{n+1}(M)\}\right)\\ &=& b_{n+1}(M) - \dim \left(\text{im}\,\{L\colon H^{n-1}(M)\to H^{n+1}(M)\}\right)\\ &=& \check c_{n+1}^{(1)}(M)-\hat c_n(M), \end{eqnarray*} where the last equality follows from Remark~\ref{comparison of indexes}. \end{proof} Notice that Theorem~\ref{rel_armonica_coef} does not provide any relation for $h_{n+1}$, so (iv) in the above proposition provides upper and lower bounds for the harmonic number $h_{n+1}$ on closed symplectic manifolds. \begin{corollary}\label{n_n-1_coeffective} Let $(M^{2n},\omega)$ be a closed symplectic manifold. Then, \begin{equation}\label{coefectivos-topologicos} \hat c_1(M)=\check{c}_{2n}^{(n)}(M)=b_1(M) \quad\mbox{ and }\quad \hat c_2(M)=b_2(M)-1. \end{equation} Hence, the generalized coeffective cohomology groups \eqref{todos-los-gen-coef-groups} for $k=n$ and $n-1$, as well as the $n$-filtered cohomology groups are topological invariants. \end{corollary} \begin{proof} The formulas for $\chi^{(n)}$ and $\chi^{(n-1)}$ given in Proposition~\ref{invtopologicos} together with part (i) in Proposition~\ref{igualdades-general} and Proposition~\ref{igualdades} imply \eqref{coefectivos-topologicos}, so all the generalized $n$- and $(n-1)$-coeffective numbers are topological. 
For the $n$-filtered cohomology it suffices to use Corollary~\ref{rel_armonica_coef-2-cor} and the fact that $\check c^{(n)}_q = c^{(n)}_{q-2n+1}$ for $2n+1\leq q\leq 4n-1$. \end{proof} Next we introduce the notions of generalized coeffective flexibility and filtered flexibility, as analogous notions of the concept of harmonic flexibility introduced and studied in \cite{IRTU1, Yan} and motivated by a question raised by Khesin and McDuff. \begin{definition}\label{flexible} {\rm A closed smooth manifold $M^{2n}$ is said to be \begin{enumerate} \item[{\rm (i)}] \texttt{c}-\emph{flexible}, if $M$ possesses a continuous family of symplectic forms $\omega_t$, where $t\in [a,b]$, such that $\hat c_{n-k+1}(M,\omega_a)\not=\hat c_{n-k+1}(M,\omega_b)$ or $c_{q}^{(k)}(M,\omega_a)\not=c_{q}^{(k)}(M,\omega_b)$ for some $1\leq k\leq n$ and $n-k+2\leq q\leq 2n$; \item[{\rm (ii)}] \texttt{f}-\emph{flexible}, if $M$ possesses a continuous family of symplectic forms $\omega_t$, $t\in [a,b]$, such that $\check c_{q}^{(k)}(M,\omega_a)\not=\check c_{q}^{(k)}(M,\omega_b)$ for some $1\leq k\leq n$ and $0\leq q\leq 2n+2k-1$; \item[{\rm (iii)}] \texttt{h}-\emph{flexible}, if $M$ possesses a continuous family of symplectic forms $\omega_t$, $t\in [a,b]$, such that $h_{q}(M,\omega_a)\not=h_{q}(M,\omega_b)$ for some $0\leq q\leq 2n$. \end{enumerate} } \end{definition} Notice that \texttt{h}-flexible manifolds are precisely the flexible manifolds in \cite{IRTU1}. \medskip Since $H^q_{\rm hr}(M)=H^q(M)$ for $q=1,2$, we have $h_{2n-q}(M)=\dim ({\rm Im}\,\{L^{n-q}\colon H^{q}(M) \longrightarrow H^{2n-q}(M)\})$ for $q=1,2$ by Theorem~\ref{harm}. 
Now, if $\omega_t$ is a continuous family of symplectic structures on $M$, $t\in [a,b]$, then it is clear that \begin{equation}\label{lower-semicontinuous} h_{2n-1}(M,\omega_t) \geq h_{2n-1}(M,\omega_a)\quad \mbox{ and }\quad h_{2n-2}(M,\omega_t) \geq h_{2n-2}(M,\omega_a), \end{equation} that is, these symplectically harmonic numbers satisfy a ``lower-semicontinuous'' property. In the following result we observe that an ``upper-semicontinuous'' property holds for all the coeffective and the filtered numbers. \begin{proposition}\label{upper-semicontinuous} Let $\omega_t$ be a continuous family of symplectic structures on $M$, for $t\in [a,b]$. Then, for any $1\leq k\leq n$ the following inequalities hold: \begin{eqnarray*} && \hat c_{n-k+1}(M,\omega_t) \leq \hat c_{n-k+1}(M,\omega_a),\\[5pt] && c_{q}^{(k)}(M,\omega_t) \leq c_{q}^{(k)}(M,\omega_a), \mbox{ for every } n-k+2\leq q\leq 2n,\\[5pt] && \check{c}_{n+k}^{(k)}(M,\omega_t)\leq \check{c}_{n+k}^{(k)}(M,\omega_a), \mbox{ for every } 0\leq q\leq 2n+2k-1. \end{eqnarray*} \end{proposition} \begin{proof} It follows directly from the long exact sequences \eqref{selarga} and \eqref{sexl-E_k}. \end{proof} Next we study relations among the three different types of flexibility. We begin in dimension four. \begin{theorem}\label{dim4} Let $M$ be a $4$-dimensional closed manifold. Then: \begin{enumerate} \item[{\rm (i)}] $M$ is never \emph{\texttt{c}}-flexible; \item[{\rm (ii)}] $M$ is \emph{\texttt{f}}-flexible if and only if it is \emph{\texttt{h}}-flexible. \end{enumerate} \end{theorem} \begin{proof} Suppose that $M$ has a symplectic form $\omega$. Since $n=2$, we need to study the coeffective numbers $c_q^{(1)}$ for $q=3,4$, $c_q^{(2)}$ for $q=2,3,4$, $\hat c_1$ and $\hat c_2$. Lemma~\ref{igualdades-general}~(i), Proposition~\ref{igualdades}~(i) and Corollary~\ref{n_n-1_coeffective} imply that $c_{2}^{(2)}=b_2$, $c_{3}^{(1)}=c_{3}^{(2)}=b_3$, $c_{4}^{(1)}=c_{4}^{(2)}=b_4$, $\hat c_{1}=b_1$ and $\hat c_{2}=b_2-1$. 
Therefore, $M$ cannot be \texttt{c}-flexible and (i) is proved. By Lemma~\ref{igualdades-general}~(ii) we have $\check{c}_q^{(k)}=c_{q-2k+1}^{(k)}$ for $k=1,2$ and for any $3+k\leq q\leq 3+2k$, so they are topological invariants. By the duality given in Proposition~\ref{igualdades}~(ii), it remains to study $\check{c}_3^{(1)}$ and $\check{c}_4^{(2)}$. Since $h_q=b_q$ for $q=0,1,2$ and $4$, the first two equalities in Corollary~\ref{rel_armonica_coef-2-cor} imply that $\check c_{4}^{(2)}=b_1$ and $\check{c}_3^{(1)}=b_2-b_0+b_3-h_3$, i.e. $$ \check{c}_3^{(1)}=b_1+b_2-h_3-1. $$ Therefore, $M$ is \emph{\texttt{f}}-flexible iff $M$ is \emph{\texttt{h}}-flexible, since $\check{c}_3^{(1)}(M,\omega_t)$ varies along a family of symplectic forms $\omega_t$ iff $h_3(M,\omega_t)$ varies. \end{proof} The previous proof shows that the fundamental relation between flexibilities on a 4-dimensional manifold~$M$ is $$ \check{c}_3^{(1)}(M,\omega_t)=b_1(M)+b_2(M)-h_3(M,\omega_t)-1. $$ \begin{corollary}\label{no-flex-4-dim} Let $M$ be a $4$-dimensional closed manifold. If the first Betti number $b_1(M)\leq 1$ then $M$ is not \emph{\texttt{f}}-flexible. \end{corollary} \begin{proof} Proposition~\ref{igualdades}~(iii) implies that $h_3$ is an even number for any symplectic form. Since $h_3\leq b_3=b_1\leq 1$ by Poincar\'e duality and $h_3$ is even, we get $h_3=0$, and therefore $M$ cannot be \emph{\texttt{f}}-flexible. \end{proof} In particular there do not exist simply-connected closed 4-manifolds which are \emph{\texttt{f}}-flexible. On the other hand, Yan \cite{Yan} proved that there are no \emph{\texttt{h}}-flexible 4-dimensional nilmanifolds. Even more, one can see that the same holds in the bigger class of completely solvable solvmanifolds. For that, by the classification given in \cite[Table 2]{Bock} it remains to check that a solvmanifold based on the Lie algebra $de^1=e^{13}$, $de^2=-e^{23}$, $de^3=de^4=0$ is not \emph{\texttt{h}}-flexible. 
In fact, any invariant symplectic structure is of the form $\omega=A\,e^{12}+B\,e^{13}+C\,e^{23}+D\,e^{34}$, with $AD\not=0$, and the number $h_3$ only depends on the element $[[\omega]]$ in $\mathbb{P}(H^2(M))$ (see Remark~\ref{coef-inv-cohom-class}), so we can suppose that $A=1$ and $B=C=0$ because $[e^{13}]=[e^{23}]=0$. Thus, it suffices to study the family $\omega_t=e^{12}+t\,e^{34}$, with $t\not=0$. A direct calculation shows $$ L_{[\omega_t]}(H^1(M))=\langle [\omega_t\wedge e^3], [\omega_t\wedge e^4] \rangle=\langle [e^{123}],[e^{124}] \rangle=H^3(M), $$ i.e. $h_3(\omega_t)=b_3$ for any $t\not=0$, and therefore the solvmanifold is not \emph{\texttt{h}}-flexible. Hence: \begin{proposition}\label{no-flex-4-dim-solv} Any $4$-dimensional completely solvable solvmanifold is not \emph{\texttt{c}}-flexible, \emph{\texttt{f}}-flexible or \emph{\texttt{h}}-flexible. \end{proposition} However, there exist $4$-dimensional closed manifolds which are \emph{\texttt{h}}-flexible, as was proved in \cite[Corollary 4.2]{Yan} and \cite[Proposition 3.2]{IRTU1}. In fact, if $(M^4,\omega)$ is a closed symplectic manifold satisfying the conditions \begin{enumerate} \item[(i)] the homomorphism $L\colon H^1(M)\longrightarrow H^3(M)$ is trivial, \item[(ii)] the cup product $H^1(M)\otimes H^2(M) \longrightarrow H^3(M)$ is non-trivial, \end{enumerate} then $M$ is \emph{\texttt{h}}-flexible. Since Gompf \cite[Observation 7]{Gompf} proved the existence of 4-manifolds satisfying (i) and (ii), from Theorem~\ref{dim4} it follows that there exist $4$-dimensional closed manifolds which are \emph{\texttt{f}}-flexible. Moreover, taking symplectic products, we arrive at the following existence result: \begin{theorem}\label{existencia-f-flexible-en-cualquier-dimension} For each $n\geq 2$, there exist $2n$-dimensional \emph{\texttt{f}}-flexible closed manifolds. 
More precisely, there exists a $2n$-dimensional closed manifold $M$ with a continuous family of symplectic forms $\omega_t$ such that the dimensions of the primitive cohomology groups $P H^{2}_{d+d^\Lambda}(M,\omega_t)$ and $P H^{2}_{dd^\Lambda}(M,\omega_t)$ vary with respect to~$t$. \end{theorem} \begin{proof} Notice first that $\check c_{2n-1}^{(n-1)}(M)=\dim P H^{2}_{d+d^\Lambda}(M)=\dim P H^{2}_{dd^\Lambda}(M)$, and by Corollary~\ref{rel_armonica_coef-2-cor} we have $\check{c}_{2n-1}^{(n-1)}(M) = b_2(M) + b_{1}(M) - h_{2n-1}(M) -1$. On the other hand, by \cite[Proposition 5.3]{IRTU1} we have the following formula for $h_{2n-1}$ of a product $(M=N_1\times N_2,\ \omega=\omega_1+\omega_2)$ of two symplectic manifolds $(N_1,\omega_1)$ and $(N_2,\omega_2)$ of respective dimensions $n_1$ and $n_2$: $$ h_{2n-1}(M)=h_{2n_1-1}(N_1)+h_{2n_2-1}(N_2), $$ where $n=n_1+n_2$. Now, let $N_1$ be a 4-dimensional closed manifold such that $h_3$ varies along a continuous family of symplectic forms and let $N_2$ be, for instance, any compact K\"ahler manifold. Then, on the product manifold $M$ there is a continuous family of symplectic forms such that $h_{2n-1}(M)$, and so $\check{c}_{2n-1}^{(n-1)}(M)$, varies. \end{proof} It is worthy to remark that the existence of $2n$-dimensional closed manifolds which are \emph{\texttt{h}}-flexible is proved in \cite{IRTU1}, however it is unclear if \emph{\texttt{f}}-flexibility is implied by \emph{\texttt{h}}-flexibility in dimension higher than or equal to 8 (see Proposition~\ref{dim2n} below for the general relation). In contrast, in six dimensions we have: \begin{theorem}\label{dim6} Let $M$ be a $6$-dimensional closed manifold. Then: \begin{enumerate} \item[{\rm (i)}] If $M$ is \emph{\texttt{c}}-flexible then $M$ is \emph{\texttt{f}}-flexible and \emph{\texttt{h}}-flexible. \item[{\rm (ii)}] If $M$ is not \emph{\texttt{c}}-flexible then, $M$ is \emph{\texttt{f}}-flexible if and only if it is \emph{\texttt{h}}-flexible. 
\end{enumerate} \end{theorem} \begin{proof} Suppose that $M$ has a symplectic form $\omega$. Since $n=3$, the coeffective numbers to be studied are: $c_q^{(1)}$ for $q=4,5,6$, $c_q^{(2)}$ for $q=3,4,5,6$, $c_q^{(3)}$ for $q=2,3,4,5,6$, and $\hat c_1$, $\hat c_2$, $\hat c_3$. Corollary~\ref{n_n-1_coeffective} implies that $\hat c_1,\hat c_2$ and all $c_{q}^{(k)}$ for $k=2,3$ are topological invariants. Moreover, by Proposition~\ref{igualdades}~(i) we have $c_{q}^{(1)}=b_q$ for $q=5,6$. Now, the formula for $\chi^{(1)}$ given in Proposition~\ref{invtopologicos} implies that \begin{equation}\label{fund-ec-dim6-1} \hat c_{3}=c_{4}^{(1)}+b_3-b_2-b_1+1. \end{equation} Therefore, $M$ is \texttt{c}-flexible if and only if $M$ possesses a continuous family of symplectic forms $\omega_t$, $t\in [a,b]$, such that $\hat c_{3}(M,\omega_a)\not=\hat c_{3}(M,\omega_b)$. By Lemma~\ref{igualdades-general}~(ii) we have $\check{c}_q^{(k)}=c_{q-2k+1}^{(k)}$ for $k=1,2,3$ and for any $4+k\leq q\leq 5+2k$, so they are topological invariants except possibly $\check{c}_5^{(1)}$, which satisfies \begin{equation}\label{fund-ec-dim6-2} \check{c}_5^{(1)}=c_{4}^{(1)}. \end{equation} By the duality given in Proposition~\ref{igualdades}~(ii), it remains to study $\check{c}_4^{(1)}$ and $\check{c}_5^{(2)}$. Since $h_q=b_q$ for $q=0,1,2,6$, the equalities in Corollary~\ref{rel_armonica_coef-2-cor} together with Theorem~\ref{rel_armonica_coef} imply the following relations \begin{equation}\label{fund-ec-dim6-3-4-5} \hat c_{3}=h_3-h_5,\quad\ \ \hat c_{3}=\check{c}_4^{(1)}+h_4-b_2,\quad\ \ \check{c}_5^{(2)}=-h_5+b_2+b_1-1. \end{equation} Therefore, the fundamental equalities that relate the different cohomologies for closed 6-dimensional manifolds are \eqref{fund-ec-dim6-1}--\eqref{fund-ec-dim6-3-4-5}. Now, using these relations, a direct argument shows (i) and (ii). 
\end{proof} \begin{corollary}\label{dim6-cor} A $6$-dimensional closed manifold is \emph{\texttt{f}}-flexible if and only if it is \emph{\texttt{h}}-flexible. \end{corollary} \begin{remark}\label{no-reciproco} {\rm Notice that there exist closed 6-dimensional manifolds which are \emph{\texttt{f}}-flexible and \emph{\texttt{h}}-flexible, but not \emph{\texttt{c}}-flexible; that is to say, the converse to (i) in Theorem~\ref{dim6} does not hold in general. Explicit examples of nilmanifolds satisfying this are given in Section~\ref{sec_ex}. } \end{remark} In higher dimension we have the following result: \begin{proposition}\label{dim2n} Let $M$ be a closed manifold of dimension $2n\geq 8$. If $M$ is \emph{\texttt{c}}-flexible then $M$ is \emph{\texttt{f}}-flexible or \emph{\texttt{h}}-flexible. \end{proposition} \begin{proof} If $M$ possesses a continuous family of symplectic forms $\omega_t$, $t\in [a,b]$, such that $c_{q}^{(k)}(M,\omega_a)\not=c_{q}^{(k)}(M,\omega_b)$ for some $1\leq k\leq n$ and $n-k+2\leq q\leq 2n$, then it is clear that $M$ is \emph{\texttt{f}}-flexible by Lemma~\ref{igualdades-general}~(ii). If $M$ possesses a continuous family of symplectic forms $\omega_t$, $t\in [a,b]$, such that $\hat c_{n-k+1}(M,\omega_a)\not=\hat c_{n-k+1}(M,\omega_b)$ for some $1\leq k\leq n$, then Theorem~\ref{rel_armonica_coef} implies that $h_{n-k+1}(M,\omega_a)\not=h_{n-k+1}(M,\omega_b)$ or $h_{n+k+1}(M,\omega_a)\not=h_{n+k+1}(M,\omega_b)$, therefore $M$ is \emph{\texttt{h}}-flexible. \end{proof} \section{Symplectic 6-dimensional nilmanifolds}\label{sec_ex} \noindent In this section we present a complete study of the dimensions of the harmonic, coeffective and filtered cohomology groups of 6-dimensional symplectic nilmanifolds, see Table~1 below. The symplectically harmonic numbers $h_4$ and $h_5$ were first computed in~\cite{IRTU1}, whereas $h_3$ was obtained in~\cite{IRTU2} (see Remark~\ref{mistakes} for corrections). 
As a consequence of our study, we describe all \emph{\texttt{c}}-flexible, \emph{\texttt{h}}-flexible or \emph{\texttt{f}}-flexible 6-dimensional nilmanifolds. In the proof of Theorem~\ref{dim6} we found that for 6-dimensional closed symplectic manifolds the fundamental equalities that relate the different cohomologies are \eqref{fund-ec-dim6-1}--\eqref{fund-ec-dim6-3-4-5}. In addition, since the Euler characteristic of a nilmanifold vanishes, we have that $b_3=2(b_2-b_1+1)$. Therefore, relations \eqref{fund-ec-dim6-1}--\eqref{fund-ec-dim6-3-4-5} for 6-dimensional symplectic nilmanifolds are \begin{equation}\label{fund-ec-dim6-nilv} \hat c_{3}=c_{4}^{(1)}\!+\!b_2\!-\!3 b_1\!+\!3,\ \ \ h_3=\hat c_{3}\!+\!h_5,\ \ \ \check{c}_4^{(1)}=\hat c_{3}\!-\!h_4+b_2,\ \ \ \check{c}_5^{(1)}=c_{4}^{(1)},\ \ \ \check{c}_5^{(2)}=-h_5\!+\!b_2\!+\!b_1\!-\!1. \end{equation} Recall that $c_{4}^{(1)}$, $\check{c}_4^{(1)}$ and $\check{c}_5^{(2)}$ are by Remark~\ref{comparison with filtered} dimensions of primitive cohomology groups; concretely, $c_{4}^{(1)}=\dim PH^2_{\partial_-}(=\dim PH^2_{\partial_+})$, $\check{c}_4^{(1)}=\dim PH^3_{d+d^\Lambda}(=\dim PH^3_{d d^\Lambda})$ and $\check{c}_5^{(2)}=\dim PH^2_{d+d^\Lambda}(=\dim PH^2_{d d^\Lambda})$. It follows from Propositions~\ref{calculo-coef-solvariedades} and~\ref{calculo-filtered-solvariedades} that the calculation of all the cohomology groups reduces to the Lie algebra level. In Table~1 nilmanifolds of dimension 6 admitting symplectic structure appear lexicographically with respect to the triple $(b_1,b_2,6-s)$, where $b_1$ and $b_2$ are the Betti numbers (first two columns in the table) and $s$ is the step length (third column). 
The fourth column contains the description of the structure of the nilmanifold; for instance, the notation $(0,0,12,13,14,15)$ means that there exists a basis $\{e^i\}_{i=1}^6$ of (invariant) 1-forms such that $$ d e^1=d e^2=0,\quad d e^3=e^1 \wedge e^2,\quad d e^4=e^1 \wedge e^3,\quad d e^5=e^1 \wedge e^4,\quad d e^6=e^1 \wedge e^5. $$ \par The next columns show the dimensions of the non-trivial harmonic, coeffective and filtered cohomology groups, that is, $h_k$ $(k=3,4,5)$, $\hat c_3$, $c_4^{(1)}(=\check{c}_5^{(1)})$, $\check c_4^{(1)}$ and $\check c_5^{(2)}$. Moreover, the columns contain all the possible values when $\omega$ runs over the space ${\mathcal S}$ of all invariant symplectic structures on the nilmanifold. The last column shows the dimension of the space ${\mathcal S}$; however, the cohomology groups only depend on the cohomology class of the symplectic form. This fact allows us to reduce calculations to a smaller number of parameters, and furthermore by Remark~\ref{coef-inv-cohom-class} we can always normalize one of the non-zero coefficients that parametrize the classes of the symplectic forms. When there are variations in the dimensions of the cohomology groups, they appear in the table written according to the lower-semicontinuous property \eqref{lower-semicontinuous} of the harmonic numbers $h_4$ and $h_5$, or to the upper-semicontinuous property of $\hat c_3$, $c_4^{(1)}$, $\check c_4^{(1)}$ and $\check c_5^{(2)}$ (see Proposition~\ref{upper-semicontinuous}). Notice that the harmonic number $h_3$ does not satisfy any lower or upper semicontinuous property. Moreover, the variations in the dimensions of the cohomology groups are in correspondence (in the sense that we explain in Example~\ref{h24} below), except for the nilmanifolds $(0,0,0,12,13,23)$ and $(0,0,0,12,13,14+23)$ (see Example~\ref{ex_h11} for details on the latter). \medskip The following result is a direct consequence of Table 1.
\begin{theorem}\label{flexdim6clasif} \ \begin{enumerate} \item[{\rm (i)}] There exist seven nilmanifolds of dimension~$6$ that are \emph{\texttt{c}}-flexible (and therefore \emph{\texttt{f}}-flexible and \emph{\texttt{h}}-flexible). \item[{\rm (ii)}] There exist three nilmanifolds of dimension~$6$ that are \emph{\texttt{f}}-flexible and \emph{\texttt{h}}-flexible, but not \emph{\texttt{c}}-flexible. \end{enumerate} In conclusion, there exist ten $6$-dimensional nilmanifolds that are \emph{\texttt{f}}-flexible and \emph{\texttt{h}}-flexible. \end{theorem} Notice that from (ii) it follows that the converse of Theorem~\ref{dim6} does not hold, that is, \emph{\texttt{c}}-flexibility is the strongest condition. \bigskip \smallskip \begin{center} \renewcommand{\arraystretch}{1.5} {\tiny \def\hb{\bf--}{\hb{\bf--}} \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|c|} \hline $b_1$&$b_2$&$6\!-\!s$&Structure& $h_3$& $h_4$&$h_5$ & $\hat c_3$&$c^{(1)}_4$&$\check c^{(1)}_4$&$\check c^{(2)}_5$&$\dim {\mathcal S}$\\ \hline 2&3&1&(0,0,12,13,14,15)&3&2&0& 3&3&4 &4 & 7\\ 2&3&1&(0,0,12,13,14+23,24+15)&4,3 &2&0&4,3&4,3&5,4&4 & 7\\ 2&3&1&(0,0,12,13,14,23+15)&3&2&0& 3&3&4& 4 &7\\ 2&4&2&(0,0,12,13,23,14)&4 &4&0& 4&3&4&5 &8\\ 2&4&2&(0,0,12,13,23,14-25)&4 &2,3,4&0& 4&3&6,5,4& 5 &8\\ 2&4&2&(0,0,12,13,23,14+25)&4 &4&0& 4&3&4& 5 &8\\ \hline 3&4&2&(0,0,0,12,14-23,15+34)& 2 &2&0& 2&4&4& 6 &7\\ 3&5&2&(0,0,0,12,14,15+23)&5&4&2&3&4& 4& 5 &8\\ 3&5&2&(0,0,0,12,14,15+23+24)&4,5 &3,4&0,2& 4,3&5,4&6,4& 7,5 &8\\ 3&5&2&(0,0,0,12,14,15+24)&5&4 &2 & 3&4&4&5 & 8\\ 3&5&2&(0,0,0,12,14,15)&5&4&2 &3&4& 4& 5 &8\\ 3&5&3&(0,0,0,12,13+42,14+23)&5 &3 &0 & 5&6&7& 7 &8\\ 3&5&3&(0,0,0,12,14,13+42)&5 &3 &0 &5&6&7& 7 &8\\ 3&5&3&(0,0,0,12,13+14,24)&5 &2,3 &0 &5& 6&8,7&7 &8\\ 3&6&3&(0,0,0,12,13,14+23)&7,6,5 &3,4 &0 & 7,6,5&7,6,5&9,8,7& 8 &9\\ 3&6&3&(0,0,0,12,13,24)&6,5 &5 &0 & 6,5&6,5&7,6&8 & 9\\ 3&6&3&(0,0,0,12,13,14)&6,5 &4&0 &6,5&6,5&8,7&8 & 9\\ 3&8&4&(0,0,0,12,13,23)&10,9&7,8&0 &10,9&8,7&10&10 & 9\\ \hline 
4&7&3&(0,0,0,0,12,15)&6&3&2& 4&6&8&8 & 9\\ 4&7&3&(0,0,0,0,12,14+25)&7,6&4&2&5,4&7,6&8,7& 8 &9\\ 4&8&4&(0,0,0,0,13+42,14+23)& 8&7&2& 6&7&7& 9 &10\\ 4&8&4&(0,0,0,0,12,14+23)& 8&6 &2 &6& 7&8& 9 &10\\ 4&8&4&(0,0,0,0,12,34)& 8&7 &2 & 6&7&7& 9 &10\\ 4&9&4&(0,0,0,0,12,13)& 10&7,8 &2 & 8&8&10,9& 10 &11\\ \hline 5&11&4&(0,0,0,0,0,12)&13 & 9 &4 & 9&10&11& 11 &12\\ \hline 6&15&5&(0,0,0,0,0,0)& 20 & 15 &6& 14&14&14& 14 &15\\ \hline \end{tabular}} \medskip } \smallskip {\bf Table 1.} Symplectic invariants of six-dimensional nilmanifolds \end{center} \medskip \smallskip \begin{remark}\label{mistakes} {\rm In \cite{IRTU1,IRTU2} the following symplectically harmonic numbers need correction: $\bullet$ for $(0,0,12,13,14,15)$ the number $h_4$ is equal to 2 (not 3); $\bullet$ for $(0,0,12,13,14,23+15)$ the number $h_3$ is equal to 3 (not 2); $\bullet$ for $(0,0,0,12,14,15+23)$ the number $h_3$ is equal to 5 (not 4); $\bullet$ for $(0,0,0,0,12,14+25)$ the number $h_4$ is equal to 4 (not 3). } \end{remark} \begin{example}\label{h24} {\rm Let us consider the $6$-dimensional nilmanifold $(0,0,0,12,14,15+23+24)$. According to Table~1, this manifold is \emph{\texttt{c}}-flexible, \emph{\texttt{f}}-flexible and \emph{\texttt{h}}-flexible. In fact, consider the following continuous family of symplectic structures $$[\omega_t]=(1-\cos t)[e^{13}] - \cos t\,[e^{16}+e^{25}-e^{34}]+(1-\cos t)[e^{26}-e^{45}],\quad t\in\R.$$ This family was constructed first in~\cite{IRTU1,IRTU2} to show \emph{\texttt{h}}-flexibility, and the symplectic structures $\omega_{t=0}$ and $\omega_{t=\frac\pi 2}$ were considered in~\cite{TY2} concerning the dimension of the primitive group $PH^2_{\partial_-}$, i.e. $c_4^{(1)}$. This 6-dimensional nilmanifold is the only one where all the non-trivial coeffective, harmonic and primitive numbers vary. 
In Table 1 the variations are in correspondence as follows: \smallskip \noindent $\bullet$ $h_3(\omega_{2\pi k})=4$,\, $h_4(\omega_{2\pi k})=3$,\, $h_5(\omega_{2\pi k})=0$,\, $\hat c_3(\omega_{2\pi k})=4$,\, $c_4^{(1)}(\omega_{2\pi k})=\check c_5^{(1)}(\omega_{2\pi k})=5$,\, $\check c_4^{(1)}(\omega_{2\pi k})=6$, \smallskip \hskip-.1cm $\check c_5^{(2)}(\omega_{2\pi k})=7$,\ for any integer $k$; \smallskip \noindent $\bullet$ $h_3(\omega_{t})=5$,\, $h_4(\omega_{t})=4$,\, $h_5(\omega_{t})=2$,\, $\hat c_3(\omega_{t})=3$,\, $c_4^{(1)}(\omega_{t})=\check c_5^{(1)}(\omega_{t})=4$,\, $\check c_4^{(1)}(\omega_{t})=4$,\, $\check c_5^{(2)}(\omega_{t})=5$, \smallskip \hskip-.1cm for $t\not= 2\pi k$. } \end{example} \begin{example}\label{ex_h11} {\rm Let us consider the $6$-dimensional nilmanifold $(0,0,0,12,13,14+23)$. The de Rham class of any symplectic form is given by $$[\omega]=A\,[e^{14}] + B\,[e^{15}] +C\,[e^{24}] +D\,[e^{35}] +E\,[e^{16}+e^{25}] +F\,[e^{16}-e^{34}],$$ where $(E+F)(CD+EF)\neq 0$. Direct computations show that $\hat c_3$ and $h_4$ vary as follows: $$\hat c_3=\begin{cases}7,\,\, \mbox{ if } D=E+2F=0,\\ 6,\,\,\mbox{ if } D=0,\, E+2F\neq 0,\\ 5,\,\,\mbox{ if } D\neq 0, \end{cases} \quad\quad h_4=\begin{cases}3,\,\,\mbox{ if }(E+F)^2=CD+EF,\\ 4,\,\,\mbox{ if }(E+F)^2\neq CD+EF. \end{cases}$$ This nilmanifold satisfies $b_2=3b_1-3$, so \eqref{fund-ec-dim6-nilv} implies that $\check c_5^{(1)}=c_4^{(1)}=\hat c_3$. Moreover, $h_5=0$ from which we get that $\check c_5^{(2)}=8$ and $h_3=\hat c_3$. Hence, using that $\check c_4^{(1)} = b_2-h_4+ \hat c_3$ by \eqref{fund-ec-dim6-nilv}, we arrive at $$\check c_4^{(1)}=\begin{cases} 9,\,\, \mbox{ if } D=E+2F=0,\\ 8,\,\, \mbox{ if } D=0,\, E+2F\neq 0,\ \mbox{ or }\ D\neq 0, \,(E+F)^2=CD+EF,\\ 7,\,\, \mbox{ if } D\neq 0,\, (E+F)^2\neq CD+EF. \end{cases}$$ As a consequence, concrete families can be constructed. 
Let us consider the two-parametric family $$[\omega_{t,s}]=t\,[e^{35}] +(s+2)\,[e^{16}+e^{25}] -\,[e^{16}-e^{34}],$$ where $t,s\geq 0$. Then, the variations of the dimensions are: \smallskip \noindent $\bullet$ $h_3(\omega_{0,0})=\hat c_3(\omega_{0,0})=c_4^{(1)}(\omega_{0,0})=\check c_5^{(1)}(\omega_{0,0})=7$ and $\check c_4^{(1)}(\omega_{0,0})=9$; \smallskip \noindent $\bullet$ $h_3(\omega_{t,0})=\hat c_3(\omega_{t,0})=c_4^{(1)}(\omega_{t,0})=\check c_5^{(1)}(\omega_{t,0})=5$ and $\check c_4^{(1)}(\omega_{t,0})=7$, for $t>0$; \smallskip \noindent $\bullet$ $h_3(\omega_{0,s})=\hat c_3(\omega_{0,s})=c_4^{(1)}(\omega_{0,s})=\check c_5^{(1)}(\omega_{0,s})=6$ and $\check c_4^{(1)}(\omega_{0,s})=8$, for $s>0$. \medskip On the other hand, if we consider the family $$[\omega_{t}]=[e^{24}] +[e^{35}] +t\,[e^{16}+e^{25}] +[e^{16}-e^{34}],$$ where $t\geq 0$, then the variations are: \smallskip \noindent $\bullet$ $h_4(\omega_{0})=3$ and $\check c_4^{(1)}(\omega_{0})=8$; \smallskip \noindent $\bullet$ $h_4(\omega_{t})=4$ and $\check c_4^{(1)}(\omega_{t})=7$, for $t>0$. } \end{example} \medskip For compact K\"ahler manifolds $(M^{2n},\omega)$ and for every $q\geq n+1$, the coeffective cohomology group $H^q_{(1)}(M)$ is isomorphic to the \emph{$[\omega]$-truncated $q$-th de Rham group} $\widetilde{H}^q_{[\omega]}(M)=\{[\alpha]\in H^q(M) \mid [\alpha]\cup[\omega]=0\}$, although Fern\'andez, Ib\'a\~nez and de Le\'on showed that this is no longer true for arbitrary compact symplectic manifolds~\cite{FIL1}. Kasuya has studied in \cite{Kasuya} certain symplectic aspherical (non-K\"ahler) manifolds for which \begin{equation}\label{isotrun} H^q_{(1)}(M)\cong \widetilde{H}^q_{[\omega]}(M) \quad \mbox{ for every } q\geq n+1. 
\end{equation} For 6-dimensional symplectic nilmanifolds one has the following result, which suggests that such an isomorphism might be closely related to a low step of nilpotency: \begin{proposition}\label{isotruncada} Let $M$ be a symplectic $s$-step nilmanifold of dimension~$6$. \begin{enumerate} \item[{\rm (i)}] If $s\leq 2$, then there exists a symplectic form on $M$ satisfying $\eqref{isotrun}$. \item[{\rm (ii)}] If $s=5$ then $\eqref{isotrun}$ is never satisfied. \end{enumerate} \end{proposition} \begin{proof} Since $n=3$ we only need to consider $q=4$. Thus, $\eqref{isotrun}$ holds if and only if $c_4^{(1)}(M)=\dim \widetilde{H}^4_{[\omega]}(M)=b_2(M)-1$, the latter equality coming from the fact that $L(H^4(M))=H^6(M)$. Now, the result is a direct consequence of Table 1. \end{proof} Notice that there are several 3-step and several 4-step symplectic nilmanifolds of dimension $6$ satisfying~\eqref{isotrun}. In fact, all the cases in Table 1 where $c_4^{(1)}=b_2-1$ have this property. \begin{remark} {\rm Concerning other (non primitive) cohomology groups, we recall that in \cite[Table 3]{Angella-K} the dimensions of the symplectic Bott-Chern and Aeppli cohomologies for a particular choice of symplectic structure on each 6-dimensional nilmanifold have been computed. } \end{remark} \section*{Acknowledgments} The work was partially supported through Project MICINN (Spain) MTM2011-28326-C02-01.
{ "redpajama_set_name": "RedPajamaArXiv" }
971
- [Getting Started](/) - [Examples](https://github.com/dolanmiu/docx/tree/master/demo) - API - [Documentation](https://docx.js.org/api/) - Usage - [Document](usage/document.md) - [Sections](usage/sections.md) - [Paragraph](usage/paragraph.md) - [Text Frames](usage/text-frames.md) - [Symbols](usage/symbols.md) - [Text](usage/text.md) - [Images](usage/images.md) - [Headers & Footers](usage/headers-and-footers.md) - [Bullet Points](usage/bullet-points.md) - [Hyperlinks](usage/hyperlinks.md) - [Numbering](usage/numbering.md) - [Tables](usage/tables.md) - [Tab Stops](usage/tab-stops.md) - [Table of Contents](usage/table-of-contents.md) - [Page Numbers](usage/page-numbers.md) - [Change Tracking](usage/change-tracking.md) - [Math](usage/math.md) - [Comments](usage/comments.md) - [Footnotes](usage/footnotes.md) - [Fields](usage/fields.md) - Styling - [Styling with JS](usage/styling-with-js.md) - [Styling with XML](usage/styling-with-xml.md) - Exporting - [Packers](usage/packers.md) - Utility - [Convenience functions](usage/convenience-functions.md) - [Contribution Guidelines](contribution-guidelines.md)
{ "redpajama_set_name": "RedPajamaGithub" }
7,976
\section{Introduction} The ubiquitous emulsion is a metastable material where droplets of one fluid are suspended in another. All emulsions in practical use require surfactants in the form of amphiphilic molecules or, in the case of solid-stabilized or Pickering emulsions, in the form of nano- or microparticles. In both cases the surfactants form an interfacial layer, which extends the emulsion lifetime: molecular surfactants greatly reduce surface tension, while in solid-stabilized emulsions superlative stability is achieved by particles' strong adsorption to interfaces \cite{binks2002}. However, the interfacial layer is itself a complex material of interacting molecules or particles, loosely confined to a two-dimensional curved interface. Beyond the desired stabilizing effect, the interfacial layer can impart spatially varying mechanical properties onto the interface and influence the morphology of emulsion droplets. In common experience, a droplet minimizing its surface area adopts a perfectly spherical shape. While emulsion droplets usually conform to this expectation, at low temperatures, the surface tension can vanish and effects from a hexatically ordered interfacial layer dominate, inducing polyhedral droplets \cite{marin2020, guttman2016}. Factors such as a negative surface tension and gravity \cite{garcia-aguilar2021} induce further exotic droplet morphologies such as flattened polygons, rods, and protrusions. Solid-stabilized emulsions, whose stabilization mechanism differs, can form similarly off-spherical facetted shapes at room temperature \cite{abkarian2007}. Despite differences in energy scales, length scales, and driving mechanisms, the icosahedral facetted morphology in several experimental systems is induced by the interaction of hexatic order with a spherical surface topology. The positions of particles or molecules on a two-dimensional surface will, at low temperature, arrange in a way locally resembling a hexagonal close packing.
The phase, with quasi-long-range correlation in the orientation of the pattern, is known as hexatic. Just as a sphere cannot be covered by a vector field without two point-defects, where the vector field diverges, it cannot be smoothly covered by hexatic orientational order. The necessary total number of defects is commonly realized in the form of twelve defect sites, where a particle has only five neighbors, in an icosahedral arrangement on the sphere. The presence of twelve topologically induced defects can be seen directly when low-temperature droplets adopt a facetted icosahedral morphology \cite{guttman2016}. In contrast, order on a cylindrical surface has no topologically mandatory defects. While uniform hexatic order on a cylindrical surface has a complex effect on instability and dynamics, as studied by Lenz and Nelson \cite{lenz2003}, no phenomenon comparable to faceting is predicted in the linear analysis. Examples of cylinder-like systems with an interfacial layer or membrane are biological lipid nanotubes and their nanotechnological counterpart~\cite{eevans1996}. Long tails are also seen to grow from the above cooled emulsion droplets \cite{guttman2016}. An experimental system of a larger lipid bilayer tube with a dynamic instability was introduced by Bar-Ziv and Moses \cite{bar-ziv1994}. Cylinder-like geometries also occur transiently in most industrial emulsions during the formation process \cite{stone1994}. As a result of arrested coalescence, modulated cylindrical structures can form and persist in emulsions. Through specialized mixing techniques, solid-stabilized emulsions with long-lived elongated morphologies can be manufactured \cite{li2019}. Observing that in a variety of cylindrical systems, a modulated or pearled morphology is induced by factors such as spontaneous curvature or external forces, we examine whether excess defects will appear conditionally on modulated cylinders.
As on toroidal droplets \cite{evans1995, bowick2009}, the appearance of `excess' defects in charge-neutral pairs on cylindrical systems is possible, but not topologically inevitable. A modulated cylindrical shape is seen in a variety of cylinder-like systems, possibly including in the coiled tails of facetted emulsions droplets themselves. Spontaneous curvature has been proposed as a mechanism behind the instability of lipid bilayer tubules as well as cylindrical surfactant micelles \cite{granek1996,chaieb1998}. Isotropic spontaneous curvature can exist due to an asymmetry of layers in a lipid bilayer or due to the geometry of lipid molecules or additives in a lipid monolayer. We here use an off-neutral spontaneous curvature as a representative initial driver of a modulated morphology. First, we establish the energetics of cylinder-like systems dominated by spontaneous curvature only. To see how $n$-atic order (the generalization of hexatic order to order with $n$-fold rotational symmetry) modifies the behavior, we then use a field-theoretic approach to in-plane orientational order to obtain configurations and energetics of $n$-atic order on modulated cylinders. This approach allows us to reveal a polymorphic spectrum of morphologies with different numbers of defects. \section{Geometry and orientational order} Our model surface is a cylinder with a sinusoidal modulation in radius. In three-dimensional space spanned by a cylindrical polar coordinate basis ($\bf{e}_{\rho}$, $\bf{e}_{\theta}$, $\bf{e}_{z}$), the surface of revolution is described by the parametric equation \begin{equation} \begin{aligned} \label{eq:surface} \bf{x}(\theta, z)&= r(a) (1+a \sin (kz)) \bf{e}_{\rho}+ z \bf{e}_{z} \\ \end{aligned} \end{equation} The dimensionless parameter $a \in (-1,1)$, the shape amplitude, gives the amplitude of the sinusoidal modulation. The surfaces are subject to a global volume-conserving constraint, $V(a) = V_0$, due to the incompressible inner fluid. 
Consequently the mean radius $r(a)$ in equation \ref{eq:surface} must depend on shape amplitude as \begin{equation} \label{radiusrescaled} r(a) = \dfrac{r_0}{\sqrt{1+a^2/2}}. \end{equation} At $a=0$ the surface is a flat cylinder of radius $r_0$. We define the length unit by setting $r_0=1$. The sinusoidal modulation is additionally characterized by its wavenumber $k$ and wavelength $\lambda=2\pi/k$. \begin{figure} \centering \includegraphics[width=.45\textwidth]{shape_labelled.png} \caption{The model surface, a sinusoid periodic surface of revolution. Positive and negative principal curvatures occur. The sinusoidal modulation has amplitude $ar_0$ and wavelength $\lambda= 2\pi/k$. The mean radius $r(a)$ is chosen to conserve volume relative to a cylinder of radius $r_0$. } \end{figure} We assume the system is closed and periodic, neglecting the topological effects of end-caps or attachment to larger spheroids that may be present in a real emulsion tubule. It thus has topological genus 1, in common with the torus. While a topological sphere has $2n$ topologically mandated defects, the periodic model system has none. In comparable elongated closed vesicles, the $2n$ defects are commonly localized at the end-caps \cite{mesarec2017}. Our periodic system, omitting both end-caps and their defects, facilitates the study of excess defect pairs in isolation. Alternatively our periodic model can represent a bridge between larger fluid reservoirs in a material with a more complex topology, whose Gaussian curvature is similarly omitted along with the associated defects. The external Hamiltonian $\mathcal{H}_E $ describes the energy of the membrane or interface, that is the part of the system energy not due to internal degrees of freedom of the interfacial layer. 
The Helfrich\cite{helfrich1973} Hamiltonian is a widely used estimate of the energetics of cell membranes (lipid bilayers) with surface tension and bending rigidity: \begin{equation} \label{eq:hh} \mathcal{H}_E = \gamma_0 \int_S dS + \frac{\kappa}{2}\int_S (2H-H_0)^2 dS+ \frac{\bar{\kappa}}{2}\int_S K dS \end{equation} with $\gamma_0$ a general microscopic surface tension, $H$ mean curvature, $H_0$ the material's spontaneous total curvature, $K$ Gaussian curvature, and $\kappa$ and $\bar{\kappa}$ two bending rigidities. All integrals are over one period of the surface, $\int_S dS = \int_0^{\lambda} \int_0^{2\pi} d\theta dz \sqrt{g}$. The square root of the metric determinant, $\sqrt{g}=\sqrt{g_{zz} g_{\theta \theta}}$, can be seen as the measure of the integral, describing the relative size of an infinitesimal area element. The metric tensor, in the basis of the cylindrical coordinate system, can be calculated as $g_{ij}= \partial_i \bf{x} \cdot \partial_j \bf{x}$: \begin{equation} \label{eq:g} \begin{aligned} g_{\theta \theta} &= r(a)^2 (1+a \sin(kz))^2\\ g_{zz} &= 1+a^2k^2\cos^2(kz)\\ g_{\theta z} &= g_{z \theta}=0.\\ \end{aligned} \end{equation} We will additionally use the shape tensor $K^i_j$. When diagonal, it gives the two principal curvatures at each point. It can be derived as $K_{ij}=\hat{n} \cdot \partial_i \partial_j \bf{x}$, $K^i_j = K_{kj}g^{ki}$, where $n$ is the unit normal to the surface and the index-raising $g^{ij}$ is the inverse of the metric tensor. Here the principal curvatures are \begin{equation} \begin{aligned} \label{eq:curvaturetensor} &K_{\theta}^{\theta}=\dfrac{-1}{ \sqrt{g}}\\ &K_{z}^{z}=\dfrac{-r(a) a k^2\sin(kz)}{ (g_{zz})^{3/2}} \end{aligned} \end{equation} Since $K^i_j$ is diagonal in our basis, mean curvature $H$ and Gaussian curvature $K$ are given by its trace and determinant as $2H= K_\theta^\theta +K_z^z$ and $K=K_\theta^\theta K_z^z$ respectively. 
For further discussion of geometric quantities and the Helfrich Hamiltonian, see review \cite{kamien2002}. We here examine a surface layer with $n$-atic orientational order. The $n$-atic order parameter represents correlations in, for example, the 1-atic vector direction of the tilt of lipid molecules, 2-atic orientation of a nematic liquid crystal layer, or hexatic order in the orientation of hexagonal arrangements of the positions of particles. Order is described by order parameter field $\Psi(\bf{x})$, which holds information on $n$-atic orientational order of molecules or particles which make up the interfacial layer. From the local structure of the interfacial layer, the complex-valued order parameter field is defined as \begin{equation} \Psi(\bf{x})= \langle e^{i n \phi(\bf{x})} \rangle . \label{eq:psidef} \end{equation} The angle $\phi(\bf{x})$ indicates orientation of a local molecule or particle. This may be the tilt of a molecule ($n=1$), the orientation of a rod-like molecule or particle (nematic, $n=2$), or the direction of an imagined bond between the positions of a particle and its neighbor in a hexatic arrangement ($n=6$). The factor $n$ is the order of the discrete rotational symmetry of the material. The brackets $\langle \rangle$ denote a local spatial averaging. The magnitude as well as the phase of $\Psi(\bf{x})$ may vary, with $|\Psi|=0$ corresponding to the isotropic state. The angle $\phi$ in equation \ref{eq:psidef} is defined with respect to one arbitrarily chosen axis of an intrinsic coordinate system spanned by the unit tangent vectors $\bf{t}$ to the surface. They are, in the basis $\bf{e}_i$, \begin{equation} \label{eq:tangentvectors} \bf{t}_i= \frac{\partial_i \bf{x}}{|\partial_i \bf{x}|}. 
\end{equation} The order parameter field is subject to a Ginzburg-Landau potential, the simplest analytic expression reproducing the desired phase behavior and coupling to surface curvature \begin{equation} \label{eq:hi} \mathcal{H}_I= \int_S dS \left( \alpha |\Psi|^2 + c |D_i\Psi|^2 + \frac{u}{2}|\Psi|^4\right) . \end{equation} Coefficient $\alpha$ is a temperature-dependent material parameter which is negative below the critical temperature; and $c$ and $u$ are positive parameters. The Landau-Ginzburg model for $n$-atic order of membranes on curved interfaces has been introduced for spheres by Park et al. \cite{park1992} and studied on spheres and tori by Evans \cite{evans1995, evans1996}. For a summary of further developments in the study of Landau-Ginzburg $n$-atic order and of other representations of order on a variety of surface shapes, see review by Bowick and Giomi \cite{bowick2009}. The field magnitude $|\Psi|$ is sometimes taken to be constant except for point defects, so that only the gradient energy varies, for example by Lenz and Nelson \cite{lenz2003}, treating the onset of instabilities of spheres and cylinders with hexatic order, or in a treatment by Kumaralageshan et al. \cite{kumaralageshan2017} which showcases the elegant differential geometric solutions enabled by the assumption. We here retain varying magnitude. The more general theory allows us to represent a high-temperature regime where $\alpha$ is close to the critical value \cite{foltin2000}, at the expense of analytical tractability. On flat surfaces and in the absence of thermal fluctuations, well-known solutions minimizing the Landau theory, i.e. Equation \ref{eq:hi} without the gradient term, are a spatially constant field $\Psi(\bf{x})=\Psi_0$ with magnitude \begin{equation} |\Psi_0| = \begin{cases} \sqrt{\frac{-\alpha}{u}}, & \alpha \leq 0,\\ 0, & \alpha>0. \end{cases} \end{equation} and arbitrary phase. 
In this case the material has an energy density \begin{equation} f_0 = \begin{cases} -\frac{\alpha^2}{2u}, & \alpha \leq 0,\\ 0, & \alpha>0; \end{cases} \end{equation} the negative free energy density of the ordered interface (relative to surfactants in the bulk) at low temperatures can act as an effective negative surface tension \cite{guttman2016}. The material has a persistence length \cite{chaikin1995} of \begin{equation} \xi = \begin{cases} \sqrt{\frac{c}{2 |\alpha|}}, & \alpha \leq 0,\\ \sqrt{\frac{c}{\alpha}}, & \alpha>0. \end{cases} \end{equation} On curved surfaces, the field is coupled to surface geometry via the covariant derivative operator \begin{equation} D_i = \partial_i - i n A_i, \end{equation} where $A_i$ is the spin connection, a quantity related to surface shape at each point. The spin connection corrects for deviations in parallel transport on curved surfaces, allowing comparison of the vector field at two distant points. The formulation of the covariant derivative operator, first introduced in this form for the study of $n$-atic material on curved surfaces by Park et al. \cite{park1992}, multiplies the spin connection by a factor of $i n$ to account for the mapping of $1/n$ of a full turn in orientation to a full rotation of complex phase. One way to derive the spin connection is $A_i =\bf{t}_\theta \cdot \partial_i \bf{t}_z$. Starting from Equation \ref{eq:surface} and retrieving tangent vectors via Equation \ref{eq:tangentvectors}, the spin connection for the given surface is \begin{equation} \label{eq:connection} \begin{aligned} A_z&=0\\ A_\theta &= \frac{r(a) a k \cos(kz)}{\sqrt{g_{zz}}}. \end{aligned} \end{equation} \section{Spontaneous curvature} \label{chap:external} For cylindrical systems dominated by surface tension, $\mathcal{H}_E=\gamma \int_S dS$ only, the well-known Plateau-Rayleigh limit of stability is (in units of $1/r_0$) critical wavenumber $k_c=1$. 
The limit of stability can be derived by examining the linearized energy difference associated with a small perturbation. Sinusoidal perturbations with smaller wavenumber (longer wavelength) than the limiting value decrease the energy of the system and therefore grow. For our system, the calculation is repeated using Equation \ref{eq:hh} only (in the absence of orientational order described by Equation \ref{eq:hi}). Our surface is a periodic tube; we will take this model literally and assume it is a closed surface of topological genus $g=1$. On a closed surface of constant topological genus, the total surface integral of Gaussian curvature is a constant $2\pi\chi$, determined completely by the surface's Euler characteristic $\chi = 2-2g$ (Gauss-Bonnet theorem). Thus, the third term in Equation \ref{eq:hh}, relating to total Gaussian curvature, is a constant and will be dropped. Expanding the second term in Equation \ref{eq:hh}, a cross-term $-2\kappa K_\theta^\theta K_z^z$ is also proportional to the Gaussian curvature, thus its integral is also a constant. The external Hamiltonian is reduced to \begin{equation} \begin{aligned} \label{eq:Hcurv} \mathcal{H}_E&=\mathcal{H}_E^{surf}+\mathcal{H}_E^{curv}\\ \mathcal{H}_E^{surf}&=\gamma_0 \int_S dS\\ \mathcal{H}_E^{curv}&=\frac{\kappa}{2} \int_S dS ((K_\theta^\theta)^2+(K_z^z)^2 -2K^\theta_\theta H_0 - 2K_z^zH_0 +H_0^2)\\ \end{aligned} \end{equation} Inserting expressions from Equations \ref{eq:curvaturetensor} and \ref{eq:g}, we expand all analytic functions in Equation \ref{eq:Hcurv} as series in small $a$, and integrate over one period. 
Examining the next-to-leading order term, we retrieve the energy difference on a small sinusoidal perturbation of amplitude $a$ and wavenumber $k$: \begin{equation} \label{eq:diff} \dfrac{\Delta \mathcal{H}_E}{A_0 a^2}= \frac{\gamma}{4} \left( k^2 - 1 \right)+ \frac{1}{8} \left(2 k^4+(4 H_0 -1) k^2 +3 \right) , \end{equation} where $A_0= 4 \pi^2 / k$ is the surface area of a section of length $\lambda=2\pi/k $ of the unperturbed, cylindrical surface. The last term in Equation \ref{eq:Hcurv} has been absorbed into the surface tension $\gamma= \gamma_0 + H_0^2/2$ and the equation has been nondimensionalized by choosing units where $\kappa=1$. Roots of Equation \ref{eq:diff} are the critical wavenumber \begin{equation} \begin{aligned} \label{eq:kc} k_c(H_0) &= \frac{1}{2} \Big( 1-2 \gamma- 4 H_0 \\& \pm \sqrt{8 (2 \gamma -3) + (-1+4 H_0 + 2 \gamma )^2}\Big)^{1/2}. \end{aligned} \end{equation} Equations \ref{eq:diff} and \ref{eq:kc} are known in various forms and special cases in the literature on the pearling instability of lipid bilayer membrane tubules \cite{boedec2014}. The surface tension is composed of the constant $H_0^2/2$ from equation \ref{eq:Hcurv}, a local energy density $f_0$ of the possibly ordered interfacial material as described by Equation \ref{eq:hi}, and any other effects that may be present, represented by $\gamma_0$. For simplicity of calculations we neglect additional effects, such that $\gamma_0=0$ and $\gamma:=f_0 + H_0^2/2$. In the low-temperature isotropic phase $f_0= 0$ and we have only $\gamma:= H_0^2/2$: the effective surface tension is dominated by a constant energy density from spontaneous curvature. \begin{figure} \centering \includegraphics[width=.45\textwidth]{fig2_labelled.pdf} \caption{ Stability as a function of wavenumber and spontaneous curvature in the absence of orientational order. 
The red line $k_c(H_0)$ is the critical wavenumber below which cylinders are linearly unstable according to the perturbative calculation, while background shading indicates shape amplitude $a$ that is the global minimum of $\mathcal{H}_E(a)$ at the given $k$ and $H_0$. (a) Light background shading ($a>0$) and wavenumbers below the critical value $k_c(H_0)$ indicate instability according to both linear and numerical analysis. (b) Black background shading ($a=0$) and wavenumbers larger than $k_c$ indicate that the unperturbed cylinder is stable according to both indicators. (c) Where the two indicators of stability disagree, the cylinder is metastable.} \label{fig:numerical_H0_g0} \end{figure} The critical wavenumber as a function of spontaneous curvature in the absence of orientational order is shown as the line in Figure \ref{fig:numerical_H0_g0}. The system is absolutely stable against shape perturbation of all wavelengths at spontaneous curvatures from $H_0=-1$ to $\sqrt{3}$. While there are two real solutions $k_c(H_0)$ to Equation \ref{eq:diff} for intrinsic curvatures between $H_0=-\sqrt{3}$ and $H_0=-1$, all wavenumbers below the upper curve should be counted as unstable because these long-wavelength systems are unstable against smaller-wavelength shape modulations. At extreme spontaneous curvature of either sign, the effective surface tension $\gamma = \kappa H_0^2/2$ dominates and the system approaches the original Plateau-Rayleigh stability criterion, $k_c = 1$. Orientation of the surface is defined so that the original cylinder has the negative total curvature $2H = -1/r_0= -1$. Surprisingly, a spontaneous curvature with the same sign and slightly larger magnitude has a more prominent destabilizing effect than a positive spontaneous curvature. The maximum critical wavenumber $k_c=\sqrt{3}$ occurs at $H_0 = -3$. The instability can be explained by considering the axial curvature of a sinusoidal perturbation.
The larger parts of the channel have both principal curvatures negative, approaching sphere-like. The positive axial principal curvature on the narrow neck occupies a smaller surface area. We have derived the limit of stability $k_c(H_0)$ by considering the effects of a small-amplitude modulation, $|a| \ll 1$. In addition, going beyond the linear regime we integrate the energy functional semi-numerically. Elliptic integrals were used to integrate terms in (Equation \ref{eq:Hcurv}) over the sinusoidal surface shape where applicable and remaining nontractable terms were integrated numerically. A shape amplitude $a_{min}$ minimizing the energy was found for a grid of values of $(k,H_0)$. The values are shown as the background shading in Figure \ref{fig:numerical_H0_g0}. The region where linear limit of stability and numerical results disagree is to be interpreted as a region where the unmodulated cylinder is a metastable state. In fact this metastability is driven by the effective surface tension term and is known for the classic Plateau-Rayleigh instability. In the classic, surface-tension-dominated case, for a range of wavenumbers $k\geq1$, shape amplitude $|a|=0$ is a metastable local minimum of surface area $A(a)$, a large nonzero shape amplitude is the global minimum. As previously described by Carter and Glaeser \cite{carter1987}, the system is unstable once nucleated with a sufficiently large shape fluctuation $a$; a new criterion for instability can be formulated in terms of both $k$ and $a$. The semi-numerical investigation additionally indicates that, particularly near $H_0=\pm 1$, an intermediate shape amplitude $0<|a|<1$ may be the energetic minimum. The analysis is restricted to a single wavelength at a time; the real system may be unstable against fluctuations of smaller wavelengths. 
In Appendix \ref{chap:pinchoff} we estimate whether a modulated shape will indeed be stable by examining the stability of the narrow neck against fluctuations of smaller wavelengths. For spontaneous curvatures around $H_0=\pm 1$, a slightly modulated channel shape is in fact the stable equilibrium, despite the wavenumber $k<k_c$ being marked as unstable by the linear analysis. \section{Orientational Order} \subsection{Configurations of orientational order} Below the isotropic-$n$-atic transition temperature the coefficient $\alpha$ is negative and the interfacial layer is orientationally ordered. In the azimuthal gradient term of Equation \ref{eq:hi}, \begin{equation} \label{eq:gradient} |D_\theta \Psi|^2 = |\partial_\theta \Psi|^2 + \frac{2 n A_\theta}{r^2(a)} \mathfrak{ Im} ((\partial_\theta \Psi) \Psi^* )+ n^2 |A_\theta|^2 |\Psi|^2, \end{equation} we note that the cross-term can take negative values, suggesting that gradient energy can be decreased by orientational order whose direction rotates as it winds around the cylinder in the azimuthal direction. The factor $g^{\theta\theta}= 1/g_{\theta\theta} = 1/r^2$, explicitly written in the middle term, is also implicitly present in tensor inner products $|X_i|^2 $ in the other two terms. Moreover, the equation suggests that a certain handedness of rotation is selected for. This is a consequence of our choice to represent $n$-atic rotational order by Equation \ref{eq:psidef} rather than its complex conjugate field, or equivalently, to use the charge $n$ rather than $-n$ in the coupling. In reality, states that are solutions to either set of equations occur; the chiral symmetry is spontaneously broken when a vortex state develops. In analogy with superconductors, we distinguish Type I and Type II behavior. 
In Type I superconductors, the material transitions directly from the superconducting state which expels an applied magnetic field ($n$-atic order which expels Gaussian curvature) to a non-superconducting state penetrated by a magnetic field (isotropic state on a modulated surface). In Type II superconductors, on the other hand, there is an intermediate vortex state, where the material is in the superconducting state almost everywhere but its phase (orientation of order) rotates around point defects, at which the magnetic field (Gaussian curvature) is concentrated. It is not surprising that behavior analogous to superconductors emerges, as the description of $n$-atic order coupled to surface curvature is adapted from the Landau-Ginzburg equations for superconductors. The analogy with the Abrikosov vortex state for spheres has been pointed out by Park et al. \cite{park1992}. On near-spherical objects the range of available curvatures is dictated by size and topology, while on our sinusoidal model geometry, a wider range of local curvatures is accessible. The modulated cylindrical model system thus bears a closer, but still constrained, resemblance to superconductors, where arbitrary external magnetic fields may be applied. On the given closed surface, the $n$-atic field can undergo $N/n$ rotations as it winds around the cylinder once, with integer $N$. Such a field is represented by the mode $\Psi = |\Psi| e^{i N \theta }$, with the linearly varying orientation minimizing gradient energy.
Plugging this trial mode into Equation \ref{eq:gradient}, the local azimuthal gradient term is proportional to \begin{equation} \label{eq:localdth} |D_\theta \Psi|^2 = (N+n A_\theta)^2 \frac{|\Psi|^2}{r^2(a)} \end{equation} The first transition, when the energy can be minimized by selecting $N=1$ rather than $N=0$ rotations, occurs at the axial location $kz = m \pi$, where the spin connection attains its maximal value, at wavenumber \begin{equation} \label{eq:ktypeII} k(a) = \frac{\sqrt{2+ a^2}}{a \sqrt{-2 + 8n^2}}. \end{equation} In the case of $n=6$ the lower bound wavenumber for onset of Type II behavior is $\sqrt{3/286} \approx 0.102$, whereas in the case of $n=1$ it is $1/\sqrt{2} \approx 0.707$. Analogous calculations for the transition from other values of $N$ to $N+1$ states suggest that for $1$-atic fields, modes with more than $N=1$ azimuthal rotations are never energetically advantageous, while for hexatic fields the spectrum extends to $N=6$. In other words, on one period of the surface vector order has either $0$ or $4$ defects, while for hexatic order it may have any number $4N$, up to $4N = 24$, of defects. The wavenumber of onset predicted here is a lower bound based on local energy balance at locations $kz = m \pi$. This theoretical lower bound depends on $n$ but not on values of field parameters $\alpha<0$, $c$. The true wavenumber of onset for the whole system will be increased by the interplay of several additional factors, including the energetic cost of defect cores, axial gradients, and the fact that the spin connection is less extreme at other locations; it does depend on coefficients $c$ and $\alpha$. \begin{figure} \centering \includegraphics[width=\textwidth]{new_irrotational_series.pdf} \caption{Examples of Type I behaviour. Here the field configuration remains defect-free on a series of shape amplitudes from $a=0$ to $a=0.9$. 
The averaged field magnitude profile $\langle |\Psi| \rangle _{\theta,t}$ as a function of $z$ is shown as a graph, with a horizontal axis where $|\Psi|=0$ and $|\Psi|=2$ at $a=0$. (a) Magnitude profiles from series of simulations with $n=6, \alpha=-4, c=6.5, k=0.9$. As shape amplitude increases, the field transitions from ordered field to locally depressed, then to everywhere isotropic. The characteristic magnitude profile is most depressed at two locations on either side of the narrow neck. Two simulation snapshots, a banded configuration and the uniform isotropic state, are rendered on the surface shape. (b) Series of magnitude profiles from simulations $n=1, \alpha=-4, c=4.5, k=0.9$. The $1$-atic field is more weakly affected by shape modulations. Inset: Colormap representing the complex-valued field. Saturation, ranging from $0$ to $|\Psi|=2$, indicates field magnitude or amount of $n$-atic order, while hue indicates the phase of the complex field or the direction, modulo $1/n$, of orientational order.} \label{fig:irrotationalseries} \end{figure} When the system is known to be confined to Type I behavior, the equations can be simplified: the field minimizing Equation \ref{eq:hi} does not vary azimuthally, $\partial_\theta \Psi =0$, and the gradient term is reduced to \begin{equation} c|D_i \Psi|^2 = c|\partial_z \Psi|^2 + c n^2 |A_\theta|^2 |\Psi|^2. \end{equation} The second term can be understood as an addition to coefficient $\alpha$, forming the axially varying effective coefficient $\alpha'(z) = \alpha + c n^2 |A_\theta|^2$. The isotropic-$n$-atic transition temperature thus varies locally: it is decreased on regions which are curved in the sense of having a nonzero spin connection. The effect is largest in two regions adjacent to the narrowest location $kz = 3 \pi/2 $: $|A_\theta|^2$ is proportional to $\cos^2 kz $ but also to $1/r^2(z)$.
The resulting field configuration is one with constant phase and axially varying magnitude $|\Psi|(z)$ minimizing the equation \begin{equation} \label{eq:HI1D} \mathcal{H}_I^{N=0} = \int dS \left[ \alpha'(z) |\Psi|^2 + c \partial_z |\Psi|\partial^z |\Psi| +\frac{u}{2}|\Psi|^4 \right] \end{equation} We investigate field configurations on curved surface shapes using lattice-based Markov chain Monte Carlo simulations (Appendix \ref{chap:simulation}). Here, the order parameter field $\Psi(\bf{x})$ is represented as a discretized lattice of complex values. Each simulation represents a system with material parameters $(n, \alpha, c)$ on a fixed surface shape $(k,a)$. For a given field configuration, total field energy $\mathcal{H}_I[\Psi(\bf{x})]$ is calculated numerically as described in Appendix \ref{chap:simulation}. Field configurations are evolved according to a low-temperature Monte Carlo sampling protocol so that the simulation converges on an energy-minimizing configuration. For each surface shape and set of material parameters, we are then able to visually inspect the resulting field configuration as well as extract the minimized energy value. If, for a given set of material parameters and a wavenumber $k$, the field configuration remains vortex-free for all shape amplitudes $0\leq|a|<1$, we classify the material and shape as Type I. Two examples in the Type I regime are shown in Figure \ref{fig:irrotationalseries}. In this regime, increasing amplitude of shape modulations causes the field magnitude to decrease locally, resulting in modulated magnitude profiles $|\Psi|(z)$ minimizing the one-dimensionalized Equation \ref{eq:HI1D}. For large shape deformations the field is isotropic everywhere. On the other hand examples of field configurations from those simulations which displayed Type II behavior are shown in Figure \ref{fig:rotationalseries}. As in the Type I cases, order is locally decreased, especially on either side of the narrow neck. 
At higher shape amplitude $a$ we observe the predicted vortex state: between the widest region and the narrow neck orientational order rotates azimuthally $N$ times around the cylinder; on the other half it winds around the cylinder $N$ times in the opposite direction. There must be $2N$ defects with the total defect charge $\pm 2N/n$ where the counterrotating bands meet at the narrowest and widest regions, for a total of $4N$ defects on the surface and a vanishing total defect charge. The defects on the narrow neck are topologically present but are often visually obscured by an isotropic band. At more extreme curvatures, the defects lie on a line at the narrowest/widest locations, whereas placement is more distributed in cases of less extreme curvature at lower wavenumber. We have not studied the system on the level of defects, rather defects appear as an emergent phenomenon. The analysis and simulation have not been directly informed by laws describing the interaction of defects and Gaussian curvature. It is therefore encouraging that the lattice simulation reproduces the well-known association between defect charge and local Gaussian curvature. Seeing Gaussian curvature as an effective charge, we can apply the effective charge cancellation \cite{mesarec2016} principle, in which the sum of defect charge and Gaussian curvature charge in a small region $s$ approximates a locally neutral total charge: \begin{equation} \label{eq:defectcharge} q M + \frac{1}{2\pi} \int_s dS K \approx 0, \end{equation} where $M$ is the number of defects in a region $s$ and $q = \pm 1/n$ their charge. Equation \ref{eq:defectcharge} can be derived in the special case of our cylinder-like system as follows: As we reasoned above, at each axial location, in an idealized system the local number of azimuthal rotations $N$ takes the integer value most closely minimizing local gradient energy (Equation \ref{eq:localdth}), that is \begin{equation} \label{eq:18repeat} N + n A_\theta \approx 0. 
\end{equation} On the cylinder-like shape, the total number and sign of defects in a region between two axial locations $z_1$ and $z_2$ is the sum of differences in local rotation numbers, $\pm M = \int_{z_1}^{z_2} dz \sqrt{g_{zz}} \partial_z N$. We similarly take an axial derivative and integrate axially over the second term of Equation \ref{eq:18repeat} to obtain \begin{equation} \frac{\pm M}{n} + \int_{z_1}^{z_2} dz \sqrt{g_{zz}} \partial_z A_\theta \approx 0. \end{equation} By relating spin connection to Gaussian curvature via the Mermin-Ho theorem \cite{kamien2002} as $\partial_z A_\theta = \sqrt{g_{\theta \theta}} K$ and by labelling charge $q=\pm 1/n$, we recognize Equation \ref{eq:defectcharge}. The factor of $2\pi$ is equivalent to additionally carrying out an azimuthal integration over the latter term. The number of defects on the wider half of the cylinder predicted via the effective topological charge cancellation mechanism roughly agrees with that from our previous reasoning about rotation numbers $N$ at certain locations of maximal spin connection (Equation \ref{eq:ktypeII}) and with the number of defects appearing in simulation in the examples shown in Figure \ref{fig:rotationalseries}. Like Equation \ref{eq:ktypeII}, the number of defects predicted by topological charge cancellation is an upper bound; for materials with large persistence length fewer defects may be realized. \begin{figure} \centering \includegraphics[width=\textwidth]{new_rotational_series.pdf} \caption{Examples of Type II behavior in hexatic fields. The final field configuration snapshot is shown on the $(z, \theta)$ plane using the complex colormap (Figure \ref{fig:irrotationalseries}, inset) to represent complex values. (a) Series in $a$ with $n=6, \alpha=-4$, $c=1.5$, $k=0.9$.
As shape amplitude increases, the field here transitions from adapting to curvature via defect-free magnitude modulations to a state with $N=\pm2$ discrete azimuthal rotations and finally one with $N=\pm3$ rotations. There are $\Delta N = 2|N|$ visible defects on the widest part of the cylinder, while their counterparts on the narrow neck are often merged to an isotropic band. (b) An example of a more complex banding pattern on a longer cylinder, simulation $n=6, \alpha=-4$, $c=2.5$, $k=0.6$, $a=0.5$.} \label{fig:rotationalseries} \end{figure} \begin{figure} \centering \begin{subfigure}{.45\textwidth} \includegraphics[width=\linewidth]{energy_ka_n1_m1_1.png} \subcaption{$n=1, \alpha=-1$ } \end{subfigure} \begin{subfigure}{.45\textwidth} \includegraphics[width=\linewidth]{energy_ka_n6_m4_1.png} \subcaption{$n=6, \alpha=-4$} \end{subfigure} \caption{Mean gradient energy $\langle E \rangle$ obtained from simulations on an array of surface shapes $(k,a)$. (a) In the $n=1$ case field energy increases monotonically in $a$ for all $k$ examined here. (b) For $n=6$ there are multiple steps in the energy landscape, as the field transitions from the defect-free state to a spectrum of states $N$ with $4N$ defects.} \label{fig:fieldenergy} \end{figure} For an array of simulations on a range of fixed surface shapes $(a,k)$ we collect average field energy $\langle E \rangle$ of the equilibrated simulations. A vectorial and a hexatic example are shown in Figure \ref{fig:fieldenergy}. In general, gradient energy density increases on surface shapes which are more curved in the sense of the spin connection, having larger shape amplitude and wavenumber. For hexatic order, a stepped dependence of gradient energy on shape is apparent. The first discontinuity in energy corresponds to the onset of Type II behavior in the form of the first defect state with $N=1$, with each additional discontinuity corresponding to a transition to the next vortex state.
The wavenumber of onset is about $k=0.2$ in the hexatic example with $c=1$. In an analogous set of simulations with $n=1$ the onset of Type II behavior is not seen for any $k$ sampled here, up to $k=2.0$. While a transition to Type II behavior is theoretically possible at some $k \geq 0.707$, for vector order it apparently occurs at larger wavenumber, smaller field stiffness $c$, or larger alignment $|\alpha|$ than those studied here. \begin{figure} \centering \begin{subfigure}[b]{.45\textwidth} \includegraphics[width=\textwidth]{fig5_n1_fieldST.png} \subcaption{$n=1$} \end{subfigure} \begin{subfigure}[b]{.45\textwidth} \includegraphics[width=\textwidth]{fig5_n6_fieldST.png} \subcaption{$n=6$} \end{subfigure} \caption{Solid colored lines show mean field energy $\mathcal{H}_I(a)$ (per unit length of cylinder) retrieved from simulations on fixed surfaces with $k=0.9$ and a range of $a$. As reference, we show the energy a uniformly ordered field would have on the curved surface shape (colored dotted lines), obtained by semi-numerically integrating Equation \ref{eq:uniformordered}, and the energy of a uniformly isotropic field (black dashed line, zero). (a) For $n=1$, the field is ordered at low $a$ and at higher $a$ adjusts by locally adapting magnitude, lowering energy slightly below that of the perfectly uniform field on the same surface. (b) For $n=6$ the field with high bending rigidity $c$ transitions to the uniformly isotropic state on curved surfaces. For lower $c$ the hexatic field, in addition to locally adapting its magnitude, is able to decrease its energy by adopting rotational states with defects, resulting in a stepped energy response. } \label{fig:energylandscapes} \end{figure} \subsection{Effect of orientational order on shape} We now consider the effect of the interfacial order on the shape modulations. 
First, in line with the previous linear stability analysis of a cylindrical interface with spontaneous curvature, we examine the linear effect of $n$-atic order. Assuming the field is initially ordered ($\Psi(\bf{x}) = \Psi_0$) on the unperturbed cylinder, at small shape perturbations with amplitude $a \ll 1$, it can be shown that the induced changes in field configuration are negligible in terms of their energetic contribution. To leading order the difference in internal energy is the gradient energy difference plus a term proportional to surface area change \begin{equation} \label{eq:chi} \frac{\Delta \mathcal{H}}{A_0 a^2} = \chi k^2 - \frac{\alpha^2}{2u} \Delta A, \end{equation} collecting field characteristics as $\chi=|\alpha|cn^2 /(2u)$. The second term, an energy difference proportional to change in surface area $\Delta A$, will be absorbed into surface tension. Lenz and Nelson \cite{lenz2003}, treating a hexatic field with constant magnitude equivalent to $|\alpha|/u=1$, obtain a linear energy difference equivalent to the first term. Adding the effect of $n$-atic order to the energy difference Equation \ref{eq:diff} and again finding roots, the limit of stability is \begin{equation} \label{eq:kc_field} \begin{aligned} k_c(H_0, \chi) &= \frac{1}{2} \Big( 1-2 \gamma- 4 H_0 - 8 \chi \\&\pm \sqrt{8 (2 \gamma -3) + (-1+4 H_0 + 2 \gamma +8 \chi)^2}\Big)^{1/2}. \end{aligned} \end{equation} with energy densities again in units where $\kappa=1$, $r_0=1$ and with $\gamma$ here representing both the surface energy density $f_0=-\alpha^2/(2 u)$ and an effective surface tension from spontaneous curvature: $\gamma = -\alpha^2/(2 u) +H_0^2/2$. According to the linear analysis, a preferentially ordered field ($\alpha<0$) has a stabilizing effect on the system via the first term of equation \ref{eq:chi}. 
While the second term, a negative effective surface tension from the ordered material, can theoretically induce an inverse Plateau-Rayleigh instability, where short-wavelength fluctuations grow to increase surface area, in the regime $|\alpha| \approx c $ studied here the stabilizing effect is dominant. Examples of the effect of order on critical wavenumber are shown as red lines in Figure \ref{fig:fieldeffect}. As reference energies, we calculate the energy a uniformly ordered field $\Psi(\bf{x})=\Psi_0$ and a uniformly isotropic field $\Psi(\bf{x})=0$ would have on a more heavily modulated surface. The energy a spatially uniform ordered field $|\Psi|(\bf{x})= |\Psi_0|= \sqrt{-\alpha/u}$ would have on the modulated surface, including its contribution via $\gamma$, is \begin{equation} \label{eq:uniformordered} \mathcal{H}_I[\Psi(z)=\Psi_0]= 2\chi \int_S dS |A_\theta|^2 - \frac{\alpha^2}{2u}\int_S dS. \end{equation} The energy a uniformly isotropic field would have on the surface shape is \begin{equation} \label{eq:uniformdisordered} \mathcal{H}_I[\Psi(z)=0] = 0. \end{equation} For a range of shape amplitudes $a$, Equation \ref{eq:uniformordered} was evaluated using elliptic integrals for the second term and numerical integration for the first term. The above analytic upper bounds describe spatially uniform field configurations. In our simulations, as in reality, the field representing orientational order is free to vary spatially. The field was allowed to converge on energy-minimizing configurations in a series of simulations on fixed surfaces with increasing shape amplitudes. In Figure \ref{fig:energylandscapes}, we compare the resulting energy function $\mathcal{H}_I (a)$ from the series of simulations to the analytic reference energies. Unsurprisingly, the energy of the simulated field is lower than that of either of the two uniform reference states.
In Figure \ref{fig:energylandscapes}-b, showing the $n=6$ case, stepped energy graphs are again the signature of Type II behavior. Increasing shape amplitude leads to an increase in gradient energy, alleviated by the introduction of additional vortices. The nonmonotonic dependence of field energy on shape amplitude implies that certain surface shapes $(k,a)$ are more compatible with the ordered interfacial layer than others, so that morphologies will be biased towards a discrete set of shapes. Having gathered an array of field energy values $\mathcal{H}_I(k,a)$ from simulation, we combine these with semi-numerically integrated values $\mathcal{H}_E(k, a, H_0)$ from section \ref{chap:external}. To predict the shape of the cylinder, for each point $(k,H_0)$ in parameter space we search numerically for the shape amplitude $a$ minimizing total system energy $\mathcal{H}(a) = \mathcal{H}_I(a)+\mathcal{H}_E(a)$. The energy-minimizing shape amplitude, as a function of wavenumber and spontaneous curvature, is shown in Figure \ref{fig:fieldeffect} as the shaded background. We compare the linear limit of stability predicted by Equation \ref{eq:kc_field}. Below a certain wavenumber $k \approx 0.215$, where behavior is Type I, the linear limit of stability is a good description and the shape transition is continuous. On the other hand at larger $k$, in the regime of Type II behavior, the transition from flat to modulated cylinders induced by increasing $|H_0|$ is discontinuous. In this regime nonlinear effects, namely the emergence of the vortex state, become important. The phenomenon coincides with and adds to the metastability due to nonlinear surface area changes which was already present in the classical Plateau-Rayleigh case; the metastable region is strongly extended and modified by effects of orientational order. 
\begin{figure} \includegraphics[width=\textwidth]{fig7_H0_fieldST_nocompare_labelled.pdf} \caption{The effect of hexatic order with $c=1$ and $\alpha=-1$ (left) or $\alpha=-4$ (right) on critical wavenumber as a function of spontaneous curvature. The linear limit of stability is suppressed compared to the case with no order (compare Figure 2). There is a qualitative transition from Type I field behavior at low wavenumbers $k_c \lesssim 0.215$, inducing a continuous shape transition adhering to the linear prediction, to Type II behavior at higher wavenumbers. In the latter case the transition is discontinuous and the linear limit of stability gives an incomplete description of the system. We mark the apparent critical endpoints (orange \texttt{x}) separating continuous from discontinuous transitions. As in Figure \ref{fig:numerical_H0_g0}, there are metastability regions (c) where linear analysis indicates perturbative stability of the unmodulated shape but numerics reveal that the global energy minimum of the system is a modulated shape. In metastable Type II systems, the metastability effect is enhanced by coinciding shape and vortex metastabilities and the ultimately stable state is a vortex configuration on a modulated shape.} \label{fig:fieldeffect} \end{figure} \section{Discussion} Cylinder-like vesicles and emulsion structures may be driven to a pearled morphology by factors such as a spontaneous curvature. For a model system that is topologically simple but highly curved, we showcase significant nonlinear effects of local curvature on $n$-atic order and ultimately the effects of that order on the equilibrium morphology. For shapes close to cylindrical, where we can expand linearly around an unmodulated cylindrical shape, $n$-atic order is trivially uniform and defect free. In the regime of relatively high field stiffness studied here, it has a dominant stabilizing effect.
However, taking the possibility of externally induced modulated morphologies into account, strongly curvature-coupled order may interact with modulated surface shapes in complex ways. First, on a surface shape with local Gaussian curvatures the isotropic-$n$-atic transition temperature of the ordered material is locally depressed, inducing a banded state of alternating ordered and isotropic regions. Furthermore, at a threshold curvature, interfacial materials can adopt a vortex state, with orientation of order undergoing maximally $\pm N$ full rotations as the field winds around the cylinder azimuthally and with $4N$ defects. By considering the maximal number of rotations that could be induced locally at the location of maximal spin connection, a lower bound, where the onset of the defect state becomes possible, is identified. Simulations reveal the global emergence of a defect state; the complex interplay of a number of factors increases the wavenumber of onset above the lower bound estimate. The delineation into systems which attain a defect state, analogous to a vortex state in Type II superconductor, from those that transition directly from ordered to isotropic fields, is significant for predicting the morphology of cylinders with both spontaneous curvature and order. In the latter case the transition is continuous and the linearized theory is a good description. The system has a critical endpoint at a certain wavenumber and spontaneous curvature, above which nonlinear effects in the interplay between order and curvature strongly influence morphology. Within the regime of excess defects, there are further transitions between discrete states with $4N$ defects, which will bias the spectrum of morphologies towards a discrete spectrum of wavelength-amplitude combinations. 
In both cases, on certain regions which are more curved in the sense of the spin connection, the isotropic-$n$-atic transition temperature is effectively increased by curvature; order can be thought of as locally `melted' by curvature. Both effects - the quasi-high-temperature phenomenon of decreased order, and the low-temperature state of isolated defects - coexist in the same system. Working with the general formulation of Equation \ref{eq:hi}, rather than a representation as defects in a constant-magnitude field, which is well-suited for the low-temperature regime \cite{foltin2000}, is crucial to revealing this polymorphism. The resulting banded pattern of order and disorder is reminiscent of the banded partitioning of different species of lipid molecule, compatible with different spontaneous curvatures, on modulated cylinders. Cases have been observed experimentally by Yanagisawa et al. \cite{yanagisawa2010} and extensively studied on fixed snowman surfaces by Rinaldin et al \cite{rinaldin2020}. Among other differences the field describing lipid composition has $Z_2$ rather than $O_n$ symmetry and will not form defects. Interestingly it nevertheless has some features in common with the $n$-atic field in its Type I regime. The system has been assumed to obey a Hamiltonian with certain rotational symmetries. i) Mechanical bending rigidities in Equation \ref{eq:hh} are isotropic. In several interesting biological systems, such as cell membranes with curvature-inducing proteins, curvature elasticity can be strongly anisotropic \cite{iglic2005}. ii) In our model the order parameter field is not coupled to any extrinsic or mean curvature terms in Equation \ref{eq:hi}. Such a coupling exists more or less prominently in various systems, from interactions through the bulk phase between hexatically arranged spheres \cite{law2020} to the prominently mean-curvature-inducing properties of ordered domains of inclusions \cite{kralj-iglic2000}. 
The focus on extrinsic curvature coupling has allowed us to study a field theory with unbroken continuous $O_n$ symmetry and the associated vortex state. The research could be extended to systems where the symmetry is broken by additional curvature coupling terms. We expect intermediate behaviour, with a weakened or absent vortex state. The axisymmetric model is incomplete with respect to the shape deformations examined. It is clear that in a similar experimental system as for cooled emulsion droplets, as in spherical droplets faceting effects will become important where there are defects. A more precise study of the relevant shape variations, beyond the scope of the model surfaces examined here, warrants further study. Representing order of particles and flexible interfaces simultaneously in simulation is an ongoing challenge. With the continuum field representation of orientational order, general principles can be explored. A low-temperature lattice-based Monte Carlo simulation, adapted to include multiple effects of an underlying curved surfaces, was used to obtain field configurations in the low-temperature limit. Due to peculiarities of statistical field theories represented on a non-uniform lattice, further modifications are needed before our simulation can accurately represent fluctuations and extend our exploration to the regime of high-temperature sampling. The model and simulation protocol used here allow an efficient exploration of parameter space: taking advantage of the linearly additive formulation of effects of internal and external energies, the model allows combination in post-processing. Simulation results verify and extend the analytical predictions presented here and, crucially, allow us to delineate regions of parameter space where linearized and quasi-one-dimensional descriptions of the system are sufficient from those where nonlinear effects dominate. \begin{acknowledgments} The authors thank Patrick Warren for useful discussion. 
This work was undertaken on ARC3, part of the High Performance Computing facilities at the University of Leeds, UK. JK acknowledges, with thanks, funding from the EPSRC through the SOFI (Soft Matter and Functional Interfaces) Centre for Doctoral Training (grant EP/L015536/1). \end{acknowledgments}
{ "redpajama_set_name": "RedPajamaArXiv" }
9,142
Please provide two references. One must be from another transport company of good repute and one from an industry supplier. We ask for references to ensure we only accept reputable operators who will not bring BCA into disrepute.
{ "redpajama_set_name": "RedPajamaC4" }
7,338
Eagle Scout Ceremony – Eagle Scout Court of Honor Eagle Scout Ceremony This Ceremony is meant for Eagle scouts. [Color guard and troop are in position at the rear of room: American flag, troop flag] SPL: [Step to microphone.] Ladies and gentlemen, please rise for the opening ceremonies. [Wait until the audience is standing and quiet.] SPL: Color guard, present the colors. [Color guard enters, _____ and _____ carry flags, ______ (from the back of the room) plays "to the colors". Troop follows color guard] [color guard proceeds to flag stand, troop occupies the front rows. Reserve seats for color guard] [When color guard is in position] SPL: Please Stand. Scout salute. Everyone please join me in the Pledge of Allegiance I pledge Allegiance to the flag of the United States of America, and to the Republic for which it stands, one Nation under God, indivisible, with liberty and justice for all. SPL: Color guard, post the colors. [Wait until posted] SPL: TWO. Color guard retreat. SPL: Scout Sign. Please join me in reciting the Scout Oath On my honor I will do my best To do my duty to God and my country And to obey the Scout Law; To help other people at all times; To keep myself physically strong, mentally awake, and morally straight. SPL: Two. Please be seated. Good afternoon, my name is _________________. As Senior Patrol Leader for Troop _____, it is my pleasure to call this Eagle Court of Honor to order. I would like to introduce today's master of ceremonies, __________. MC: Thank you. As _________ said, my name is ____________ and I have the pleasure of being Master of Ceremonies for this Eagle Court of Honor to recognize the accomplishments of _________________. To earn the highest rank in Scouting, a Scout must spend a great deal of time and effort fulfilling the requirements of rank advancement. ________'s efforts, as well as those of his Scout leaders and his family, culminate today in the presentation of the Eagle Scout Badge. 
This is an occasion for pride and joy, as well as a time for reflection. At this point I would like to call upon the Troop Chaplain's Aide to ask God's blessing on this ceremony. Troop Chaplain: Lord, you bring us together this afternoon to honor an Eagle Scout. We acknowledge your guidance in helping boys and adults work together to build strong character, physical fitness, ethical leadership and an appreciation of the outdoors. Lord, continue to watch over these boys as they grow into young men, and give the adults the patience and wisdom to guide them. Amen MC: Thank you. Scoutmaster ____________, would you please come forward. SM: Good afternoon Ladies and gentlemen, I am _________________, Scoutmaster of Troop ______. Today I present Eagle Candidate _______________ for Rank of Eagle Scout. SM: As we begin this Eagle Court of Honor and examine the Trail to Eagle, it is fitting that we began today with a Pledge, and an Oath. The Scout Oath that we recited is the guiding principle of Scouting. The Scout Oath sets our internal direction. We declare our duty to God, Country, Others and Self. Also within the Oath are the rules we must follow to provide guidelines to our duties. The rules of Scouting are found in the twelve points of the Scout Law. Let us pay careful attention to the words and re-dedicate ourselves to the principles contained in those words. Scouts, join me in slowly reciting the Scout law. Scout Attention. Scout Sign. [slowly] A scout is trustworthy, loyal, helpful, friendly, courteous, kind, obedient, cheerful, thrifty, brave, clean and reverent. SM: TWO, Thank you, you may take your seats. (As MC lights the unity candle) SM: When a boy joins Boy Scouts, there is within him, something that we call Scout Spirit. The boy may not know that it exists, or know its meaning, but by following the trail to Eagle the scout is enlightened. This single candle, lit before you, represents that spirit. 
The spirit of Scouting embodies the principles of the Scout Oath and the Scout Law. It becomes a shining beacon of inspiration. Alone, this light may seem feeble, but when multiplied by the millions of youth in Scouting around the world, it is powerful indeed. The pathway to Eagle can be described as a steep trail leading up to three peaks, the highest being that of Eagle Scout. [MC lights the first three rank candles.] The trail starts with the Scout rank and continues through Tenderfoot, Second Class, and First Class ranks. During this phase the scouts are given instruction and guidance. They are learning important outdoor skills, including camping, cooking, physical fitness, and developing teamwork and leadership skills. They are also being examined. The adult leaders are watching to see if the flicker of light representing duty, service and law is growing within. Reaching First Class is an important step in a Scout's life. He has demonstrated that he has met the requirements at each rank and is showing SCOUT SPIRIT. Then, the mountain climbing begins. To begin the climb, the scout is first asked, "Are you ready for the next challenge?" Secondly, "Can you and will you teach others what you have learned?" [MC lights Star and Life candles] The path going forward is marked with Service, Leadership, Exploration and a stronger presence of Scout Spirit. The first peak to be reached is the Star Scout. The scout has provided leadership to his troop, he has provided service to fellow scouts and to his community and he is completing merit badges of personal interest as well as required to reach Eagle. At this point the scout is a Star. He has done well. The scouting family expects the light to be shining more brightly. The second peak is the Life Scout. Again the scout must complete several hours of service to the community. The scout must demonstrate additional leadership within the troop. The scout must complete several merit badges. 
And finally the scout must have embodied Scout Spirit in his daily life. The badge for Life Scout contains a heart. This is the only rank position that shows what Boy Scouts is all about and what is inside the scout. This rank badge answers these questions: Does he understand his duty? Does he understand the 12 points of the law? What is in this young man's heart? What could be more important than this rank? The third peak is the Eagle Scout. The scout has demonstrated he is a Star and he has embodied the Scout Spirit in his Life. Now he must be tested on one final climb to see if it is truly part of his character. The requirements are much the same as Star and Life, however the scout must now go into the community, plan, organize, coordinate, and deliver a service project; a demonstration of leadership and character. And one final time the scout must go before a group of adults who focus on his character, his leadership and his spirit. Does he reach the peak? Has he proven himself? Can he leap from the top peak and fly like an Eagle? ______, are you an Eagle? [The Challenge Ceremony] MC: Sir, I have the honor to present Life Scout ____________ for the award of Eagle Scout. [Eagle Candidate goes to the front of the room] MC: Awarding the Eagle Badge of rank is an important and serious matter. It is the goal toward which this Scout has been working for several years. It is the culmination of effort by his parents and Scout Leaders. It is an occasion for pride and for joy, but it is also a time for serious contemplation. The Eagle rank is the highest and most coveted award in all of Scouting, and it is the last major step in the advancement program. If at this point, Scouting has not achieved its purpose of building character:then it probably never shall. These thoughts, which are the basic code of Scouting, are well summed up in the pledge that is taken by every Scout in the council upon advancement to Eagle rank. 
______, I will read the pledge so that you will know that which you are about to promise, and then I will ask you to repeat it after me. MC: (reads entire pledge) I ________, On my honor, I will do my best, to do my duty to God. On my honor, I will do my best, to do my duty to my country I reaffirm my allegiance to the three promises of the Scout Oath I thoughtfully recognize and take upon myself the obligations and responsibilities of the rank of Eagle Scout. On my honor, I will do my best to make my training an example, and make my rank and influence strongly count for better scouting and for better citizenship, in my troop, in my community, and in my contact with other people, regardless of race, color, or creed. To this, I pledge my sacred honor. MC: Having heard the Eagle pledge, are you willing to adopt it? Eagle Candidate: I am. MC: Raise your hand in the Scout sign and repeat after me. (A Life Scout in the audience stands and interrupts.) LIFE SCOUT: Stop! I challenge the right of this Scout to be awarded the rank of Eagle. MC: Who are you and by what right do you challenge? LIFE SCOUT: I am a Life Scout, and my esteem for the Eagle rank gives me the right to so challenge. MC: On what grounds do you challenge? LIFE SCOUT: Has this Scout achieved the requirements in Scoutcraft and Life Interest? MC: His application was verified, and indicates that he has satisfactorily completed the required 21 merit badges in the various fields of endeavor. Are you satisfied Life Scout? LIFE SCOUT: I am. (A Tenderfoot Scout in the audience stands and interrupts.) TENDERFOOT: I, too, challenge the right of this Scout to be awarded the rank of Eagle TENDERFOOT: I am a Tenderfoot Scout, and the respect that I have for the uniform that I wear gives me the right to so challenge. TENDERFOOT: Does his Scoutmaster certify that this Scout has actively participated in Scouting in his troop. 
Has he demonstrated leadership, and done his best to help in his home, school, church, and community? SM: As Scoutmaster of Troop ____, I certify that for more than 6 months since attaining the Life rank, this Scout has held leadership positions in his troop. Also, he has been active in school, church, and community activities. MC: Are you now satisfied? TENDERFOOT: I am. (An Eagle Scout in the audience stands and interrupts.) EAGLE SCOUT #1: I too challenge the right of this Scout to be awarded the rank of Eagle. EAGLE SCOUT #1: I am an Eagle Scout, and the pride that I have in this badge that I wear over my heart gives me the right to so challenge. EAGLE SCOUT #1: Has this Scout, nearing the end of the Eagle trail, demonstrated his Scouting Spirit? Has his ability to live and act in accordance with the ideals of Scouting, as exemplified by the Scout Oath, Law, motto, and slogan been noted? The Scout motto is Be Prepared and the Scout slogan is Do a Good Turn Daily. ADVANCEMENT CHAIR: As Advancement Chair I have reviewed this Scout's record. I certify that, after investigation, interview, and examination, the Eagle board of Review is of the opinion that this Scout has demonstrated that he has held to the spirit of Scouting in his daily living. The board recommends his advancement to Eagle. EAGLE SCOUT #1: I am still not satisfied. I believe that this candidate should understand that the Eagle Rank is a big responsibility, as well as an honor. I respectfully ask that this candidate be informed of the responsibilities of an Eagle Scout before continuing further. MC: Thank you, Eagle Scout. I agree with your feelings and suggest that none are more qualified to impart this than those who wear the Eagle Badge. I invite you and your fellow Eagle Scouts, Mr. ______, Mr. ______ and Mr. ______ to the platform. EAGLE SCOUT #1: The first responsibility of an Eagle Scout is to live with honor, which to an Eagle is sacred. 
Honor is the foundation of character: it is what a person really is, down inside, not what someone may think he is. An Eagle will live so as to reflect credit upon his home, church, school, friends, upon Scouting, and upon himself. May the white of your Eagle badge always remind you to live with honor. EAGLE SCOUT #2: The second responsibility of an Eagle Scout is loyalty, for without loyalty, character lacks direction. An Eagle is loyal to his ideals. May the blue of your Eagle badge always remind you to be loyal. EAGLE SCOUT #3: The third responsibility of an Eagle Scout is courage. Courage gives character force and strength. Trusting in God and with faith in his fellowman, the Eagle faces each day unafraid, and seeks his share of the world's work. May the red of your Eagle badge always remind you of courage. EAGLE SCOUT #4: The final responsibility of an Eagle Scout is service. He extends a helping hand to those who toil up the Scouting trail he has completed, just as others helped him in his achievement of the Eagle rank. The habit of the daily Good Turn must take on new meaning and blossom forth into a life of service. The Eagle protects and defends the weak and the helpless. He aids and comforts the oppressed and the unfortunate. He upholds the rights of others while defending his own. His code of honor is based upon the belief that leadership is founded upon real service. EAGLE SCOUT #1: Mr. Chairman, if this candidate is willing and eager to accept the mantle of responsibility, as well as the honor of the badge, then I will be satisfied and request that you proceed to administer the Eagle pledge. MC: (To Eagle candidate) Are you ready and willing to accept these responsibilities and to adopt the Eagle pledge which I have previously read to you? MC: The Eagle Scout candidate will now take the Eagle Scout pledge. 
As the candidate takes this pledge for the first time, will those Eagle Scouts who are present in the audience please stand and reaffirm their pledge? Would those in attendance also introduce yourself and when you received your rank of Eagle? (All Eagles stand and introduce themselves) MC: Thank you. And now, Attention. Scout Sign. Eagle Scouts, please repeat after me in unison: On my honor, I will do my best, to do my duty to God. I thoughtfully recognize and take upon myself the obligations and responsibilities of the rank of Eagle Scout. and make my rank and influence strongly count for better scouting and for better citizenship, in my troop, in my community, and in my contact with other people, MC: The Eagle Scouts in the audience may now be seated. Mr. Scoutmaster, will you do the honor of awarding the rank of Eagle to ______? Since the parents and siblings of this Scout have been so instrumental in his attaining the Eagle rank, I will ask that they come to the platform. (pause while they come forward) Mr. _____ will you please assist? (Presentation of Eagle Award) SM: The symbol of your success is the Eagle badge which is now present to your mother. Your mother will, in turn, pin the badge over your heart. (presentation of badge) SM: In recognition of the wisdom and guidance given to you by your father will you present to him this Eagle pin, which he will be proud to wear in your honor. (presentation of fathers pin) SM: And now, also in recognition of the many hours of patient guidance given by her in your efforts, will you pin the Eagle mother's pin on your mother. (presentation of mother's pin) SM: (siblings of Eagle Candidate)___________, will you remove ______'s Troop Neckerchief and dress him in the Eagle Neckerchief. SM: _____, will you present the Eagle Advisor Mentor pin? Eagle Candidate: Mr. _______, thank you for your interest, dedication and patience in assisting me in earning the rank of Eagle Scout. SM: Now, would the audience please rise. 
By virtue of the authority vested in me by the National Council of the Boy Scouts of America, I hereby present to you our newest Eagle Scout. Let's give him a round of applause. (Pause) At this time I would like to invite ______ to say a few words about his Scouting experience. Eagle Candidate: (give Eagle response. Then invites each adult up that he wants to acknowledge, presents him/her with a gift and asks him/her to say a few words, stay standing next to him. Typically, his Eagle mentor, past scoutmasters, current scoutmaster, other influential adults. If doing the Scoutmaster, then save him/her for last.) SM: says a few words after receiving acknowledgement from Eagle Candidate. SM: _____, the process of earning one's Eagle Scout award does not end with this ceremony. I have confidence that you will honor Scouting with your life and service as today Scouting has honored you. The Eagle soars high and seeks to rest only upon the lofty peaks. As an Eagle Scout, you, too, must soar high. You must not swerve from the path of duty. You must strive to obtain that which is the highest and noblest in life. Eagle Candidate: Thank you, Mr. __________. I will do my best. MC: By the authority vested in me as a representative of Troop ____, I declare that this court of honor is now complete. Mr. SPL, will you close this meeting? SPL: Everyone please rise. Color guard advance. Color guard retire the colors. (color guard exits while bugler plays taps) SPL: This meeting is now adjourned. There will be refreshments served in the back, please stay and enjoy, thank you.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,606
<?php

namespace ZendTest\ModuleManager\Listener;

use ListenerTestModule;
use ModuleAsClass;
use Zend\ModuleManager\Listener\ModuleResolverListener;
use Zend\ModuleManager\ModuleEvent;

/**
 * Tests that the module resolver listener maps module names to module
 * class instances (or reports failure with false).
 *
 * @covers \Zend\ModuleManager\Listener\AbstractListener
 * @covers \Zend\ModuleManager\Listener\ModuleResolverListener
 */
class ModuleResolverListenerTest extends AbstractListenerTestCase
{
    /**
     * Invoke a fresh resolver listener against an event carrying the given
     * module name; shared setup for every test case below.
     *
     * @param string $moduleName
     * @return object|false Resolved module instance, or false when resolution fails.
     */
    private function resolve($moduleName)
    {
        $listener = new ModuleResolverListener();
        $event = new ModuleEvent();
        $event->setModuleName($moduleName);

        return $listener($event);
    }

    /**
     * @dataProvider validModuleNameProvider
     */
    public function testModuleResolverListenerCanResolveModuleClasses($moduleName, $expectedInstanceOf)
    {
        $this->assertInstanceOf($expectedInstanceOf, $this->resolve($moduleName));
    }

    public function validModuleNameProvider()
    {
        // Description => [module name, expectedInstanceOf]
        return [
            'Append Module'  => ['ListenerTestModule', ListenerTestModule\Module::class],
            'FQCN Module'    => [ListenerTestModule\Module::class, ListenerTestModule\Module::class],
            'FQCN Arbitrary' => [ListenerTestModule\FooModule::class, ListenerTestModule\FooModule::class],
        ];
    }

    public function testModuleResolverListenerReturnFalseIfCannotResolveModuleClasses()
    {
        // A name with no matching class cannot be resolved; the listener signals this with false.
        $this->assertFalse($this->resolve('DoesNotExist'));
    }

    public function testModuleResolverListenerPrefersModuleClassesInModuleNamespaceOverNamedClasses()
    {
        // When both a "ModuleAsClass" class and a "ModuleAsClass\Module" class exist,
        // the namespaced Module class must win.
        $this->assertInstanceOf(ModuleAsClass\Module::class, $this->resolve('ModuleAsClass'));
    }

    public function testModuleResolverListenerWillNotAttemptToResolveModuleAsClassNameGenerator()
    {
        // "Generator" collides with the PHP built-in \Generator class and must not
        // be treated as a resolvable module class.
        $this->assertFalse($this->resolve('Generator'));
    }
}
{ "redpajama_set_name": "RedPajamaGithub" }
5,635
Q: Using Visual Studio 2012, the manifest signed, but the assembly is not I am using ClickOnce application deployment, and I just got my code certificate from Verisign. I am using this certificate to sign the manifest. When I download and install the application, the smartscreen comes up with my name on it (lame, but I think this is what is supposed to happen). When the ClickOnce installer completes, the smartscreen comes up again for the execution of the actual application, here it says 'Unknown Publisher'. Does ClickOnce not sign the assemblies by default? How do I do this? Edit: Currently I am letting VS sign my manifest (installer) for the ClickOnce, and I am setting a Post-build event to sign my assembly. But still when I install the application it says 'unknown publisher' when I go to actually run it. A: That does not sound right to me. I have used exactly the same workflow for multiple applications, and it works fine. Most likely there is an issue with your postbuild step. Make sure that you sign EXE file inside the OBJ folder (because that's where ClickOnce takes all the files from) - not the BIN one. Do ClickOnce publishing, go to the OBJ folder, right click on your application.exe file, and select properties. It should have six tabs - the last one being "Digital Signature": If you don't have it, you don't sign your application properly. 
And here is my postbuild step - note that I sign "RELEASE" configuration only: <Target Name="SignOutput" AfterTargets="CoreCompile" Condition="'$(ConfigurationName)'=='Release'"> <PropertyGroup> <TimestampServerUrl>http://timestamp.verisign.com/scripts/timestamp.dll</TimestampServerUrl> <ApplicationDescription>my app</ApplicationDescription> <SigningCertificateCriteria>/n "my company."</SigningCertificateCriteria> </PropertyGroup> <ItemGroup> <SignableFiles Include="$(ProjectDir)obj\$(ConfigurationName)\$(TargetName)$(TargetExt)" /> </ItemGroup> <Exec Condition=" '$(ConfigurationName)'=='Release'" Command="&quot;c:\Program Files (x86)\Windows Kits\8.0\bin\x64\signtool.exe&quot; sign $(SigningCertificateCriteria) /d &quot;$(ApplicationDescription)&quot; /t &quot;$(TimestampServerUrl)&quot; &quot;%(SignableFiles.Identity)&quot;" /> </Target>
{ "redpajama_set_name": "RedPajamaStackExchange" }
971
Q: How to sort a dict of dicts by the length of a list inside child dicts I'm a beginner to Python, and for the sake of learning I looked up some challenges to solve. Here I have a dictionary, containing several dictionaries, each having a list of dicts. My job here is to sort the dictionary, in descending order, based on which child of somedict has more elements in the key list. somedict = { 'a': { 'key' : [{}, {}, ] }, 'b': { 'key' : [{}, ] }, 'c': { 'key' : [{}, {}, {} ] }, 'd': { 'key' : [{}, {}, {}, ] }, 'e': { 'key' : [{}, {}, {} ] }, 'f': { 'key' : [{}, ] }, 'g': { 'key' : [{}, {} ] }, 'h': { 'key' : [{}, {}, {},] } } sorted_dict = sorted(somedict, key= lambda k: len(k['a']['key'])) print(sorted_dict) While trying to use sorted(), I get: TypeError: string indices must be integers A: Yes, here you go: somedict = { 'a': { 'key' : [{}, {}, ] }, 'b': { 'key' : [{}, ] }, 'c': { 'key' : [{}, {}, {} ] }, 'd': { 'key' : [{}, {}, {}, ] }, 'e': { 'key' : [{}, {}, {} ] }, 'f': { 'key' : [{}, ] }, 'g': { 'key' : [{}, {} ] }, 'h': { 'key' : [{}, {}, {},] } } sorted_dict = sorted(somedict, key=lambda k: len(somedict[k]["key"])) print(sorted_dict) Comparing to your attempt Basically, sorted iterates over the incoming iterable (here a dict) and the custom key parameter gives you the key of the dictionary for each element of that dictionary. Here, the key is called k as a variable name, so the question is: "Given the key of the dictionary, how do we get the length of the underlying list?" In this case, we do that by calling somedict[k] first, and then the key string. This returns the array. We then simply call len(...) on that to get the list length. For example, for the first element in the dict the key is a, so somedict["a"] returns the subdict: {'key' : [{}, {}, ]}. In this subdict, we then extract the value of the key called, also, key - with: somedict["a"]["key"] and get [{}, {}, ]. Then we take the length with len([{}, {}, ]) which is 2. Hope that helps! 
A: What you need to do is to sort the keys as a list, and from that sorted list rebuild the dict: somedict = { ... # contents elided } keys = list(somedict.keys()) sorted_keys = sorted(keys, key= lambda k: len(somedict[k]['key']), reverse=True) print(sorted_keys) sorted_dict = {k:somedict[k] for k in sorted_keys} print(sorted_dict) Output ['c', 'd', 'e', 'h', 'a', 'g', 'b', 'f'] {'c': {'key': [{}, {}, {}]}, 'd': {'key': [{}, {}, {}]}, 'e': {'key': [{}, {}, {}]}, 'h': {'key': [{}, {}, {}]}, 'a': {'key': [{}, {}]}, 'g': {'key': [{}, {}]}, 'b': {'key': [{}]}, 'f': {'key': [{}]}} And this can be shortened to one line: sorted_dict = {k:somedict[k] for k in sorted(somedict, key=lambda k: len(somedict[k]['key']), reverse=True)} A: The problem is the lambda you are passing for key and since you want to sort it in descending order, you need also to add reverse=True. sorted_keys = sorted(somedict, key=lambda k: len(somedict[k]['key']), reverse=True) print(sorted_keys) # then you can build the sorted dictionary sorted_dict = {k:somedict[k] for k in sorted_keys} print(sorted_dict) Output ['c', 'd', 'e', 'h', 'a', 'g', 'b', 'f'] {'c': {'key': [{}, {}, {}]}, 'd': {'key': [{}, {}, {}]}, 'e': {'key': [{}, {}, {}]}, 'h': {'key': [{}, {}, {}]}, 'a': {'key': [{}, {}]}, 'g': {'key': [{}, {}]}, 'b': {'key': [{}]}, 'f': {'key': [{}]}} A: You almost got it right. Two edits: * *sorted needs the keys and the values of the top-level dict. So you need to pass somedict.items(). This passes a list of tuples with key, value pairs. *in the sorting lambda, you the the key, value tuple as parameter. You are interested in the value (2nd tuple item = index 1) and then the 'key' of it. So I think this does the trick: sorted_dict = sorted(somedict.items(), key=lambda kv: len(kv[1]['key']))
{ "redpajama_set_name": "RedPajamaStackExchange" }
9,021
title: Automatically create pages from components in any directory date: 2018-07-07 image: "gatsbymanor.jpg" # change this to the npm package stats author: "Steven Natera" --- ![Gatsby plugin page creator in the plugin library](plugin-library-page-creator.png) In this article we will talk about a new plugin that lets you auto create pages from components in any folder within your Gatsby project! We will chat about the problem that sparked the creation of this plugin. Afterwards, we'll talk about how you can use this plugin to auto create pages without having to place your components in the **src/pages** directory. # The problem with static values Here's how it all got started. The goal with [Gatsby Manor](https://gatsbymanor.com) (a separate project, unaffiliated with the Gatsby core team) is to create professionally designed Gatsby starters to give your site a clean, modern look the moment you create a new project. While creating our newest starter [gatsby-starter-eventually](https://github.com/gatsbymanor/gatsby-starter-eventually), I wanted to organize the directory structure of the project to better fit my workflow. Gatsby core automatically turns React components in **src/pages** into pages. Prior to this plugin, that functionality was only available to Gatsby core as an internal plugin. There was no way to have a different folder automatically create pages from components because the default **src/pages** path was hardcoded. The side effect of this behavior is that you have a hard time creating folder structures that best fit your needs. If you wanted all your JavaScript to live in a single folder, you would lose the ability to automatically create pages. # The community asks for a solution After searching the repo issues for a solution, I found other people having the same problem and asking for a solution. A user in [#2424](https://github.com/gatsbyjs/gatsby/issues/2424) wanted to move their components into a render folder. 
Another wanted to keep their Gatsby code near non-Gatsby related code but under a different directory name. One [issue](https://github.com/gatsbyjs/gatsby/issues/2514) raised how uncommon this pattern might be when handing a project over to other clients that are less technical. Each issue focused on a valid concern, with many community members in support of a better convention. With this data, I set out to determine a solution for this problem. # The great pull request ![Pull request for gatsby plugin page creator](page-creator-pull-request.png) With the help of Kyle Mathews, he directed me to the location of the internal plugin responsible for the hardcoded behavior. To support the desired functionality, we would have to extract the internal plugin so any project could download the plugin from npm. The next step would be to add a path option that takes a directory path string to denote the directory of components we want to use for automatic page creation. With a development plan in place, I started coding a solution. After a few days of coding, I opened a pull request to add a `gatsby-plugin-page-creator` to the Gatsby Plugin Library. After a few weeks of code reviews the [pull request](https://github.com/gatsbyjs/gatsby/pull/4490) was merged! # How you can use this plugin You can find [`gatsby-plugin-page-creator`](/packages/gatsby-plugin-page-creator/?=page-creator) in the plugin library. Use `yarn add gatsby-plugin-page-creator` to add this into your project. In `gatsby-config.js`, use a JSON object to use the plugin, making sure to add the required path option with your desired directory path. If you need to designate another location for auto page creation, add another entry of the plugin with a different path. You can have unlimited directories responsible for auto page creation. The result is you can keep your project well organized without having to give up on the most powerful out-of-the-box feature. 
# Community reception ![NPM stats for gatsby plugin page creator](npm-stats.png) This plugin has by far been the most impactful open source contribution I have made to date. As of writing, the newly published Gatsby plugin has been [downloaded more than 50k+ times](https://npm-stat.com/charts.html?package=gatsby-plugin-page-creator&from=2018-06-01&to=2018-07-18) in less than a month! I am happy this plugin is being used by so many developers. What brings me even more joy is that this small contribution will help the Gatsby community continue to deliver an amazing open source tool. # Gatsby Manor [Gatsby Manor](https://gatsbymanor.com) builds professionally designed Gatsby starters. We make starters to meet your project needs, with new starters added monthly. Can't find a starter you like? Message us and we'll build a starter project for you.
{ "redpajama_set_name": "RedPajamaGithub" }
7,402
Q: Reset the sequence of a serial PK with dynamic SQL How can I dynamically reference a sequence name (as below) using a combination of strings and DECLARE variables? Is the code below proper or is there another method of doing this? My goal is to do this within a DO/BEGIN block for performance reasons plus it'll help me understand plpgsql by doing something useful so I'm not going to mix this in to PHP unless I really have to. DO $$ DECLARE PKEY VARCHAR; BEGIN SELECT pg_attribute.attname INTO PKEY FROM pg_index, pg_class, pg_attribute WHERE pg_class.oid = 'parts1'::regclass AND indrelid = pg_class.oid AND pg_attribute.attrelid = pg_class.oid AND pg_attribute.attnum = any(pg_index.indkey) AND indisprimary; --SELECT setval('parts1_id_seq', (SELECT MAX(pkey) + 1 FROM parts)); SELECT setval('parts1_' || PKEY || '_seq', (SELECT MAX(pkey) + 1 FROM parts)); END; $$ LANGUAGE plpgsql; A: General advice You mentioned it yourself, you just started using Postgres. Yet, you're tackling extremely advanced tasks right away, juggling system catalogs and operating with advanced dynamic SQL to automate things. While your objectives seem reasonable, you still need to start at the basics. There is just too much to explain here. Start with (relevant parts of) the excellent manual. I provided a couple of deep links further down. Answer To get name(s) (and data type(s)) of columns involved in the primary key, rather use this simpler query: SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE i.indrelid = 'tbl'::regclass AND i.indisprimary; I updated the Postgres Wiki page where your original query seems to originate from. However, this can return multiple rows, while you only assign a single value. Assuming you have established that we are dealing with a serial type (single column) primary key. Else use similar techniques as laid out in my previous answer to make sure. 
Then use the dedicated system information function pg_get_serial_sequence() to determine the name of the used sequence, as demonstrated in my previous answer. Per documentation: get name of the sequence that a serial, smallserial or bigserial column uses DO $do$ BEGIN EXECUTE ( SELECT format($$SELECT setval('%s'::regclass, max(%I)) FROM %s$$ , pg_get_serial_sequence(a.attrelid::regclass::text, a.attname) , a.attname , a.attrelid::regclass ) FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = i.indkey[0] WHERE i.indrelid = 'tbl'::regclass AND i.indisprimary ); END $do$ LANGUAGE plpgsql; This builds and executes a query of the form: SELECT setval('tbl_tbl_id_seq'::regclass, max(tbl_id)) FROM tbl; Explain / Advise * *This is advanced stuff and not really suitable for beginners. Messing with system catalogs can go south quickly if you don't know exactly what you are doing. *Stick to legal all-lower case identifiers in Postgres and plpgsql to make your life easier. But never rely on it in dynamic SQL, where you also need to defend against SQL injection at all times. *Making heavy use of format() to build the query string conveniently & safely. *Inside a plpgsql function you cannot call SELECT without assigning the result. You would use PERFORM instead. Details in the manual. I removed that completely, since I reduced everything to a single EXECUTE. *Since the whole operation only makes sense for a single-column primary key, I simplified the JOIN condition to a.attnum = i.indkey[0] Note that pg_index.indkey has the special (internal) type int2vector. Unlike Postgres arrays its index starts with 0, not 1. *Make it a habit to use dollar-quotes with a token around plpgsql code (including DO statements). This allows nesting simple dollar-quotes like I do in my example. 
Details: * *Insert varchar with single quotes in PostgreSQL *You only need a single SELECT here: SELECT setval('tbl_tbl_id_seq'::regclass, max(tbl_id)) FROM tbl; instead of: SELECT setval('tbl_tbl_id_seq'::regclass, (SELECT max(tbl_id) FROM tbl)); SQL Fiddle demonstrating a few things. A: Any time you have a SQL statement that it dynamic in nature (can potentially change at runtime) that you need to run inside of a function, you need to run EXECUTE for it. See Section 40.5.4. Executing Dynamic Commands You need to be aware that executing SQL this way CAN be vulnerable to SQL Injection attacks. Your line above: SELECT setval('parts1_' || INTO PKEY || '_seq', (SELECT MAX(pkey) + 1 FROM parts)); Would become (at least, I believe this is the intent) EXECUTE 'SELECT setval(''parts1_' || PKEY || '_seq'', (SELECT MAX(' || pkey || ') + 1 FROM parts))'; * *Note .. the doubled single quote ('') inside a string is treated as a literal single quote (like \') *Note 2 .. to protect against SQL Injection, you can EXECUTE a formatted string (as Erwin Brandstetter has already supplied several examples to you in a different post)
{ "redpajama_set_name": "RedPajamaStackExchange" }
29
Лиштенберг () — коммуна на северо-востоке Франции в регионе Гранд-Эст (бывший Эльзас — Шампань — Арденны — Лотарингия), департамент Нижний Рейн, округ Саверн, кантон Ингвиллер. До марта 2015 года коммуна административно входила в состав упразднённого кантона Ла-Птит-Пьер (округ Саверн). Площадь коммуны — 12,12 км², население — 551 человек (2006) с тенденцией к стабилизации: 559 человек (2013), плотность населения — 46,1 чел/км². Население Население коммуны в 2011 году составляло 566 человек, в 2012 году — 559 человек, а в 2013-м — 559 человек. Динамика населения: Экономика В 2010 году из 361 человек трудоспособного возраста (от 15 до 64 лет) 259 были экономически активными, 102 — неактивными (показатель активности 71,7 %, в 1999 году — 69,6 %). Из 259 активных трудоспособных жителей работал 231 человек (125 мужчин и 106 женщин), 28 числились безработными (14 мужчин и 14 женщин). Среди 102 трудоспособных неактивных граждан 31 были учениками либо студентами, 40 — пенсионерами, а ещё 31 — были неактивны в силу других причин. Примечания Ссылки
{ "redpajama_set_name": "RedPajamaWikipedia" }
5,137
\section{\bf Introduction and modeling \color{black}} In this paper we propose and analyze a model for phase separation and damage in a thermoviscoelastic body, occupying a spatial domain $\Omega \subset \mathbb{R}^d$, where $d\in \{2,3\}$. We shall \color{black} consider here a suitable weak formulation of the following PDE syste \begin{subequations} \label{eqn:PDEsystem-expli} \begin{align} &c_t=\dive(m(c,z)\nabla\mu), \label{e:c-expl-intro}\\ &\begin{aligned} \mu ={}&-\Delta_p(c)+\phi'(c) +\frac12\big(b(c,z) \mathbb{C}(\varepsilon(\mathbf u)-\varepsilon^*(c)):(\varepsilon(\mathbf u)-\varepsilon^*(c))\big)_{,c}-\vartheta+c_t, \end{aligned} \label{e:mu-expl-intro}\\ &z_t+\partial I_{(-\infty,0]}(z_t) -\Delta_p(z)+\partial I_{[0,\infty)}(z) +\sigma'(z)\ni -\frac12 b_{,z}(c,z) \mathbb{C} (\varepsilon(\mathbf u)-\varepsilon^*(c)):(\varepsilon(\mathbf u)-\varepsilon^*(c)) +\vartheta,\label{e:z-expl-intro}\\ &\vartheta_t+c_t\vartheta+z_t\vartheta+\rho\vartheta\dive(\mathbf u_t)-\dive(\mathsf{K}(\vartheta)\nabla\vartheta) =g+|c_t|^2+|z_t|^2+a(c,z)\varepsilon(\mathbf u_t):\mathbb{V}\varepsilon(\mathbf u_t)+m(c,z)|\nabla\mu|^2, \label{e:teta-expl-intro}\\ &\mathbf u_{tt}-\dive\big( a(c,z)\mathbb{V}\varepsilon(\mathbf u_t) + b(c,z) \mathbb{C} (\varepsilon(\mathbf u)-\varepsilon^*(c)) -\rho\vartheta\mathds 1\big)=\mathbf{f} \label{e:u-expl-intro} \end{align} \end{subequations} posed in $\Omega \times (0,T)$. The system couples \begin{itemize} \item[-] the viscous Cahn-Hilliard equation \eqref{e:c-expl-intro}--\eqref{e:mu-expl-intro} ruling the evolution of the concentration $c$; \item[-] the damage flow rule \color{black} \eqref{e:z-expl-intro} for the local proportion of the damage $z$; \item[-] the internal energy balance \eqref{e:teta-expl-intro} for the absolute temperaure $\vartheta$ ; \item[-] the momentum balance \eqref{e:u-expl-intro} describing the dynamics for the displacement $\mathbf u$. 
\end{itemize} The symbol $(\cdot)_t$ denotes the partial derivative with respect to time. In the Cahn-Hilliard equation \eqref{e:c-expl-intro} $m$ denotes the mobility of the system and $\mu$ the chemical potential, whose expression is \color{black} given in \eqref{e:mu-expl-intro}. There, $\Delta_p(\cdot):= \dive(|\nabla\cdot|^{p-2}\nabla\cdot)$ denotes the $p$-Laplacian, $\phi$ is a mixing potential, $b$ is an elastic coefficient function depending possibly on both $c$ and $z$, $\mathbb{C}$ represents the elasticity tensor, $\varepsilon^*$ a residual strain tensor, and $(\cdot)_{,c}$ the partial derivate with respect to the variable $c$ (with an analogous notation for the \color{black} other variables). In the damage flow rule \color{black} \eqref{e:z-expl-intro} $\partial I_{(-\infty,0]}: \mathbb{R} \rightrightarrows \mathbb{R}$ denotes the subdifferential of the indicator function of the set $(-\infty,0]$, given by \[ \partial I_{(-\infty,0]}(v) = \begin{cases} \{ 0\} & \text{for } v <0, \\ [0,+\infty) & \text{for } v=0 \end{cases} \] while $\partial I_{[0,\infty)}: \mathbb{R} \rightrightarrows \mathbb{R}$ is \color{black} the subdifferential of the indicator function of the set $[0,\infty)$, i.e. \[ \partial I_{[0,\infty)}(z) = \begin{cases} (-\infty, 0]& \text{for } z=0, \\ \{ 0\} & \text{for } z >0\,. \end{cases} \] \color{black} The presence of these two \color{black} maximal monotone \color{black} graphs, enforcing in particular \color{black} the irreversibility of the damage phenomenon, entails the constraint $z(t)\in [0,1]$ for $t\in (0,T)$ as soon as $z(0)\in [0,1]$. This is physically meaningful because $z$ denotes the damage parameter which is set to be equal to $0$ in case the material is completely damaged and it is \color{black} equal to $1$ in the completely safe case, while $z\in (0,1)$ indicates partial damage. 
The function $\sigma$ in \eqref{e:z-expl-intro} represents a smooth function, possibly non-convex, of the damage variable \color{black} $z$. In the temperature equation \eqref{e:teta-expl-intro}, $\rho$ denotes a positive thermal expansion coefficient, $\mathsf{K}$ the heat conductivity of the system, $g$ a given heat source and $a$ a viscosity coefficient possibly depending on $c$ and $z$, while $\mathbb{V}$ is the viscosity tensor. Finally, in the momentum balance \eqref{e:u-expl-intro} $\mathbf{f}$ denotes a given volume force. We will supplement system \eqref{eqn:PDEsystem-expli} with \color{black} the initial-boundary conditions \begin{subequations} \label{init-bdry-conditions} \begin{align} &c(0)=c^0, &&z(0)=z^0, &&\vartheta(0)=\vartheta^0, &&\mathbf u(0)=\mathbf u^0, &&\mathbf u_t(0)=\mathbf v^0 &&\text{a.e.\ in }\Omega, \label{init-conditions}\\ &\nabla c\cdot { \bf n \color{black}}=0, &&m(c,z)\nabla\mu\cdot { \bf n \color{black}}=0, &&\nabla z\cdot { \bf n \color{black}}=0, &&\mathsf{K}(\vartheta)\nabla\vartheta\cdot { \bf n \color{black} }=h, &&\mathbf u=\mathbf{d} &&\text{a.e. on }\partial\Omega\times(0,T), \label{bdry-conditions} \end{align} \end{subequations} where ${ \bf n \color{black} }$ indicates the outer unit normal to $\partial\Omega$, while $h$ and $\mathbf{d}$ denote, respectively, a given boundary heat source and displacement. 
The PDE system \eqref{eqn:PDEsystem-expli} may be written in the more compact form \begin{subequations} \label{eqn:PDEsystem} \begin{align} &c_t=\dive(m(c,z)\nabla\mu), \label{e:c}\\ &\mu = -\Delta_p(c)+\phi'(c)+W_{,c}(c,\varepsilon(\mathbf u),z)-\vartheta+c_t, \label{e:mu}\\ &z_t +\partial I_{(-\infty,0]}(z_t) -\Delta_p(z)+\partial I_{[0,\infty)}(z) +\sigma'(z)\ni -W_{,z}(c,\varepsilon(\mathbf u),z)+\vartheta, \label{e:z}\\ &\vartheta_t+c_t\vartheta+z_t\vartheta+\rho\vartheta\dive(\mathbf u_t)-\dive(\mathsf{K}(\vartheta)\nabla\vartheta)=g+|c_t|^2+|z_t|^2+a(c,z)\varepsilon(\mathbf u_t):\mathbb{V}\varepsilon(\mathbf u_t)+m(c,z)|\nabla\mu|^2, \label{e:teta}\\ &\mathbf u_{tt}-\dive\big(a(c,z)\mathbb{V}\varepsilon(\mathbf u_t)+W_{,\varepsilon}(c,\varepsilon(\mathbf u),z)-\rho\vartheta\mathds{1}\big)=\mathbf{f}, \label{e:u} \end{align} \end{subequations} with the following choice of the elastic energy density \begin{equation} \label{elastic-energy} W(c,\varepsilon,z)=\frac 12b(c,z)\mathbb C(\varepsilon-\varepsilon^*(c)):(\varepsilon-\varepsilon^*(c)). \end{equation} The expression \color{black} of $W$ is typically quadratic as a function of the strain tensor $\varepsilon(\mathbf u)$, whereas \color{black} the coefficient $b$ can depend on $c$ and $z$. This accounts for possible inhomogeneity of elasticity on the one hand, and is characteristic for damage on the other hand. Indeed, the natural choice would be that $b$ vanishes \color{black} for $z=0$, i.e.\ when the material is completely damaged. \paragraph{\bf Derivation of the model} Let us briefly discuss the thermodynamically consistent derivation of the PDE-system \eqref{eqn:PDEsystem-expli}. 
The {\it state variables} that determine the local thermodynamic state of the material and the {\it dissipative variables} whose evolution describes the way along which the system tends to dissipate energy are as follows: \\ {\it State variables} $$ \vartheta, \, c, \, \nabla c, \, \varepsilon(\mathbf u), \, z, \, \nabla z $$ {\it Dissipation variables} $$ \nabla \vartheta, \, c_t, \, \varepsilon(\mathbf u_t), \, z_t $$ By classical principles of thermodynamics, the evolution of the \color{black} system is based on the free energy $\mathscr{F}$ and the pseudopotential of dissipation $\mathscr{P}$, for which we assume the following general form: \begin{align} \tF{c}{z}{\vartheta}{ \varepsilon(\mathbf u)} = \int_\Omega F(c,\nabla c,z,\nabla z,\vartheta, \varepsilon(\mathbf u) )\,\mathrm dx \qquad \text{and} \qquad \tP{\nabla \vartheta}{c_t}{\varepsilon(\mathbf u_t)}{z_t} = \int_\Omega P(\nabla \vartheta, c_t, \varepsilon(\mathbf u_t), z_t) \,\mathrm dx . \end{align} Our evolutionary \color{black} system has been obtained by the principle of virtual power and by balance equations of micro-forces, \color{black} a generalization of the approaches by Fremond \cite{fremond} and Gurtin \cite{gur96}. In addition, we also include temperature-dependent \color{black} effects by means of the balance equation of energy. 
\\ The system relies altogether on the balance equations of mass, forces, micro-forces and energy:\\ {\it Evolution system} \begin{subequations} \label{eqn:PDEsystem-derivation} \begin{align} \text{Mass balance} \hspace{0.98cm}& \notag\\ c_t + \dive {\BS J} &=0, \label{e:c-deri}\\ \text{Force balance}\hspace{0.9cm}& \notag\\ \mathbf u_{tt} - \dive { {\BS\sigma}} \def\bftau{{\BS\tau}} &= {\mathbf f}, \label{e:u-deri} \\ \text{Micro-force balance} & \notag\\ B - \dive{\bf H}& = 0 ,\\ \Pi - \dive { \BS \xi} &= 0 ,\\ \text{Energy balance} \hspace{0.6cm}& \notag \\ U_t + \dive { \BS q} \;&= g + {\BS\sigma}} \def\bftau{{\BS\tau}: \varepsilon(\mathbf u_t) + \dive ({\bf H}) z_t + {\bf H} \cdot \nabla z_t + \dive ( {\BS \xi}) c_t + { \BS \xi} {\cdot} \nabla c_t - \dive ({\BS J}) \, \mu -{\BS J} \cdot \nabla \mu \end{align} \end{subequations} where the internal energy density is given by $U= F - \vartheta \partial_\vartheta F$.\\ Note that the system is not closed for the variables. Therefore, constitutive laws have to be imposed for the mass flux ${ \bf J}$, the stress tensor ${\BS\sigma}} \def\bftau{{\BS\tau}$, the internal microforce $B$ for $z$, the microstress ${\bf H}$ for $z$, the internal microforce $\Pi$ for $c$, the microstress ${ \BS \xi}$ for $c$ and the heat flux ${\bf q}$.\\ {\it Constitutive relations}\\ Following Fr{\'e}mond's perspective, we assume that the stress tensor ${\BS\sigma}} \def\bftau{{\BS\tau}$, the microforce $B$ and the microstress ${\bf H}$, may be additively decomposed into their non-dissipative and dissipative components, i.e. 
\begin{align} && & {\BS \sigma }= {\BS \sigma }^{nd} + {\BS \sigma }^d & \text{with}& \qquad {\BS \sigma }^{nd}= \partial_{\varepsilon(\mathbf u)} F , & & {\BS \sigma }^d = \partial_{\varepsilon(\mathbf u_t)} P, & & && &&\\ && &B= B^{nd} + B^d & \text{with}& \qquad B^{nd} \in \partial_z F , & & B^d \in \partial_{z_t} P, & & && &&\\ && & {\bf H} = {\bf H}^{nd} + {\bf H}^d &\text{with} & \qquad {\bf H}^{nd}= \partial_{\nabla z} F , & & {\bf H}^d= \partial_{\nabla z_t} P=0 .& & && & & \end{align} In a similar way, by choosing Gurtin's approach, cf.~\cite{gur96} equations (3.19)-(3.23), we get the constitutive relations: \begin{align} { \bf J}= - m(c,z) \nabla \mu,\qquad\qquad \Pi= \partial_c F + \partial_{c_t} P - \mu, \qquad \qquad {\BS \xi} = \partial_{\nabla c} F. \end{align} The heat source is given by the standard constitutive relation: $$ { \BS q}= - \frac{\partial P}{ \partial \nabla \vartheta}. $$ In the framework of the formulation of the damage and phase separation theory \cite{fremond, gur96}, we choose for our system the following free energy and dissipation potential: \begin{align} \label{free-energy} \tF{c}{z}{\vartheta}{ \varepsilon(\mathbf u)}:={}&\int_\Omega \frac 1p|\nabla c|^p+\frac 1p|\nabla z|^p+W(c,\varepsilon(\mathbf u),z)+\phi(c)+\sigma(z)+ I_{[0,+\infty)}(z)\,\mathrm dx\notag\\ &+\int_\Omega-\vartheta\log\vartheta-\vartheta\big(c+z+\rho\dive(\mathbf u)\big)\,\mathrm dx,\\ \label{dissipation} \tP{\nabla \vartheta}{c_t}{\varepsilon(\mathbf u_t)}{z_t} :={} & \int_\Omega \frac{1}{2} \mathsf{K}(\vartheta)|\nabla\vartheta|^2 + \frac{1}{2} z_t^2 + \frac{1}{2} c_t^2 + \frac{1}{2} a(c,z)\varepsilon(\mathbf u_t):\mathbb{V}\varepsilon(\mathbf u_t) + I_{(-\infty,0]}(z_t) \,\mathrm dx \, . \end{align} \par The first two gradient terms in \eqref{free-energy} represent the nonlocal interactions in phase separation and damage processes. 
The analytical study of gradient \color{black}}%{\color{magenta} theories \color{black} goes back to \cite{LM89,Mod98}, where phase separation processes were investigated. \color{black} A typical choice for $W$ has been introduced in \eqref{elastic-energy}. The functions $\phi$ and $\sigma$ represent the mixing potentials. The term $\vartheta(c+z+\rho\dive \mathbf u)$ models the phase and thermal expansion processes in the system. It may also be regarded as linear approximation near to the thermodynamical equilibrium. In the following lines we will get further insight into the choices of these functionals. Exploiting \eqref{eqn:PDEsystem-derivation}-\eqref{dissipation} results in system \eqref{eqn:PDEsystem-expli}, for which the Clausius-Duhem inequality is satisfied. As discussed, our approach is based on a gradient theory of phase separation and damage processes due to \cite{fremond,gur96,CH58}. For a non-gradient approach \color{black}}%{\color{magenta} to \color{black} damage models we refer to \cite{FG06, GL09, Bab11}. There, the damage variable $z$ takes \color{black}}%{\color{magenta} only \color{black} two distinct values, i.e. $\{0,1\}$, in contrast to phase-field models where intermediate values $z \in [0,1]$ are also allowed. In addition, the mechanical properties of damage phenomena are described in \cite{FG06, GL09, Bab11} differently. They choose a $z$-mixture of a linearly elastic strong and weak material with two different elasticity tensors. We also refer to \cite{FKS11}, where a non-gradient damage model was studied by \color{black}}%{\color{magenta} means of \color{black} Young measures. 
\color{black} \paragraph{\bf Mathematical difficulties.} The main mathematical difficulties attached with \color{black} the proof of existence of solutions to such a PDE system are related to the presence of the quadratic dissipative terms on the right-hand \color{black} side in the internal energy balance \eqref{e:teta}, as well as the doubly nonlinear and possibly nonsmooth carachter of the damage relation \eqref{e:z}. This is the reason why we shall \color{black} resort here to a weak solution notion for \eqref{eqn:PDEsystem} coupled with \eqref{init-bdry-conditions}. In this solution concept, partially drawn from \cite{RocRos14}, the Cahn-Hilliard system (\ref{e:c}--\ref{e:mu}) and the balance of forces \eqref{e:u} (read a.e. in $\Omega\times(0,T)$) \color{black} are coupled with an {\sl ``entropic'' formulation} of the heat equation \eqref{e:teta} and a weak formulation of the damage flow rule \eqref{e:z} \color{black} taken from \cite{hk1,hk2}. Let us briefly illustrate them. \color{black} \paragraph{\bf The ``entropic'' formulation of the heat equation.} It consists of \color{black} a {\sl weak entropy inequality} \begin{equation} \label{entropy-ineq-intro} \begin{aligned} &\int_s^t \int_\Omega (\log(\vartheta) + c+z) \varphi_t \, \mathrm{d} x \, \mathrm{d} r - \rho \int_s^t \int_\Omega \dive(\mathbf{u}_t) \varphi \, \mathrm{d} x \, \mathrm{d} r -\int_s^t \int_\Omega \mathsf{K}(\vartheta) \nabla \log(\vartheta) \cdot \nabla \varphi \, \mathrm{d} x \, \mathrm{d} r\\ &\begin{aligned} \leq \int_\Omega (\log(\vartheta(t))+c(t)+z(t)){\varphi(t)} \, \mathrm{d} x &-\int_\Omega (\log(\vartheta(s))+c(s)+z(s)){\varphi(s)} \, \mathrm{d} x\\ &-\int_s^t \int_\Omega \mathsf{K}(\vartheta)|\nabla\log(\vartheta)|^2\varphi\, \mathrm{d} x \, \mathrm{d} r \end{aligned}\\ &\quad-\int_s^t \int_\Omega \left( g +|c_t|^2+ |z_t|^2 + a(c,z) \varepsilon(\mathbf{u}_t):\mathbb{V} \varepsilon(\mathbf{u}_t) + m(c,z)|\nabla \mu|^2\right) \frac{\varphi}{\vartheta} \, \mathrm{d} x \, 
\mathrm{d} r -\int_s^t \int_{\partial\Omega} h \frac\varphi\vartheta \, \mathrm{d} S \, \mathrm{d} r \end{aligned} \end{equation} required to be valid \color{black} for almost all $0\leq s \leq t \leq T$ and for $s=0$, and for all sufficiently regular and positive test functions $\varphi$, coupled with a {\sl total energy inequality}: \begin{equation} \label{total-enid-intro} \begin{aligned} \tE{c(t)}{z(t)}{\vartheta(t)}{\mathbf u(t)}{\mathbf u_t(t)} \leq{}&\tE{c(s)}{z(s)}{\vartheta(s)}{\mathbf u(s)}{\mathbf u_t(s)}\\ &+ \int_s^t\int_\Omega g \, \mathrm{d} x \, \mathrm{d} r + \int_s^t\int_{\partial\Omega} h \, \mathrm{d} S \, \mathrm{d} r\\ &+ \int_s^t \int_\Omega \mathbf{f} \cdot \mathbf{u}_t \, \mathrm{d} x \, \mathrm{d} r + \int_s^t \int_{\partial\Omega}\big({\boldsymbol{\sigma}} { \bf n \color{black}} \big)\cdot \mathbf{d}_t \, \mathrm{d} S \, \mathrm{d} r, \end{aligned} \end{equation} valid \color{black} for almost all $0\leq s \leq t \leq T$, and for $s=0$, where the total energy $\mathscr E$ is the sum of the internal energy and the kinetic energy, i.e. \begin{align} \tE{c}{z}{\vartheta}{\mathbf u}{\mathbf u_t}:={}&\tU{c}{z}{\vartheta}{\color{black}}%{\color{magenta} \varepsilon(\mathbf u)\color{black}}+\int_\Omega\frac12| \mathbf u_t \color{black}|^2\,\mathrm dx, \label{total-energy} \end{align} being the internal energy $\mathscr{U}$ specified by (cf.~also \eqref{free-energy}): \begin{align} \tU{c}{z}{\vartheta}{\color{black}}%{\color{magenta} \varepsilon(\mathbf u)\color{black}}:={}&\tF{c}{z}{\vartheta}{\varepsilon(\mathbf u)}-\vartheta\cdot\partial_\vartheta\mathscr{F}(c,z,\vartheta,\color{black}}%{\color{magenta} \varepsilon(\mathbf u)\color{black})\notag\\ ={}&\int_\Omega \frac 1p|\nabla c|^p+\frac 1p|\nabla z|^p+W(c,\varepsilon(\mathbf u),z)+\phi(c)+\sigma(z)+ I_{[0,+\infty)}(z)+\vartheta\,\mathrm dx. 
\end{align} From an analytical viewpoint, observe that the entropy inequality \eqref{entropy-ineq-intro} has the advantage that all the quadratic terms on the right-hand side of \eqref{e:teta} are multiplied by a negative test function, which, together with the fact that we are only requiring an {\sl inequality} and not an equation, will allow \color{black} us to apply upper semicontinuity arguments for \color{black} the limit passage in the time-discrete approximation of system \eqref{eqn:PDEsystem} set up in \color{black} Section~\ref{s:5}. The \color{black} {\sl ``entropic'' formulation}, first introduced in \cite{fei} in the framework of heat conduction in fluids, and then applied to a phase separation \color{black} model \color{black} derived according to \textsc{Fr\'emond}'s approach \cite{fremond} in \cite{fpr09}, has been successively used also in models for different kinds of special materials. Besides the aforementioned work on damage \cite{RocRos14}, we may mention the papers \cite{ffrs}, \cite{frsz1}, and \cite{frsz2} on liquid crystals, \color{black} and more recently the analysis of a model for the evolution of non-isothermal binary incompressible immiscible fluids (cf.\ \cite{ERS}). Let us also mention that other approaches to treat PDE systems with an $L^1$-right-hand side are available in the literature: among others, we refer to \cite{zimmer}, resorting to the notion of {\em renormalized solution}, \color{black} and \cite{roubiSIAM10} where the coupling of rate-independent and thermal processes is considered. The heat equation therein, with an $L^1$-right-hand side, is tackled by means of Boccardo-Gallo\"uet type techniques. 
\color{black} \paragraph{\bf The weak formulation of the damage flow rule.} Following the lines of \cite{hk1, hk2}, we replace the damage inclusion \eqref{e:z} by the {\sl damage energy-dissipation inequality} \begin{align} &\label{energ-ineq-z-intro} \begin{aligned} \int_s^t \int_{\Omega} |z_t|^2 \, \mathrm{d} x \, \mathrm{d} r & +\int_\Omega\left( \frac1p |\nabla z(t)|^p + \sigma(z(t))\right)\, \mathrm{d} x\\ & \leq\int_\Omega\left( \frac1p |\nabla z(s)|^p+ \sigma(z(s))\right)\, \mathrm{d} x +\int_s^t \int_\Omega z_t \left(- \pd{z}(c,\varepsilon(\mathbf u), z) +\vartheta\right)\, \mathrm{d} x \, \mathrm{d} r, \end{aligned} \end{align} imposed \color{black} for all $t \in (0,T]$, for $s=0$, and for almost all $0< s\leq t$ and the {\sl one-sided variational inequality for the damage process} \begin{align} \label{var-ineq-z-intro} &\begin{aligned} \int_\Omega \Big( z_t \zeta +|\nabla z|^{p-2} \nabla z \cdot \nabla \zeta + \xi \zeta + \sigma'(z(t)) \zeta & + \pd{z}(c,\varepsilon(\mathbf u), z) \zeta -\vartheta \zeta \Big)\,\mathrm{d}x \geq 0 \quad \text{a.e. in\;}\, (0,T), \end{aligned} \end{align} required to be valid \color{black} for all sufficiently regular test functions $\zeta$, where $\xi \in \partial I_{[0,+\infty)}(z)$ $\text{a.e. in\;}\, Q$, and $z(x,t) \in [0,1]$, $z_t(x,t)\in(-\infty,0]$ $ \text{a.e. in\;} Q.$ \paragraph{\bf Entropic weak solutions.} In what follows, we shall refer to the formulation consisting of (\ref{e:c}--\ref{e:mu}), \eqref{e:u}, \eqref{entropy-ineq-intro}, \eqref{total-enid-intro}, \eqref{energ-ineq-z-intro}, \eqref{var-ineq-z-intro}, supplemented with the initial and boundary conditions \eqref{init-bdry-conditions}, as the \emph{entropic weak formulation} of (the initial-boundary value problem for) system \eqref{eqn:PDEsystem}. 
Let us point out that, in case of regular solutions, it can be seen that the {\sl ``entropic'' formulation} is equivalent to the internal energy balance \eqref{e:teta} (cf.\ Remark \ref{rmk:weak-sol} as well as \cite[Rmk. 2.6]{RocRos14} for more details). Likewise, the {\sl weak formulation of the damage flow rule} would give rise to the damage inclusion \eqref{e:z} for sufficiently regular solutions. \color{black} In this sense, we can observe that our formulation is consistent \color{black} with the PDE system \eqref{eqn:PDEsystem-expli}. \paragraph{\bf Our results and related literature.} In this paper we prove existence of global-in-time entropic weak \color{black} solutions under the following assumptions on the data: \begin{itemize} \item[-] the mixing potential $\phi$ is the sum of a convex possibly non-smooth part and a regular $\lambda$-concave part (cf.~Hyp.~(I)). Hence, both the sum of a logarithmic potential (e.g.~ $(1+c)\log(1+c)+(1-c)\log(1-c)$) or an indicator function (e.g.~$I_{[-1,1]}(c)$) and a smooth concave perturbation (e.g.~$-c^2$) are allowed as choices of $\phi$, cf.\ Remark \ref{rmk:l-convex-splitting} ahead); \color{black} \item[-] the mobility $m$ is a smooth function bounded from below by a positive constant; \item[-] the function $\sigma$ is regular; \item[-] the heat conductivity $\mathsf{K}$ is a continuous function growing like a power of $\vartheta$. This choice is motivated by mathematics, indeed it is needed in order to get suitable estimates \color{black} on the temperature $\vartheta$, but it is also justified by \color{black} the physical behavior of certain materials (cf.~\cite{klein,zr}); \item[-] the function $a$ is bounded away from zero and bounded from above as well as its partial derivatives with respect to both $c$ and $z$. 
These assumptions are mainly made in order to prevent the full degeneracy of the momentum balance \eqref{e:u} and in order to obtain from it the \color{black} sufficient regularity on $\mathbf u$ needed to handle the nonlinear coupling with the temperature and damage relations. Instead, the coefficient $b$ in the elastic energy density \color{black} \eqref{elastic-energy} can possibly vanish, and both $b$ and the eigenstrain $\varepsilon^*$ are required to \color{black} be sufficiently regular functions; \item[-] the thermal expansion coefficient $\rho$ is assumed to be a positive constant. For more general behavior of $\rho$ possibly depending \color{black} on the damage parameter $z$ the reader can refer to \cite{hr}, while the fact that $\rho$ is chosen to be independent of $\vartheta$ is justified by the fact that we assume \color{black} to have a constant specific heat $c_v$ (equal to 1 in \eqref{e:teta} for simplicity): indeed they are related (by thermodynamical laws) by the relation $\partial_\vartheta c_v=\vartheta\partial_\vartheta\rho$; \item[-] the initial data are taken in the energy space, except for \color{black} the initial displacement and velocity which, jointly \color{black} with the boundary Dirichlet datum for $\mathbf u$, must enjoy \color{black} the regularity needed in order to perform \color{black} elliptic regularity estimates on the momentum balance \eqref{e:u}. \end{itemize} Furthermore, we consider a \emph{gradient theory} for damage. From the physical viewpoint, the term $\frac{1}{p}|\nabla z|^p$ contributing to \eqref{free-energy} models \color{black} nonlocality of the damage process, since the gradient of $z$ accounts for the influence of damage at a material point, undamaged in its neighborhood. The mathematical advantages attached to the presence of this term, and of the analogous contribution $\frac{1}{p}|\nabla c|^p$, are rather obvious. 
Let us mention that, in fact, throughout the paper we shall assume that the exponent $p$ in \eqref{e:mu} and \eqref{e:z} fulfills $p>d$. This assumption is mainly mathematically motivated by the fact that it ensures that $c$ and $z$ are estimated in $W^{1,p}(\Omega) \subset \mathrm{C}^0 (\overline\Omega)$, and has been adopted for the analysis of other damage models (cf., e.g., \cite{bmr,MieRou06,mrz,krz2}). \par Regarding the previous results on this type of problems in the literature, let us point out that, by now, \color{black} several contributions on systems coupling rate-dependent damage and thermal processes \color{black} (cf., e.g.~\cite{BoBo, RocRos12, RocRos14, hr}) \color{black} as well as rate-dependent damage and phase separation (cf., e.g., \cite{hk1,hk2}) are available in the literature. Up to our knowledge, this is one of the first contributions on the analysis of a model encompassing all of the three processes (temperature evolution, damage, phase separation) in a thermoviscoelastic \color{black} material. Recently, \color{black} a thermodynamically consistent, quite general \color{black} model describing diffusion of a solute or a fluid in a solid undergoing possible phase transformations and rate-independent damage, beside possible visco-inelastic processes, has been studied in \cite{TomRou}. Let us highlight the main difference to our own model: the evolution of the damage process is therein considered \emph{rate-independent}, which clearly affects the weak solution concept adopted in \cite{TomRou}. In particular, we may point out that dealing with a \emph{rate-dependent} flow rule for the damage variable is one of the challenges of our own analysis, \color{black} due to the presence of the quadratic nonlinearity in $\varepsilon(\mathbf u)$ on the right-hand side of \color{black} \eqref{e:z}. 
\par Let us conclude by mentioning \color{black} some open problems which are currently under study, such as uniqueness of solutions, at least for the isothermal case, and the global-in-time existence analysis for \color{black} the complete damage (degenerating) case, in which the coefficient $a$ in the momentum balance \eqref{e:u} is allowed to vanish in some parts of the domain (cf.~\cite{RocRos12} for the case without phase separation and \cite{hk3} for the isothermal case). \paragraph{\bf Plan of the paper.} In \underline{Section~\ref{s:3}}, after listing all the assumptions on the data of the problem, we rigorously state the \emph{entropic weak} formulation of the problem and give \color{black} the main result of the paper, i.e.\ \color{black} Theorem \ref{thm:1} ensuring the global-in-time existence of entropic weak solutions. \par In \underline{Section~\ref{s:4}} we (formally) derive all the a priori estimates on system \eqref{eqn:PDEsystem} which will be at the core of our existence analysis. \color{black} \par As previously mentioned, Thm.\ \ref{thm:1} is proved by passing to the limit in a carefully devised time-discretization scheme, also coupled with regularization procedures, which could also be of interest in view of possible numerical simulations on the model. To its analysis, the whole \underline{Section \ref{s:5}} is devoted. While postponing more detailed comments on its features, let us mention here that our time-discrete scheme will be {\sl thermodynamically \color{black} consistent}, in that it will ensure the validity of the discrete versions of the entropy and energy inequalities \eqref{entropy-ineq-intro} and \eqref{total-enid-intro}. This will play a crucial role in the limit passage, developed in \underline{Section \ref{s:6}}, where the proof of Theorem \ref{thm:1} will be carried out. 
\color{black} \section{\bf Weak formulation and statement of the main result} \label{s:3} In this section, first of all we recall some notation and preliminary results that will be used throughout the paper. Next, we list all of the conditions on the nonlinearities featuring in system \eqref{eqn:PDEsystem}, as well as on the data $f,\, g,\, h$ and on the initial data. We are thus in the position to give our notion of weak solution to the initial-boundary value problem for system \eqref{eqn:PDEsystem} and state our main existence result, Theorem \ref{thm:1}. \subsection{\bf Preliminaries} \label{ss:3.1} In what follows, we will suppose that \begin{equation} \label{smoothness-omega} \Omega\subset\mathbb{R}^d, \quad d\in \{2,3\}, \color{black} \ \ \text{is a bounded domain with \color{black} $\mathrm{C}^2$-boundary $\partial\Omega$.} \end{equation} This smoothness requirement will allow us to apply regularity results for elliptic systems, at the basis of a regularity estimate that we shall perform on the momentum equation and that will have a key role in the proof of our existence result for system \eqref{eqn:PDEsystem}. \paragraph{\bf Notation for function spaces, norms, operators} Given a Banach space $X$, we will use the symbol $\pairing{}{X}{\cdot}{\cdot}$ for the duality pairing between $X'$ and $X$. Moreover, we shall denote by ${\rm BV}([0,T];X)$ (by $\mathrm{C}^0_{\mathrm{weak}}([0,T];X)$, respectively), the space of functions from $[0,T]$ with values in $ X$ that are defined at every $t \in [0,T]$ and have bounded variation on $[0,T]$ (and are \emph{weakly} continuous on $[0,T]$, resp.). Let $\Omega \subset \mathbb{R}^d$ be a bounded domain, $d \in \{2,3\}$. We set $Q:= \Omega \times (0,T)$ and $\Sigma:=\partial\Omega\times (0,T)$. 
We identify both $L^2 (\Omega)$ and $L^2 (\Omega;\R^d)$ with their dual spaces, and denote by $(\cdot,\cdot)$ the scalar product in $\mathbb{R}^d$, by $(\cdot,\cdot)_{L^2(\Omega)}$ both the scalar product in $L^2(\Omega)$ and in \color{black} $L^2 (\Omega;\R^d)$, and by $H_{0}^1(\Omega;\R^d)$, $H_{\mathrm{Dir}}^2(\Omega;\R^d)$ and $ H_N^2(\Omega)$ \color{black} the spaces \begin{align*} &H_{0}^1(\Omega;\R^d):=\big\{\mathbf{v} \in H^1(\Omega;\mathbb{R}^d) \,:\ \mathbf{v}= 0 \ \hbox{ on }\partial\Omega \,\big\}, \text{ endowed with the norm } \| \mathbf{v}\|_{H_0^1(\Omega;\mathbb{R}^d)}^2: = \int_{\Omega} \varepsilon(\mathbf{v}) \colon \varepsilon(\mathbf{v})\,\, \mathrm{d} x, \\ &H_{\mathrm{Dir}}^2(\Omega;\R^d):= H_{0}^1(\Omega;\R^d) \cap H^2(\Omega; \mathbb{R}^d) = \color{black} \big\{\mathbf{v} \in H^2(\Omega;\mathbb{R}^d)\,:\ \mathbf{v} ={0} \ \hbox{ on }\partial\Omega \,\big\},\\ &H_N^2(\Omega):=\big\{v\in H^2(\Omega)\,:\ \partial_n v=0\text{ on }\partial\Omega\big\}. \end{align*} Note that by Korn's inequality $\|\cdot\|_{H_0^1(\Omega;\mathbb{R}^d)}$ is a norm equivalent to the standard one on $H^1(\Omega;\mathbb{R}^d)$. We denote by $\mathcal{D} (\overline Q)$ the space of the $\mathrm{C}^\infty$-functions with compact support on $Q$. For $q\geq 1$ we will adopt the notation \begin{equation} \label{label-added} W_+^{1,q}(\Omega):= \left\{\zeta \in W^{1,q}(\Omega)\, : \ \zeta(x) \geq 0 \quad \text{for a.a.}\, x \in \Omega \right\}, \quad \text{ and analogously for } W_-^{1,q}(\Omega). \end{equation} Finally, throughout the paper we shall denote by the symbols $c,\,c',\, C,\,C'$ various positive constants depending only on known quantities. Furthermore, the symbols $I_i$, $i = 0, 1,... $, will be used as place-holders for several integral terms popping in the various estimates: we warn the reader that we will not be self-consistent with the numbering, so that, for instance, the symbol $I_1$ will occur several times with different meanings. 
\paragraph{\bf Preliminaries of mathematical elasticity} We postpone to Sec.\ \ref{ss:3.2} the precise statement of all assumptions on the \emph{elastic} contribution $\pd{\varepsilon}(c,\varepsilon(\mathbf u),z)$ \color{black} to the elliptic operator in \eqref{e:u}. Concerning the stiffness tensor $\mathbb{C}$ (we will take the viscosity tensor to be a multiple of $\mathbb{C}$, cf.\ \eqref{eqn:assbV} ahead), \color{black} we suppose that \begin{equation} \label{ass-elas} \mathbb{C}=(c_{ijkh}) \in \mathrm{C}^{1}(\Omega;\mathbb{R}^{d \times d \times d \times d})\, \end{equation} with coefficients satisfying the classical symmetry and ellipticity conditions (with the usual summation convention) \begin{equation} \label{ellipticity} \begin{aligned} c_{ijkh}=c_{jikh}=c_{khij},\qquad \qquad \exists\, \nu_0>0 \,: \quad c_{ijkh} \xi_{ij}\xi_{kh}\geq \nu_0\xi_{ij}\xi_{ij} \ \ \forall\, \xi_{ij}\colon \xi_{ij}= \xi_{ji}. \end{aligned} \end{equation} Observe that with \eqref{ellipticity}, we also encompass in our analysis the case of an anisotropic and inhomogeneous material. Thanks to \eqref{ellipticity} and to the $\mathrm{C}^2$-regularity of $\Omega$ we have the following elliptic regularity result (cf.\ e.g.\ \cite[Lemma~3.2, p.\ 260]{necas} or \cite[Chap.\ 6, p.\ 318]{Hughes}): \begin{align} \label{cigamma} \begin{aligned} \exists \, c_1,\, c_2>0 \quad \forall\, \mathbf{u} \in H_{\mathrm{Dir}}^2(\Omega;\R^d)\, : \qquad c_{1} \| \mathbf{u} \|_{H^2(\Omega;\mathbb{R}^d)} \leq \|\dive (\mathbb{C}\varepsilon(\mathbf{u}))\|_{L^2(\Omega;\mathbb{R}^d)} \leq c_{2} \| \mathbf{u} \|_{H^2(\Omega;\mathbb{R}^d)}\,. \end{aligned} \end{align} Under the assumption that $\mathbf{u}$ has prescribed \color{black} boundary values $\mathbf{d}\in H^2(\Omega;\mathbb{R}^d)$, i.e. $\mathbf{u}=\mathbf{d}$ a.e. 
on $\partial\Omega$, we obtain by applying \eqref{cigamma} \color{black} to $\mathbf{u}-\mathbf{d}$ \begin{align} \label{H2reg} \begin{aligned} &\exists \, \widetilde c_1,\, \widetilde c_2>0 \color{black} \quad \forall\, \mathbf{u} \in H^2(\Omega;\mathbb{R}^d) \text{ with }\mathbf{u}=\mathbf{d}\text{ a.e. on }\partial\Omega\, :\\ &\qquad\qquad\widetilde c_{1} \| \mathbf{u} \|_{H^2(\Omega;\mathbb{R}^d)} \leq \|\dive (\mathbb{C}\varepsilon (\mathbf{u}))\|_{L^2(\Omega;\mathbb{R}^d)}+\|\mathbf{d}\|_{H^2(\Omega;\mathbb{R}^d)} \leq \widetilde c_{2}\big(\|\mathbf{u}\|_{H^2(\Omega;\mathbb{R}^d)} + \|\mathbf{d}\|_{H^2(\Omega;\mathbb{R}^d)}\big)\,. \end{aligned} \end{align} \paragraph{\bf Useful inequalities} For later reference, we recall here the Gagliardo-Nirenberg inequality in a particular case: for all $r,\,q\in [1,+\infty],$ and for all $v\in L^q(\Omega)$ such that $\nabla v \in L^r(\Omega)$, there holds \begin{equation} \label{gn-ineq} \|v\|_{L^s(\Omega)}\leq C_{\mathrm{GN}} \|v\|_{W^{1,r}(\Omega)}^{\theta} \|v\|_{L^q(\Omega)}^{1-\theta} \qquad \text{ with } \frac{1}{s}=\theta \left(\frac{1}{r}-\frac{1}{d}\right)+(1-\theta)\frac{1}{q}, \ \ 0 \leq \theta \leq 1, \end{equation} the positive constant $C_{\mathrm{GN}}$ depending only on $d,\,r,\,q,\,\theta$. We will also make use of the following interpolation inequality from \cite[Thm.\ 16.4, p.\ 102]{LM} \begin{align} \label{interpolationIneq} \forall\varrho>0\quad\exists\,C_\varrho>0\quad\forall u\in X:\qquad\|u\|_Y\leq \varrho\|u\|_X+C_\varrho\|u\|_Z, \end{align} where $X\subseteq Y\subseteq Z$ are Banach spaces with compact embedding $X\Subset Y$. 
Combining this with the compact embedding \begin{equation} \label{dstar} H_{\mathrm{Dir}}^2(\Omega;\R^d) \Subset W^{1,d^\star{-}\eta}(\Omega;\mathbb{R}^d), \quad \text{with } d^{\star}= \begin{cases} \infty & \text{if }d=2, \\ 6 & \text{if }d=3, \end{cases} \quad \text{for all $\eta >0$}, \end{equation} (where for $d=2$ we mean that $H_{\mathrm{Dir}}^2(\Omega;\R^d) \Subset W^{1,q}(\Omega;\mathbb{R}^d)$ for all $1 \leq q <\infty$), we have \begin{equation} \label{interp} \forall\, \varrho>0 \ \ \exists\, C_\varrho>0 \ \ \forall\, \eta>0 \ \ \forall\, \mathbf{u} \in H_{\mathrm{Dir}}^2(\Omega;\R^d)\,: \ \ \|\varepsilon(\mathbf{u})\|_{L^{d^\star{-}\eta}(\Omega; \mathbb{R}^{d\times d}\color{black})}\leq \varrho \|\mathbf{u}\|_{H^2(\Omega; \mathbb{R}^{d}\color{black})}+C_\varrho\|\mathbf{u}\|_{L^2(\Omega; \mathbb{R}^{d}\color{black})}. \end{equation} We also obtain by interpolation \begin{equation} \label{interp2} \forall\, \varrho>0 \ \ \exists\, C_\varrho>0 \ \ \forall\, \eta>0 \ \ \forall\, \mathbf{u} \in H^1(\Omega;\mathbb{R}^d)\,: \ \ \|\mathbf{u}\|_{L^{d^\star{-}\eta}(\Omega;\mathbb{R}^d)} \leq \varrho\|\mathbf{u}\|_{H^1(\Omega;\mathbb{R}^d)}+C_\varrho\|\mathbf{u}\|_{L^2(\Omega;\mathbb{R}^d)}. \end{equation} We will also resort to the following \emph{nonlinear} Poincar\'{e}-type inequality (proved in, e.g., \cite[Lemma 2.2]{gmrs}), with $\mathfrak{m}(w)$ the mean value of $w$: \begin{equation} \label{poincare-type} \forall\, q>0 \quad \exists\, C_q >0 \quad \forall\, w \in H^1(\Omega)\, : \qquad \| |w|^{q} w \|_{H^1(\Omega)} \leq C_q (\| \nabla (|w|^{q} w )\|_{L^2(\Omega)} + |\mathfrak{m}(w)|^{q+1})\,. \end{equation} \subsection{Assumptions} \label{ss:3.2} We now collect all the conditions on the functions $\phi,\,m,\,\sigma,\,\mathsf{K},\,a,\,W,\,\mathbb{V}$ in system \eqref{eqn:PDEsystem}. 
\par \noindent \textbf{Hypothesis (I).} Concerning the potential $\phi$ for the concentration variable $c$, we require that \begin{equation} \label{potential-phi} \begin{gathered} \phi = \widehat{\beta} + \gamma \quad \text{with } \widehat{\beta}: \mathbb{R} \to [0,+\infty] \text{ proper, convex, and l.s.c., with } \widehat{\beta}(0)=0, \text{ and } \\ \gamma \in \mathrm{C}^1(\mathbb{R}), \qquad \gamma \text{ $\lambda_{\gamma}$-concave for some $\lambda_{\gamma}\geq0$, and } \\ \text{such that } \exists\, C_\phi \in \mathbb{R}\, \ \forall\, c \in \mathrm{dom}(\phi) : \ \ \phi(c) \geq C_\phi\,. \end{gathered} \end{equation} In what follows, we will denote the convex-analysis subdifferential $\partial\widehat{\beta}:\mathbb{R} \rightrightarrows \mathbb{R}$ by $\beta$, and by $\mathrm{dom}(\beta)$ the set $\{ c \in \mathbb{R}\, : \ \beta(c)\neq \emptyset\}$. From $0\in \mathrm{Argmin}_{r\in \mathbb{R}} \widehat{\beta}(r)$, it follows that $0\in \beta(0)$. \color{black} \begin{remark}[Consequences of Hypothesis (I)] \upshape \label{rmk:l-convex-splitting} For later use we observe that, since the map $c \mapsto \gamma(c) - \lambda_\gamma\tfrac{c^2}{2}$ is concave, we have the following convex-concave decomposition for $\phi$: \begin{equation} \label{decomposition} \phi(c)= \ddd{\widehat{\beta}(c) + \lambda_\gamma \frac{c^2}{2}}{convex}{} + \ddd{\gamma(c) - \lambda_\gamma\frac{c^2}{2}}{concave}{}\,. \end{equation} \end{remark} \begin{example} \label{ex:phi} \upshape Admissible choices for $\widehat\beta$ are both the physically meaningful potentials $\widehat\beta(c)=(1+c)\log(1+c)+(1-c)\log(1-c)$ and $\widehat\beta(c)=I_{[-1,1]}(c)$, while $\gamma$ can be a general smooth concave perturbation, e.g.~$\gamma(c)=-\lambda_\gamma c^2$. 
\color{black} \end{example} \par\noindent \textbf{Hypothesis (II).} As for the nonlinear functions $m$ and $\sigma$, we suppose that \begin{align} &m \in \mathrm{C}^1 (\mathbb{R}\times\mathbb{R}) \ \text{ and } \ \exists\, m_0>0 \ \forall\, (c,z) \in \mathbb{R}\times \mathbb{R} \, : \ m(c,z) \geq m_0, \label{hyp-m}\\ &\sigma \in \mathrm{C}^2 (\mathbb{R}). \label{hyp-sigma} \end{align} \par \noindent \textbf{Hypothesis (III).} The heat conductivity function \begin{align} \label{hyp-K} \begin{gathered} \mathsf{K}:[0,+\infty)\to(0,+\infty) \ \text{ is continuous and}\\ \exists \, c_0, \, c_1>0 \quad\exists\kappa>1 \ \ \forall\vartheta\in[0,+\infty)\, :\quad c_0 (1+ \vartheta^{\kappa}) \leq \mathsf{K}(\vartheta) \leq c_1 (1+\vartheta^{\kappa})\,. \end{gathered} \end{align} We will denote by $\widehat{\mathsf{K}}$ the primitive $\widehat{\mathsf{K}} (x):= \int_0^x \mathsf{K}(r) \, \mathrm{d} r $ of $\mathsf{K}$. \par \noindent \textbf{Hypothesis (IV).} We require \begin{align} \label{data-a} \begin{aligned} a \in \mathrm{C}^1(\mathbb{R}\times\mathbb{R}) \quad\text{ and }\quad &\exists\, a_0,a_1>0 \quad\forall c, z\in \mathbb{R}\, : \quad &&a_0\leq a(c,z) \leq a_1,\\ &\exists\, a_2>0 \quad\forall c, z\in \mathbb{R}\, : \quad &&|a_{,c}(c,z)|+|a_{,z}(c,z)|\leq a_2. \end{aligned} \end{align} \noindent \textbf{Hypothesis (V).} We suppose that \begin{align} \label{eqn:assumptionW} W(x,c,\varepsilon,z)=\frac12 b(c,z)\mathbb{C}(x)(\varepsilon-\varepsilon^*(c)):(\varepsilon-\varepsilon^*(c)), \end{align} where we recall that $b(c,z)$ models the influence of the concentration and damage on the stiffness tensor $\mathbb{C}$ and $\varepsilon^*$ models the eigenstrain. We assume \begin{align}\label{eqn:assbV} &\varepsilon^*\in \mathrm{C}^2(\mathbb{R}),\qquad b\in \mathrm{C}^2(\mathbb{R}\times\mathbb{R}) \quad\text{ and }\quad\exists\, b_0>0\quad\forall c, z\in \mathbb{R}\, : \quad 0\leq b(c,z)\leq b_0,\quad \mathbb{V}=\omega\mathbb{C}, \quad\omega>0. 
\color{black} \end{align} The tensor function $\mathbb{C}$ should satisfy conditions \eqref{ass-elas} and \eqref{ellipticity}. Let us mention in advance that the last condition on $\mathbb{V}$ will play a crucial role in the proof of $H^2(\Omega;\mathbb{R}^d)$-regularity for the discrete displacements, cf.\ Lemma \ref{lemma:4.16} ahead. \color{black} \par For notational convenience, from now on we shall neglect the $x$-dependence of $W$. For later reference, we observe that \begin{equation} \label{later-ref} \begin{aligned} & W_{,c}(c,\varepsilon,z) = \frac12 b_{,c}(c,z) \mathbb{C}(\varepsilon-\varepsilon^*(c)):(\varepsilon-\varepsilon^*(c)) - \color{black} b(c,z) (\varepsilon^*)'(c) \mathbb{C} :(\varepsilon-\varepsilon^*(c)), \\ & \begin{aligned} W_{,cc}(c,\varepsilon,z) = & \frac12 b_{,cc}(c,z) \mathbb{C}(\varepsilon-\varepsilon^*(c)):(\varepsilon-\varepsilon^*(c)) - b_{,c}(c,z) (\varepsilon^*)'(c) \mathbb{C} :(\varepsilon-\varepsilon^*(c)) \\ & -b(c,z) (\varepsilon^*){''}(c) \mathbb{C} :(\varepsilon-\varepsilon^*(c)) + b(c,z) (\varepsilon^*)'(c) \mathbb{C} :(\varepsilon^*)'(c), \end{aligned} \\ & W_{,z}(c,\varepsilon,z) = \frac12 b_{,z}(c,z) \mathbb{C}(\varepsilon-\varepsilon^*(c)):(\varepsilon-\varepsilon^*(c)), \\ & W_{,zz}(c,\varepsilon,z) = \frac12 b_{,zz}(c,z) \mathbb{C}(\varepsilon-\varepsilon^*(c)):(\varepsilon-\varepsilon^*(c)), \\ & W_{,\varepsilon}(c,\varepsilon,z) = b(c,z) \mathbb{C}(\varepsilon-\varepsilon^*(c)), \\ & W_{,\varepsilon c }(c,\varepsilon,z) = b_{,c}(c,z) \mathbb{C}(\varepsilon-\varepsilon^*(c)) - b(c,z) (\varepsilon^*)'(c)\mathbb{C}, \\ & W_{,\varepsilon z }(c,\varepsilon,z) = b_{,z}(c,z) \mathbb{C}(\varepsilon-\varepsilon^*(c))\,. 
\end{aligned} \end{equation} \color{black} Finally, we will suppose throughout the work that $p>d$ and that the data $\mathbf{d}$, $\mathbf{f}$, $g$, and $h$ comply with \begin{subequations} \label{hyp:data} \begin{align} &\mathbf{d}\in H^1(0,T;H^2(\Omega;\mathbb{R}^d))\cap W^{1,\infty}(0,T;W^{1,\infty}(\Omega;\mathbb{R}^d))\cap H^2(0,T;H^1(\Omega;\mathbb{R}^d)), \label{dirichlet-data}\\ &\mathbf{f}\in L^2(0,T;L^2 (\Omega;\R^d)), \label{bulk-force}\\ &g \in L^1(0,T;L^1(\Omega)) \cap L^2 (0,T; H^1(\Omega)'),\quad g\geq 0 \quad\hbox{a.e. in }Q\,, \label{heat-source}\\ &h \in L^1 (0,T; L^2(\partial \Omega)), \quad h \geq 0 \quad\hbox{a.e. in }\Sigma\,, \label{dato-h} \end{align} \end{subequations} and that the initial data fulfill \begin{subequations} \label{h:initial} \begin{align} &c^0\in W^{1,p}(\Omega),\quad \widehat{\beta}( c^0\color{black}) \in L^1(\Omega), \quad \mathfrak{m}_0:= \mathfrak{m}( c^0\color{black}) \text{ belongs to the interior of } \mathrm{dom}(\beta), \label{data_c}\\ &z^0\in W^{1,p}(\Omega),\quad 0 \leq z^0 \leq 1 \text{ in }\Omega, \label{data_z}\\ &\vartheta^0 \in L^{1}(\Omega), \quad \log\vartheta^0\in L^1(\Omega),\quad\exists\, \vartheta_*>0\,: \;\vartheta^0\geq\vartheta_*>0\;\text{a.e. in\;}\Omega, \label{data_teta}\\ &\mathbf u^0\in H^2(\Omega;\mathbb{R}^d)\text{ with }\mathbf u^0=\mathbf{d}(0)\;\text{ a.e. on }\partial\Omega, \label{data_u}\\ &\mathbf v^0\in H^1(\Omega;\mathbb{R}^d). \label{data_v} \end{align} \end{subequations} \begin{remark} \upshape \label{rmk:on-init-data} Let us point out explicitly that, if we choose \color{black} $\phi$ as the logarithmic potential from Example \ref{ex:phi}, or with $\phi$ given by the sum $I_{[0,1]} + \gamma$, we enforce \color{black} the (physically meaningful) property that $c \in (0,1)$ ($c\in [0,1]$, respectively) in $\Omega$. 
From \eqref{data_c} we read that this constraint has to be enforced on the initial datum $ c^0$ \color{black} as well, in the same way as we require $ z^0\in [0,1]$ \color{black} with \eqref{data_z}. The latter condition, combined with the information that $z(\cdot,x)$ is nonincreasing for almost all $x\in\Omega$ thanks to the term $\partial I_{(-\infty,0]}(z_t)$ in \eqref{e:z}, will yield that the solution component $z$ is in $[0,1]$ a.e.\ in $Q$. This property, albeit \color{black} not needed for the analysis of \eqref{eqn:PDEsystem}, is in accordance with the physical meaning of the damage parameter. Clearly, in the case the concentration variable $c$ is forced to vary between two fixed values, and $z$ is forced to be in $[0,1]$, values of the functions $m$, $\sigma$, $a$ and $b$ outside these \color{black} ranges do not affect the PDE system. \end{remark} \subsection{Entropic solutions and main result} \label{ss:3.3} Prior to the precise statement of our weak solution notion for the initial-boundary value problem for system \eqref{eqn:PDEsystem}, we shortly introduce and motivate its main ingredients, namely a suitable weak formulation of the flow rule \eqref{e:z} for the damage variable and the ``entropic'' formulation of the heat equation \eqref{e:teta}. To them, the standard weak formulation of the Cahn-Hilliard equation, and the pointwise (a.e.\ in $Q$) momentum equation will be coupled. \\ \paragraph{\bf Entropy and total energy inequalities for the heat equation} Along the footsteps of \cite{fei, fpr09}, cf.\ also \cite{RocRos14} in the case of a PDE system in thermoviscoelasticity, we will weakly formulate \eqref{e:teta} by means of an ``entropy inequality'', and of a ``total energy (in)equality''. The former is obtained by testing \eqref{e:teta} by $\varphi/\vartheta$, with $\varphi$ a \emph{positive} \color{black} smooth test function. 
Integrating over space and time leads to \begin{equation} \label{later-4-comparison} \begin{aligned} & \begin{aligned} \int_0^T \int_\Omega \big(\partial_t \log(\vartheta) + c_t + z_t & + \rho \mathrm{div}(\mathbf{u}_t) \big) \varphi \, \mathrm{d} x \, \mathrm{d} t +\int_0^T \int_\Omega \mathsf{K}(\vartheta) \nabla \log(\vartheta)\cdot\nabla \varphi \, \mathrm{d} x \, \mathrm{d} t \\ & - \int_0^T \int_\Omega \mathsf{K}(\vartheta) \frac{\varphi}{\vartheta} \nabla \log(\vartheta) \cdot \nabla \vartheta \, \mathrm{d} x \, \mathrm{d} t \end{aligned} \\ & = \int_0^T \int_\Omega \big(g + |c_t|^2+ |z_t|^2 + a(c,z) \varepsilon(\mathbf{u}_t):\mathbb{V} \varepsilon(\mathbf{u}_t) + m(c,z)|\nabla \mu|^2\big) \frac\varphi\vartheta \, \mathrm{d} x \, \mathrm{d} t + \int_0^T \int_{\partial\Omega} h \frac\varphi\vartheta \, \mathrm{d} S \, \mathrm{d} t \end{aligned} \end{equation} for all $\varphi \in \mathcal{D}(\overline Q)$. Then, the entropy inequality \eqref{entropy-ineq} below follows. The total energy inequality (cf.\ the forthcoming \eqref{total-enid}) associated with system \eqref{eqn:PDEsystem} corresponds to its standard \emph{energy} estimate. Formally, it is indeed obtained by testing \eqref{e:c} by $\mu$, \eqref{e:mu} by $c_t$, \eqref{e:z} by $z_t$, \eqref{e:teta} by $1$, and \eqref{e:u} by $\mathbf{u}_t$, and it features the total energy \eqref{total-energy} of the system.\\ \paragraph{\bf Weak flow rule for the damage parameter} We will adopt the solution notion from \cite{hk1,hk2}, which can be motivated by observing that, due to the convexity of $I_{(-\infty,0]}$, the flow rule \color{black} \eqref{e:z} reformulates as $z_t\leq 0$ a.e.\ in $Q$ and \begin{subequations} \label{ineq-system} \begin{align} \label{ineq-system2} \Big( z_t-\Delta_p(z)+\xi + \sigma'(z)+\pd{z}(c,\varepsilon(\mathbf u),z)-\vartheta\Big) \zeta \geq{}& 0\quad && \qquad \text{a.e. 
in\;} Q,\text{ for all } \zeta \leq 0, \\ \label{ineq-system3} \Big( z_t-\Delta_p(z)+ \xi + \sigma'(z) +\pd{z}(c,\varepsilon(\mathbf u),z) -\vartheta\Big) z_t \leq{}& 0 && \qquad \text{a.e. in\;} Q, \end{align} \end{subequations} with $\xi \in \partial I_{[0,+\infty)}(z)$ in $\Omega \times (0,T)$. Our weak formulation of \eqref{e:z} in fact consists of the condition $z_t \leq 0$, of the integrated version of \eqref{ineq-system2}, with negative test functions from $W^{1,p}(\Omega)$, and of the \emph{damage energy-dissipation} inequality obtained by integrating \eqref{ineq-system3}. We are now in the position to give the following notion of weak solution: \begin{definition}[Entropic weak formulation] \label{def-entropic} Given data $(\mathbf{d}, \mathbf{f}, g, h)$ fulfilling \eqref{hyp:data} and initial values \linebreak $ (c^0,z^0,\vartheta^0,\mathbf u^0,\mathbf v^0) $ \color{black} fulfilling \eqref{h:initial}, we call a quintuple $(c, \mu, z,\vartheta,\mathbf u)$ an \emph{entropic weak solution} to the PDE system \eqref{eqn:PDEsystem}, supplemented with the initial and boundary conditions \eqref{init-bdry-conditions}, if \begin{align} &c\in L^\infty(0,T;W^{1,p}(\Omega))\cap H^1(0,T;L^2(\Omega)),\, \Delta_p(c)\in L^2(0,T;L^2(\Omega)),\label{reg-c}\\ &\mu\in L^2(0,T;H_N^2(\Omega)),\label{reg-mu}\\ &z\in L^\infty(0,T;W^{1,p}(\Omega))\cap H^1(0,T;L^2(\Omega)),\label{reg-z}\\ &\vartheta\in L^2(0,T;H^1(\Omega))\cap L^\infty(0,T;L^1(\Omega)),\, \vartheta^{\frac{\kappa+\alpha}{2}}\in L^2(0,T;H^1(\Omega))\text{ for all }\alpha\in(0,1), \label{reg-teta}\\ &\mathbf u\in H^1(0,T; H^2(\Omega;\mathbb{R}^d))\cap W^{1,\infty}(0,T; H^1(\Omega;\mathbb{R}^d))\cap H^2(0,T;L^2(\Omega;\mathbb{R}^d)),\label{reg-u} \end{align} and subgradients (specified in \eqref{eta-beta} and \eqref{xi-def} below) \begin{align} &\eta \in L^2(0,T;L^2(\Omega)),\\ &\xi \in L^2(0,T;L^2(\Omega)), \end{align} where $(c,z,\vartheta,\mathbf{u})$ comply with the initial conditions (note that the initial condition 
for $\vartheta$ is implicitly formulated in \eqref{total-enid} below) \begin{align} \label{better-init} &&&c(0)=c^0, &&z(0)=z^0, &&\mathbf u(0)=\mathbf u^0, &&\mathbf u_t(0)=\mathbf v^0 &&\text{a.e. in }\Omega,&& \end{align} the Dirichlet condition \begin{align} \label{boundary-cond} \mathbf{u}=\mathbf{d}\quad\text{ a.e. on }\partial\Omega\times(0,T) \end{align} and the following relations: \color{black} \begin{itemize} \item[(i)] Cahn-Hilliard system: \begin{align} c_t={}&\dive(m(c,z)\nabla\mu) &&\text{a.e. in\;}\, Q,\label{ch-1}\\ \mu ={}&-\Delta_p(c)+\eta + \gamma'(c)+W_{,c}(c,\varepsilon(\mathbf u),z)-\vartheta+c_t &&\text{a.e. in\;}\, Q,\label{ch-2}\\ \eta \in{}& \partial \hat{\beta}(c) \color{black} &&\text{a.e. in\;}\, Q; \label{eta-beta} \end{align} \item[(ii)] balance of forces: \begin{align} &\mathbf u_{tt}-\dive{\boldsymbol{\sigma}}=\mathbf{f} &&\qquad\qquad\text{a.e. in\;}\, Q, \label{momentum-a.e.}\\ &{\boldsymbol{\sigma}}=a(c,z)\mathbb{V}\varepsilon(\mathbf u_t)+W_{,\varepsilon}(c,\varepsilon(\mathbf u),z)-\rho\vartheta\mathds 1 &&\qquad\qquad\text{a.e. 
in\;}\, Q; \label{stress-tensor} \end{align} \item[(iii)] weak formulation of the damage flow rule:\\ {\sl damage energy-dissipation inequality} for all $t \in (0,T]$, for $s=0$, and for almost all $0< s\leq t$ \begin{align} &\label{energ-ineq-z} \begin{aligned} \int_s^t \int_{\Omega} |z_t|^2 \, \mathrm{d} x \, \mathrm{d} r & +\int_\Omega\left( \frac1p |\nabla z(t)|^p + \sigma(z(t))\right)\, \mathrm{d} x\\ & \leq\int_\Omega\left( \frac1p |\nabla z(s)|^p+ \sigma(z(s))\right)\, \mathrm{d} x +\int_s^t \int_\Omega z_t \left(- \pd{z}(c,\varepsilon(\mathbf u), z) +\vartheta\right)\, \mathrm{d} x \, \mathrm{d} r \end{aligned} \end{align} and the {\sl one-sided variational inequality for the damage process} \begin{align} \label{var-ineq-z} &\begin{aligned} \int_\Omega \Big( z_t \zeta +|\nabla z|^{p-2} \nabla z \cdot \nabla \zeta + \xi \zeta + \sigma'(z(t)) \zeta & + \pd{z}(c,\varepsilon(\mathbf u), z) \zeta -\vartheta \zeta \Big)\,\mathrm{d}x \geq 0 \\ & \text{for all } \zeta\in W_-^{1,p}(\Omega), \quad \text{a.e. in\;}\, (0,T), \end{aligned} \end{align} where \begin{align} \xi \in \partial I_{[0,+\infty)}(z)\qquad\text{a.e. in\;}\, Q, \label{xi-def} \end{align} as well as the constraints \begin{align} \label{constraint-chit} &z \in [0,1],\qquad z_t\in(-\infty,0] \qquad \text{a.e. 
in\;} Q; \end{align} \item[(iv)] strict positivity and entropy inequality: \begin{equation} \label{strict-pos-teta} \exists\,\underline{\vartheta}>0 \ \text{for a.a.}\, (x,t) \in Q\, : \ \ \vartheta(x,t)\geq \underline{\vartheta}>0 \end{equation} and for almost all $0\leq s \leq t \leq T$, and for $s=0$ the entropy inequality holds: \begin{equation} \label{entropy-ineq} \begin{aligned} &\int_s^t \int_\Omega (\log(\vartheta) + c+z) \varphi_t \, \mathrm{d} x \, \mathrm{d} r - \rho \int_s^t \int_\Omega \dive(\mathbf{u}_t) \varphi \, \mathrm{d} x \, \mathrm{d} r -\int_s^t \int_\Omega \mathsf{K}(\vartheta) \nabla \log(\vartheta) \cdot \nabla \varphi \, \mathrm{d} x \, \mathrm{d} r\\ &\begin{aligned} \leq \int_\Omega (\log(\vartheta(t))+c(t)+z(t)){\varphi(t)} \, \mathrm{d} x &-\int_\Omega (\log(\vartheta(s))+c(s)+z(s)){\varphi(s)} \, \mathrm{d} x\\ &-\int_s^t \int_\Omega \mathsf{K}(\vartheta)|\nabla\log(\vartheta)|^2\varphi\, \mathrm{d} x \, \mathrm{d} r \end{aligned}\\ &\quad-\int_s^t \int_\Omega \left( g +|c_t|^2+ |z_t|^2 + a(c,z) \varepsilon(\mathbf{u}_t):\mathbb{V} \varepsilon(\mathbf{u}_t) + m(c,z)|\nabla \mu|^2\right) \frac{\varphi}{\vartheta} \, \mathrm{d} x \, \mathrm{d} r -\int_s^t \int_{\partial\Omega} h \frac\varphi\vartheta \, \mathrm{d} S \, \mathrm{d} r \end{aligned} \end{equation} for all $\varphi \in \mathrm{C}^0 ([0,T]; W^{1,d+\epsilon}(\Omega)) \cap H^1 (0,T; L^{({d^\star})'}(\Omega))$ for some $\epsilon>0$, with $\varphi \geq 0$; \item[(v)] total energy inequality for almost all $0\leq s \leq t \leq T$, and for $s=0$: \begin{equation} \label{total-enid} \begin{aligned} \tE{c(t)}{z(t)}{\vartheta(t)}{\mathbf u(t)}{\mathbf u_t(t)} \leq{}&\tE{c(s)}{z(s)}{\vartheta(s)}{\mathbf u(s)}{\mathbf u_t(s)}\\ &+ \int_s^t\int_\Omega g \, \mathrm{d} x \, \mathrm{d} r + \int_s^t\int_{\partial\Omega} h \, \mathrm{d} S \, \mathrm{d} r\\ &+ \int_s^t \int_\Omega \mathbf{f} \cdot \mathbf{u}_t \, \mathrm{d} x \, \mathrm{d} r + \int_s^t 
\int_{\partial\Omega}\big({\boldsymbol{\sigma}} { \bf n \color{black}} \big)\cdot \mathbf{d}_t \, \mathrm{d} S \, \mathrm{d} r, \end{aligned} \end{equation} where for $s=0$ we read $\vartheta(0)= \vartheta^0$, and $\mathscr{E}$ is given by \eqref{total-energy}. \color{black} \end{itemize} \end{definition} \noindent \begin{remark} \label{rmk:weak-sol} A few comments on Definition \ref{def-entropic} are in order: \begin{itemize} \item[--] First of all, observe that inequalities \eqref{var-ineq-z} and \eqref{energ-ineq-z} yield the \emph{damage variational inequality} (with $\xi$ fulfilling \eqref{xi-def}) \begin{equation} \label{dam-var-ineq} \begin{aligned} \int_s^t\int_\Omega|\nabla z|^{p-2}\nabla z\cdot\nabla\zeta \, \mathrm{d} x \, \mathrm{d} r & -\int_\Omega\frac 1p|\nabla z(t)|^p \, \mathrm{d} x +\int_\Omega\frac 1p|\nabla z(s)|^p \, \mathrm{d} x\\ &+\int_s^t\int_\Omega\Big(z_t(\zeta-z_t)+\sigma'(z)(\zeta-z_t)+\xi(\zeta-z_t)\Big) \, \mathrm{d} x \, \mathrm{d} r \\ &\geq \int_s^t\int_\Omega\Big(-W_{,z}(c,\varepsilon(\mathbf u),z)(\zeta-z_t)+\vartheta(\zeta-z_t)\Big) \, \mathrm{d} x \, \mathrm{d} r \end{aligned} \end{equation} for all $t \in (0,T]$, for $s=0$, and for almost all $0< s\leq t$ and for all test functions $\zeta \in L^p (0,T; W_-^{1,p}(\Omega)) \cap L^\infty (0,T; L^\infty(\Omega))$. \item[--] Concerning the \emph{entropic} formulation (=entropy+total energy inequalities) of the heat equation, we point out that it is consistent with the classical one. Namely, \color{black} if \color{black} the functions $\vartheta,\, c,\, z$ are sufficiently smooth, then inequalities \eqref{entropy-ineq} and \eqref{total-enid}, combined with \eqref{e:c}--\eqref{e:z} and \eqref{e:u} yield the pointwise formulation of \eqref{e:teta}, cf.\ \cite[Rmk.\ 2.6]{RocRos14} for all details. \item[--] Observe that the \emph{damage energy-dissipation} inequality \eqref{energ-ineq-z} is required to hold for all $t\in (0,T]$ and for almost all $0 \leq s<t$, and $s=0$. 
Indeed we will not be able to improve it to an equality, or to an inequality holding on \emph{every} subinterval $[s,t]\subset[0,T]$. This is due to the fact that we will obtain \eqref{energ-ineq-z} by passing to the limit in its time-discrete version (cf.\ Lemma \ref{l:energy-est}), exploiting lower semicontinuity arguments to take the limit of the left-hand side, and pointwise, almost everywhere in $(0,T)$, convergences to take the limit of the right-hand side. Analogous considerations apply to the \emph{entropy} and \emph{total energy} inequalities \eqref{entropy-ineq} and \eqref{total-enid}. \item[--] We remark that the \emph{damage energy-dissipation} and the \emph{total energy} inequalities are obtained independently of one another: while this will be clear from the proof of Theorem \ref{thm:1} below, we refer to \cite[Rmk.\ 2.8]{RocRos14} and \cite[Sec. 2.4]{RocRos12} for further comments. \item[--] The quasi-linear $p$-Laplacian operator $\Delta_p:W^{1,p}(\Omega)\to W^{1,p}(\Omega)'$ with homogeneous Neumann conditions occurring in \eqref{ch-2} is defined in the distributional sense as $$ \langle - \color{black}\Delta_p(v),w\rangle_{W^{1,p}(\Omega)}=\int_\Omega|\nabla v|^{p-2}\nabla v\cdot\nabla w\,\mathrm dx. $$ However, since $\Delta_p(c)\in L^2(0,T;L^2(\Omega))$ due to \eqref{reg-c}, the Cahn-Hilliard system can be interpreted in a pointwise formulation. In view of the regularity result \cite[Thm.\ 2, Rmk.\ 3.5]{savare98}, we infer the enhanced regularity \begin{align*} c \in L^2 (0,T; W^{1+\sigma,p}(\Omega)) \qquad \text{for all } 1 \leq \sigma< \frac1p. \end{align*} \item[--] All the terms in the total energy inequality \eqref{total-enid} have \color{black} a physical interpretation: The second and the third term on the right-hand \color{black} side of \eqref{total-enid} describe energy changes due to external heat sources. 
The integrand $\mathbf{f}\cdot\mathbf{u}_t$ in \color{black} the fourth term on the right-hand side of \eqref{total-enid} specifies the power expended by \color{black} the external volume force $\mathbf{f}$, whereas the integrand $\big({\boldsymbol{\sigma}} n\big)\cdot \mathbf{d}_t$ of the fifth term indicates the power expended by \color{black} the time-dependent Dirichlet data $\mathbf{d}$ on the boundary $\partial\Omega$ (remember that ${\boldsymbol{\sigma}}$ is the stress tensor given in \eqref{stress-tensor}). \end{itemize} \end{remark} We can now state our existence result for the entropic formulation of system \eqref{eqn:PDEsystem}. Observe that, while the basic time-regularity for $\vartheta$ (in fact for $\log(\vartheta)$) is poor in the general case, under an additional restriction on the exponent $\kappa$ from Hypothesis (III) we will be able to obtain $\mathrm{BV}$-time regularity for $\vartheta$. \begin{theorem} \label{thm:1} Assume \textbf{Hypotheses (I)--(V)}, and let the data $(\mathbf{d},\mathbf{f}, g, h)$ comply with \eqref{hyp:data}. Then, for any quintuple $(c^0,z^0,\vartheta^0, \mathbf u^0,\mathbf v^0)$ fulfilling \eqref{h:initial} there exists an entropic weak solution $(c,\mu,z,\vartheta,\mathbf{u})$ to the PDE system \eqref{eqn:PDEsystem}, supplemented with the initial and boundary conditions \eqref{init-bdry-conditions}, such that \color{black} \begin{align} &\label{BV-log} \log(\vartheta) \in L^\infty(0,T;W^{1,d+\epsilon}(\Omega)') \qquad \text{for all } \epsilon >0. 
\end{align} Furthermore, if in addition the exponent $\kappa$ in \eqref{hyp-K} satisfies \begin{equation} \label{range-k-admissible} \kappa \in (1, 5/3) \quad\hbox{if $d=3$ and } \kappa \in (1, 2) \quad\hbox{if $d=2$ }, \end{equation} then we have \begin{equation} \label{furth-reg-teta} \vartheta\in \mathrm{BV}([0,T]; W^{2,d+\epsilon}(\Omega)') \qquad \text{for every } \epsilon>0, \end{equation} and the total energy inequality \eqref{total-enid} holds \underline{for all} $t \in [0,T]$, for $s=0$, and for almost all $s \in (0,t)$. \end{theorem} We will prove Theorem \ref{thm:1} throughout Sections \ref{s:5} \& \ref{s:6} by passing to the limit in a carefully devised time discretization scheme and several regularizations. Namely, in Section \ref{s:5} we are going to set up our time discretization scheme for system \eqref{eqn:PDEsystem} and perform on it all the a priori estimates allowing us to prove, in Sec.\ \ref{s:6}, that (along a suitable subsequence) the approximate solutions converge to an entropic weak solution to \eqref{eqn:PDEsystem}. However, to enhance the readability of the paper in Section \ref{s:4} we will (formally) perform all estimates on the time-continuous level, i.e.\ on system \eqref{eqn:PDEsystem} itself. \color{black} \section{\bf Formal a priori estimates} \label{s:4} Let us briefly outline all the estimates that will be formally developed on the time-continuous system \eqref{eqn:PDEsystem}: \color{black} \begin{itemize} \item[--] in the \underline{\bf First estimate}, from the (formally written) \emph{total energy identity} (cf.\ \eqref{calc1} below) we will derive a series of bounds on the \emph{non-dissipative} variables $c,\, z,\, \vartheta,\, \mathbf{u} $, as well as on $\|\mathbf{u}_t\|_{L^\infty (0,T; L^2(\Omega;\mathbb{R}^d))}$. 
\item[--] Then, with the \underline{\bf Second estimate}, we shall adapt some calculations first developed in \cite{fpr09} (see also \cite{RocRos14}) to derive a bound for $\|\vartheta\|_{ L^2 (0,T; H^1(\Omega))\color{black}}$ via a clever test of the heat equation \eqref{e:teta}. \item[--] Exploiting the previously obtained estimates, in the \underline{\bf Third estimate} we will obtain bounds for the \emph{dissipative} variables $c_t,\, z_t,\, \varepsilon(\mathbf{u}_t)$, as well as for $\nabla \mu$. \item[--] The \underline{\bf Fourth estimate} is an elliptic regularity estimate on the momentum equation, along the footsteps of \cite{bss} where it was developed in the case of a \emph{scalar} displacement variable. With this, in particular we gain a (uniform in time) bound on $\|\mathbf{u}\|_{H^2(\Omega;\mathbb{R}^d)}$ which translates into an (uniform in time) $L^2(\Omega)$-bound for the term $\pd{c}(c,\varepsilon(\mathbf u),z)$ in \eqref{e:mu}. \item[--] Using this, in the \underline{\bf Fifth estimate} we obtain a bound on the $L^2(0,T;H^1(\Omega))$-norm of $\mu$ from a bound on its mean value $\Xint-_\Omega \mu \, \mathrm{d} x$, combined with the previously obtained bound for $\nabla\mu$ via the Poincar\'e inequality. To develop the related calculations, we will momentarily suppose that \begin{equation} \label{mir-zelik} \begin{gathered} \widehat\beta \in \mathrm{C}^1(\mathbb{R}) \text{ and satisfies the following property:} \\ \forall\, \mathfrak{m} \in\mathbb{R}\ \exists\, C_{\mathfrak{m}},\, C_{\mathfrak{m}}'>0 \quad |\beta(c+\mathfrak{m})|\leq C_{\mathfrak{m}} \beta(c+\mathfrak{m})c +C_{\mathfrak{m}}'\,. \end{gathered} \end{equation} \item[--] We are then in the position to obtain a $L^2(0,T; L^2(\Omega;\mathbb{R}^d))$-estimate for each single term in \eqref{e:mu} in the \underline{\bf Sixth estimate}. 
\item[--] With the \underline{\bf Seventh} and \underline{\bf Eighth} estimates we gain some information on the ($\mathrm{BV}$-)time regularity of $ \log(\vartheta)$ and $\vartheta$, respectively (in the latter case, under the further condition \eqref{range-k-admissible} on the growth exponent $\kappa$ of $\mathsf{K}$). \item[--] Finally, in the \underline{\bf Ninth estimate} we resort to \color{black} higher elliptic regularity results to gain a uniform bound on $\|\mu\|_{L^2(0,T;H^2(\Omega))}$. \end{itemize} In the proof of the forthcoming \color{black} Proposition \ref{prop:aprio-discr} we will discuss how to make all of the following calculations rigorous in the context of the time-discretization scheme from Definition \ref{def:time-discrete} (let us mention in advance that, for the \emph{Fifth estimate} we will need the analogue of \eqref{mir-zelik} on the level of the Yosida regularization of $\beta$), with the exception of the computations related to the ensuing \textbf{Seventh a priori estimate}. Indeed, while in the present time-continuous context this formal estimate will provide a $\mathrm{BV}$-in-time bound for $\log(\vartheta)$, on the time-discrete level it will be possible to render it only in a \emph{weaker} form, albeit still useful for the compactness arguments developed in Section \ref{s:6}. \par In the following calculations, at several spots we will follow the footsteps of \cite{RocRos14}, hence we will give the main ideas, skipping some details and referring to the latter paper. In comparison to \cite{RocRos14}, the additional coupling with the Cahn-Hilliard system \eqref{e:c}--\eqref{e:mu} requires new a priori estimates (see the \textbf{Fifth}, \textbf{Sixth} and \textbf{Ninth estimates} below). Beyond this, the remaining system \eqref{e:z}--\eqref{e:u} also depends on the phase field variable $c$ and the estimation techniques used in \cite{RocRos14} need to be adapted to this situation. 
And, finally, the time-dependent Dirichlet boundary conditions for $\mathbf{u}$ require substantial modifications especially in the \textbf{First}, but also in the \textbf{Third} and \textbf{Fourth estimates} below. \paragraph{\bf Strict positivity of $\vartheta$} Along the lines of \cite{fpr09}, we rearrange terms in \eqref{e:teta} and (formally, disregarding the (positive) boundary datum $h$) we obtain \begin{equation} \label{formal-positivity} \begin{aligned} \vartheta_t-\dive(\mathsf{K} (\vartheta)\nabla\vartheta) & = g +|c_t|^2+|z_t|^2 + a( c, z)\varepsilon(\mathbf{u}_t):\mathbb{V} \varepsilon(\mathbf{u}_t) + m(c,z) |\nabla \mu|^2 - c_t\vartheta-z_t\vartheta - \rho \vartheta \mathrm{div}(\mathbf{u}_t) \\ & \geq g +\frac12|c_t|^2+\frac12|z_t|^2 + c |\varepsilon(\mathbf{u}_t)|^2 + m(c,z) |\nabla \mu|^2 -C \vartheta^2 \geq -C\vartheta^2 \quad \text{a.e. in\;} \, Q. \end{aligned} \end{equation} Here, for the first inequality we have used that $\mathbb{V}$ is positive definite by \eqref{eqn:assbV} and \color{black} \eqref{ellipticity}, that $a$ is strictly positive thanks to \eqref{data-a}, and that \begin{equation} \label{eps-estim} | \dive(\mathbf{u}_t) | \leq c(d) |\varepsilon({\mathbf{u}_t})| \quad \text{a.e.\ in $Q$} \end{equation} with $c(d)$ a positive constant only depending on the space dimension $d$. The second inequality in \eqref{formal-positivity} \color{black} also relies on the fact that $g \geq 0$ a.e.\ in $Q$. Therefore we conclude that $v$ solving the Cauchy problem \[ v_t=-\frac12 v^2, \quad v(0)=\vartheta_*>0 \] is a subsolution of \eqref{e:teta}, and a comparison argument yields that there exists $\underline\vartheta>0$ such that \begin{equation}\label{teta-pos} \vartheta(\cdot,t)\geq v(t)>\underline\vartheta>0\quad \hbox{for all }t\in [0,T]\,. 
\end{equation} \paragraph{\bf First estimate:} We test \eqref{e:c} by $\mu$, \eqref{e:mu} by $c_t$, \eqref{e:z} by $z_t$, \eqref{e:teta} by 1, \eqref{e:u} by $\mathbf{u}_t$, add the resulting relations and integrate over the time interval $(0,t)$, $t\in (0,T]$. Here the second term in the force balance equation is treated by integration by parts in space as follows (notice that $\mathbf{u}_t=\mathbf{d}_t$ a.e. on $\partial\Omega\times(0,T)$): \begin{align} \label{sigmaInt} \begin{aligned} &\int_0^t\int_\Omega-\dive\big(a(c,z)\mathbb{V}\varepsilon(\mathbf u_t)+W_{,\varepsilon}(c,\varepsilon(\mathbf u),z)-\rho\vartheta\mathds{1}\big)\cdot\mathbf{u}_t\,\mathrm dx\,\mathrm ds\\ &\qquad=\int_0^t\int_\Omega a(c,z)\mathbb{V}\varepsilon(\mathbf u_t):\varepsilon(\mathbf u_t)+W_{,\varepsilon}(c,\varepsilon(\mathbf u),z):\varepsilon(\mathbf u_t)-\rho\vartheta\dive(\mathbf{u}_t)\,\mathrm dx\,\mathrm ds -\int_0^t\int_{\partial\Omega}({\boldsymbol{\sigma}}{ \bf n \color{black}} )\cdot\mathbf{d}_t\, \mathrm{d} S\,\mathrm ds. 
\end{aligned} \end{align} Furthermore, we use that, by the chain rule, \begin{align*} \begin{aligned} & \text{(i) } && \begin{aligned} &\int_0^t \int_\Omega \pd{c}(c,\varepsilon(\mathbf u),z) c_t + \pd{z}(c,\varepsilon(\mathbf u),z) z_t + \pd{\varepsilon}(c,\varepsilon(\mathbf u),z)\colon \varepsilon(\mathbf u_t) \, \mathrm{d} x \, \mathrm{d} s\\ &= \int_\Omega W(c(t),z(t),\varepsilon(\mathbf u(t))) \, \mathrm{d} x - \int_\Omega W(c(0),z(0),\varepsilon(\mathbf u(0))) \, \mathrm{d} x, \end{aligned}\\ & \text{(ii) } && \int_0^t \int_\Omega \left( \eta + \gamma'(c) \right ) c_t \, \mathrm{d} x \, \mathrm{d} s = \int_\Omega \phi(c(t)) \, \mathrm{d} x - \int_\Omega \phi(c(0)) \, \mathrm{d} x,\\ & \text{(iii) } && \int_0^t \int_\Omega\left( \partial I_{[0,+\infty)}(z) +\sigma'(z) \right) z_t \, \mathrm{d} x \, \mathrm{d} s = \int_\Omega I_{[0,+\infty)}(z(t)) + \sigma(z(t)) \, \mathrm{d} x - \int_\Omega I_{[0,+\infty)}(z(0)) + \sigma(z(0)) \, \mathrm{d} x, \end{aligned} \end{align*} as well as the identity $\int_0^t \int_\Omega \partial I_{(-\infty,0]}(z_t) z_t \, \mathrm{d} x \, \mathrm{d} s = \int_0^t \int_\Omega I_{(-\infty,0]}(z_t) \, \mathrm{d} x \, \mathrm{d} s= 0 $ due to the positive \color{black} $1$-homogeneity of $ \partial I_{(-\infty,0]}$. Also taking into account the cancellation of a series of terms, we arrive at the \emph{total energy identity} \begin{equation}\label{calc1} \begin{aligned} \tE{c(t)}{z(t)}{\vartheta(t)}{\mathbf u(t)}{\mathbf u_t(t)} ={}& \tE{c_0}{z_0}{\vartheta_0}{\mathbf u_0}{\mathbf v_0} + \int_0^t \int_\Omega g \, \mathrm{d} x\,\mathrm ds+ \int_0^t\int_{\partial\Omega} h \, \mathrm{d} S \, \mathrm{d} s\\ &+\int_0^t \int_\Omega \mathbf{f} \cdot \mathbf{u}_t \, \mathrm{d} x \, \mathrm{d} s +\int_0^t\int_{\partial\Omega}({\boldsymbol{\sigma}} { \bf n \color{black}})\cdot\mathbf{d}_t\, \mathrm{d} S\,\mathrm ds\,, \end{aligned} \end{equation} which incorporates the initial conditions \eqref{better-init}. 
We estimate the second, third and fourth terms on the right-hand side of \eqref{calc1} via \eqref{hyp:data} and obtain \begin{align*} \begin{aligned} & \left| \int_0^t \int_\Omega g \, \mathrm{d} x \, \mathrm{d} s \right| \stackrel{\eqref{heat-source}}{\leq} C, \qquad \left| \int_0^t \int_{\partial\Omega} h \, \mathrm{d} S \, \mathrm{d} s \right| \stackrel{\eqref{dato-h}}{\leq} C,\\ & \left| \int_0^t \int_\Omega \mathbf{f} \cdot \mathbf{u}_t \, \mathrm{d} x \, \mathrm{d} s \right| \stackrel{\eqref{bulk-force}}{\leq} C +\|\mathbf{u}_t\|_{L^2(0,T;L^2(\Omega;\mathbb{R}^d))}^2. \end{aligned} \end{align*} We now carefully handle the last term on the right-hand side of \eqref{calc1}. Since \color{black} no viscous term of the type $\varepsilon(\mathbf{u}_t)$ occurs on its left-hand side, to absorb the last term on the right-hand side and close the estimate \color{black} we will extensively make use of integration by parts in space, as well as of \color{black} the force balance equation \eqref{e:u}, \color{black} of integration by parts in time, and of Young's inequality ($\delta>0$ will be chosen later): \begin{align*} \int_0^t\int_{\partial\Omega}({\boldsymbol{\sigma}}{ \bf n \color{black}})\cdot\mathbf{d}_t\, \mathrm{d} S\,\mathrm ds ={}&\int_0^t\int_\Omega\dive({\boldsymbol{\sigma}})\cdot\mathbf{d}_t\,\mathrm dx\,\mathrm ds +\int_0^t\int_\Omega {\boldsymbol{\sigma}}:\varepsilon(\mathbf{d}_t)\,\mathrm dx\,\mathrm ds\\ ={}&\int_0^t\int_\Omega(-\mathbf{f}+\mathbf{u}_{tt})\cdot\mathbf{d}_t\,\mathrm dx\,\mathrm ds +\int_0^t\int_\Omega{\boldsymbol{\sigma}}:\varepsilon(\mathbf{d}_t)\,\mathrm dx\,\mathrm ds\\ \leq{}& \|\mathbf{f}\|_{L^2(0,T;L^2(\Omega;\mathbb{R}^d))}\|\mathbf{d}_t\|_{L^2(0,T;L^2(\Omega;\mathbb{R}^d))} +\int_0^t\|\mathbf u_t\|_{L^2(\Omega;\mathbb{R}^d)}\|\mathbf{d}_{tt}\|_{L^2(\Omega;\mathbb{R}^d)}\,\mathrm ds\\ &+\delta\|\mathbf{u}_t(t)\|_{L^2(\Omega;\mathbb{R}^d)}^2+C_\delta\|\mathbf{d}_t(t)\|_{L^2(\Omega;\mathbb{R}^d)}^2 +\|\mathbf v^0\color{black} 
\|_{L^2(\Omega;\mathbb{R}^d)}\|\mathbf{d}_t(0)\|_{L^2(\Omega;\mathbb{R}^d)}\\ &+\underbrace{\int_0^t\int_\Omega a(c,z)\mathbb{V}\varepsilon(\mathbf{u}_t):\varepsilon(\mathbf{d}_t)\,\mathrm dx\,\mathrm ds}_{\doteq I_1} +\underbrace{\int_0^t\int_\Omega b(c,z)\mathbb{C}(\varepsilon(\mathbf{u})-\varepsilon^*(c)):\varepsilon(\mathbf{d}_t)\,\mathrm dx\,\mathrm ds}_{\doteq I_2}\\ &+\rho\|\dive(\mathbf{d}_t)\|_{L^\infty(Q)}\int_0^t\int_\Omega|\vartheta|\,\mathrm dx\,\mathrm ds. \end{align*} Moreover, by using integration by parts \color{black} in space again, the properties of the coefficient functions $a$ and $b$ stated in Hypothesis (IV) and (V), and by using \eqref{dirichlet-data} on $\mathbf{d}$, \color{black} $\mathbf{u}_t=\mathbf{d}_t$ a.e. on $\partial\Omega\times(0,T)$ and the trace theorem we obtain \color{black} \begin{align*} I_1={}&-\int_0^t\int_\Omega\mathbf{u}_t\cdot\dive\big(a(c,z)\mathbb{V}\varepsilon(\mathbf{d}_t)\big)\,\mathrm dx\,\mathrm ds +\int_0^t\int_{\partial\Omega}\mathbf{u}_t\cdot\big(a(c,z)\mathbb{V}\varepsilon(\mathbf{d}_t) { \bf n \color{black}} \big)\, \mathrm{d} S\,\mathrm ds\\ ={}&-\int_0^t\int_\Omega\mathbf{u}_t\cdot\Big(\big(a_{,c}(c,z)\nabla c+a_{,z}(c,z)\nabla z\big)\cdot\mathbb{V}\varepsilon(\mathbf{d}_t)\Big)\,\mathrm dx\,\mathrm ds -\int_0^t\int_\Omega \mathbf{u}_t \cdot\left(a(c,z)\mathbb{V}\dive(\varepsilon(\mathbf{d}_t))\right)\,\mathrm dx\,\mathrm ds \color{black}\\ &+\int_0^t\int_{\partial\Omega}\mathbf{d}_t\cdot\big(a(c,z)\mathbb{V}\varepsilon(\mathbf{d}_t) { \bf n \color{black}} \big)\, \mathrm{d} S\,\mathrm ds\\ \leq{}&C\|\varepsilon(\mathbf{d}_t)\|_{L^\infty(Q; \mathbb{R}^{d\times d} \color{black})}\Big(\int_0^t\|\mathbf{u}_t\|_{L^2(\Omega;\mathbb{R}^d)}^2\,\mathrm ds +\|a_{,c}(c,z)\|_{L^\infty(0,T;L^\infty(\Omega))}^2\int_0^t\|\nabla c\|_{L^2(\Omega;\mathbb{R}^d)}^2\,\mathrm ds\\ &\qquad\qquad\qquad\quad+\|a_{,z}(c,z)\|_{L^\infty(0,T;L^\infty(\Omega))}^2\int_0^t\|\nabla z\|_{L^2(\Omega;\mathbb{R}^d)}^2\,\mathrm ds\Big)\\ 
&+C\int_0^t\|\mathbf{u}_t\|_{L^2(\Omega;\mathbb{R}^d)}^2\,\mathrm ds+C\|a(c,z)\|^2_{L^\infty(0,T;L^\infty( \Omega))\color{black} }\|\varepsilon(\mathbf{d}_t)\|_{L^2(0,T;H^1(\Omega; \mathbb{R}^{d\times d} ))}^2 \color{black}\\ &+C\|\mathbf{d}_t\|_{L^2(0,T;H^1(\Omega;\mathbb{R}^d))}\|\varepsilon(\mathbf{d}_t)\|_{L^2(0,T;H^1(\Omega;\mathbb{R}^{d\times d}))} \|a(c,z)\|_{L^\infty(0,T;L^\infty(\partial\Omega))}\\ \leq{}&C\int_0^t\Big(\|\mathbf{u}_t\|_{L^2(\Omega;\mathbb{R}^d)}^2+\|\nabla c\|_{L^2(\Omega;\mathbb{R}^d)}^2 +\|\nabla z\|_{L^2(\Omega;\mathbb{R}^d)}^2\Big)\,\mathrm ds+C,\\ I_2\leq{}&C\int_0^t\int_\Omega b(c,z)^2\mathbb{C}(\varepsilon(\mathbf{u})-\varepsilon^*(c)):(\varepsilon(\mathbf{u})-\varepsilon^*(c))+\mathbb{C}\varepsilon(\mathbf{d}_t):\varepsilon(\mathbf{d}_t)\,\mathrm dx\,\mathrm ds\\ \leq{}&C\|b(c,z)\|_{L^\infty(0,T;L^\infty(\Omega))}\int_0^t\int_\Omega W(c,\varepsilon(\mathbf u),z)\,\mathrm dx\,\mathrm ds +C\|\varepsilon(\mathbf{d}_t)\|_{L^2(0,T;L^2(\Omega;\mathbb{R}^{d\times d}))}^2. 
\end{align*} All in all, again taking into account \eqref{hyp:data}, \color{black} we gain the estimate \begin{align*} &\tE{c(t)}{z(t)}{\vartheta(t)}{\mathbf u(t)}{\mathbf u_t(t)}\\ &\qquad\leq C_\delta+\delta\|\mathbf{u}_t(t)\|_{L^2(\Omega;\mathbb{R}^d)}^2 +\int_0^t C\big(\|\mathbf{d}_{tt}\|_{L^2(\Omega;\mathbb{R}^d)}^2+1\big)\times \color{black} \\ &\qquad\qquad\times \color{black}\Big( \int_\Omega \color{black} W(c,\varepsilon(\mathbf u),z)\,\mathrm dx +\|\nabla c\|_{L^2(\Omega;\mathbb{R}^d)}^2+\|\nabla z\|_{L^2(\Omega;\mathbb{R}^d)}^2 +\int_\Omega|\vartheta|\,\mathrm dx +\|\mathbf u_t\|_{L^2(\Omega;\mathbb{R}^d)}^2\Big)\,\mathrm ds\\ &\qquad\leq C_\delta+\delta\|\mathbf{u}_t(t)\|_{L^2(\Omega;\mathbb{R}^d)}^2 +\int_0^t C\big(\|\mathbf{d}_{tt}\|^2_{L^2(\Omega;\mathbb{R}^d)}+1\big)\tE{c(s)}{z(s)}{\vartheta(s)}{\mathbf u(s)}{\mathbf u_t(s)}\,\mathrm ds.\color{black} \end{align*} Choosing $\delta=1/4$, using the Gronwall Lemma together with \eqref{dirichlet-data} and taking the positivity of $\vartheta$ into account, we conclude \begin{equation} \label{est1} \| \vartheta \|_{L^\infty (0,T;L^1(\Omega))} +\| \mathbf{u}\|_{W^{1,\infty}(0,T;L^2(\Omega;\mathbb{R}^d))} +\| c \|_{L^\infty(0,T;W^{1,p}(\Omega))} +\| \nabla z \|_{L^\infty(0,T;L^{p}(\Omega;\mathbb{R}^d))} \leq C. \end{equation} Note that we have also used the Poincar\'e inequality to obtain the boundedness for $c$ in $L^\infty(0,T;W^{1,p}(\Omega))$ because it holds $\int_\Omega c(t)\,\mathrm dx\equiv const$ for all $t\in[0,T]$ (this follows from \eqref{e:c} and the no-flux condition for $\mu$ in \eqref{bdry-conditions}). \color{black} \paragraph{\bf Second estimate:} Let $F(\vartheta) = \vartheta^\alpha/\alpha$, with $\alpha \in (0,1)$. 
We test \eqref{e:teta} by $F'(\vartheta):= \vartheta^{\alpha-1}$, and integrate on $(0,t)$ with $t \in (0,T]$, thus obtaining \[ \begin{aligned} &\int_\Omega F(\vartheta_0)\, \mathrm{d} x+ \int_0^t \int_\Omega g F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s +\int_0^t \int_{\partial \Omega} h F'(\vartheta) \, \mathrm{d} S \, \mathrm{d} s +\int_0^t \int_\Omega (|c_t|^2+ |z_t|^2) F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s\\ &+\int_0^t \int_\Omega a(c,z) \varepsilon({\mathbf{u}_t}): \mathbb{V} \varepsilon({\mathbf{u}_t}) F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s + \int_0^t \int_\Omega m(c,z) |\nabla \mu|^2 F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s \\ &\quad=\int_\Omega F(\vartheta(t))\, \mathrm{d} x + \int_0^t \int_\Omega (c_t + z_t) \vartheta F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s +\rho \int_0^t \int_\Omega \vartheta \dive(\mathbf{u}_t) F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s + \int_0^t \int_\Omega \mathsf{K}(\vartheta) \nabla \vartheta\cdot\nabla (F'(\vartheta)) \, \mathrm{d} x \, \mathrm{d} s. 
\end{aligned} \] By the positivity of $g$ and $h$ we can neglect the second and third terms on the left-hand side, whereas, taking into account the ellipticity condition \color{black} \eqref{ellipticity} and the positivity \eqref{hyp-m} and \eqref{data-a} of $m$ and $a$, \color{black} we infer \begin{align} \label{eqn:secondEstPre} \begin{aligned} &\frac{4(1-\alpha)}{\alpha^2} \int_0^t \int_\Omega \mathsf{K}(\vartheta) |\nabla (\vartheta^{\alpha/2})|^2 \, \mathrm{d} x \, \mathrm{d} s + \bar{c} \color{black}\int_0^t\int_\Omega(|\varepsilon({\mathbf{u}_t})|^2+|\nabla\mu|^2)F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s \\ & \qquad + \int_0^t \int_\Omega (|c_t|^2 + |z_t|^2) F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s \leq \int_\Omega |F(\vartheta_0)|\, \mathrm{d} x +I_1 +I_2+I_3, \end{aligned} \end{align} with $\bar{c}>0$ depending on $\nu_0$, $m_0$, and $a_0$, where $I_3 \doteq |\rho| \int_0^t \int_\Omega |\vartheta \dive(\mathbf{u}_t) F'(\vartheta) | \, \mathrm{d} x \, \mathrm{d} s$. \color{black} We estimate \[ \begin{aligned} I_1= \int_\Omega |F(\vartheta(t))|\, \mathrm{d} x \leq \frac1{\alpha}\int_\Omega \max\{ \vartheta(t), 1\}^\alpha \, \mathrm{d} x \leq \frac1{\alpha}\int_\Omega \max\{ \vartheta(t), 1\} \, \mathrm{d} x \leq C \end{aligned} \] since $\alpha <1$ and taking into account the previously obtained inequality \color{black} \eqref{est1}. Analogously we can estimate $\int_\Omega |F(\vartheta_0)|\, \mathrm{d} x$ thanks to \eqref{data_teta}; moreover, \[ I_2 = \int_0^t \int_\Omega |(c_t +z_t) \vartheta F'(\vartheta)| \, \mathrm{d} x \, \mathrm{d} s \leq \frac14 \int_0^t \int_\Omega \left( |c_t|^2 + |z_t|^2\right) F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s + 2 \int_0^t \int_\Omega F'(\vartheta)\vartheta^2 \, \mathrm{d} x \, \mathrm{d} s. 
\] Using inequality \eqref{eps-estim} \color{black} and Young's inequality, we have that \[ \begin{aligned} I_3 =|\rho| \int_0^t \int_\Omega | \vartheta \dive(\mathbf{u}_t) F'(\vartheta)| \, \mathrm{d} x \, \mathrm{d} s \leq \frac { \bar{c}\color{black}} 4 \int_0^t \int_\Omega |\varepsilon({\mathbf{u}_t})|^2 F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s + C\int_0^t \int_\Omega F'(\vartheta)\vartheta^2 \, \mathrm{d} x \, \mathrm{d} s\,. \end{aligned} \] All in all, we conclude \begin{equation} \label{all-in-all} \begin{aligned} &\frac{4(1-\alpha)}{\alpha^2} \int_0^t \int_\Omega \mathsf{K}(\vartheta) |\nabla (\vartheta^{\alpha/2})|^2 \, \mathrm{d} x \, \mathrm{d} s + \frac{ 3 \bar{c} \color{black}}{4}\int_0^t \int_\Omega(|\varepsilon({\mathbf{u}_t})|^2 +|\nabla\mu|^2)F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s\\ &+ \frac34 \int_0^t \int_\Omega (|c_t|^2 + |z_t|^2) F'(\vartheta) \, \mathrm{d} x \, \mathrm{d} s \leq C + C \int_0^t \int_\Omega \vartheta^{\alpha+1} \, \mathrm{d} x \, \mathrm{d} s. \end{aligned} \end{equation} Observe that \[ \int_0^t \int_\Omega \mathsf{K}(\vartheta) |\nabla (\vartheta^{\alpha/2})|^2 \, \mathrm{d} x \, \mathrm{d} s \geq c_1 \int_0^t \int_\Omega \vartheta^\kappa |\nabla (\vartheta^{\alpha/2})|^2 \, \mathrm{d} x \, \mathrm{d} s = \tilde c_1 \int_0^t \int_\Omega |\nabla (\vartheta^{(\kappa+\alpha)/2} )|^2 \, \mathrm{d} x \, \mathrm{d} s\,. \] Hence, from \eqref{all-in-all} we infer the estimate \begin{equation} \label{calc2.1} \tilde c_1 \int_0^t \int_\Omega |\nabla (\vartheta^{(\kappa+\alpha)/2} )|^2 \, \mathrm{d} x \, \mathrm{d} s \leq C_{ 0} + C_{ 0}\int_0^t\int_\Omega \vartheta^{\alpha+1} \, \mathrm{d} x \, \mathrm{d} s. \end{equation} We now repeat the very same calculations as in the \emph{Second} and \emph{Third} estimates in \cite[Sec.\ 3]{RocRos14}, to which we refer for all details. 
Namely, we introduce the auxiliary quantity $w : = \max\{ \vartheta^{(\kappa+\alpha)/2}, 1 \}$ and observe that \begin{align} &\int_0^t \int_\Omega |\nabla (\vartheta^{(\kappa+\alpha)/2} )|^2 \, \mathrm{d} x \, \mathrm{d} s \geq \int_0^t \color{black}\int_{\{ \vartheta(s) \color{black} \geq 1\}} |\nabla (\vartheta^{(\kappa+\alpha)/2} )|^2\, \mathrm{d} x \, \mathrm{d} s = \int_0^t \int_\Omega |\nabla w |^2 \, \mathrm{d} x \, \mathrm{d} s,\\ &\vartheta^{\alpha+1} =\left( \vartheta^{(\alpha+1)/q} \right)^q \leq w^q \qquad \text{a.e. in\;}\, Q, \label{eqn:thetaWEst} \end{align} for all $q \geq 1 $ such that \begin{equation} \label{1st-restr-alpha} \frac{\kappa+\alpha}2 \geq \frac{\alpha +1}{q} \Leftrightarrow \ q \geq 2- 2\frac{\kappa-1}{\kappa+\alpha}. \end{equation} Therefore from \eqref{calc2.1} we infer that \begin{equation}\label{calc2.2}\begin{aligned} & \tilde c_1\int_0^t \int_\Omega |\nabla w|^2 \, \mathrm{d} x \, \mathrm{d} s \leq C_{ 0} + C_{ 0} \int_0^t \| w \|_{L^q(\Omega)}^q\, \mathrm{d} s. \end{aligned} \end{equation} We now apply the Gagliardo-Nirenberg inequality for $d=3$, yielding \begin{equation}\label{gagliardo} \| w \|_{L^q(\Omega)} \leq c_1 \| \nabla w\|_{L^2(\Omega;\mathbb{R}^d)}^\theta \| w \|_{L^r(\Omega)}^{1-\theta} + c_2 \| w \|_{L^r(\Omega)} \end{equation} with $ 1 \leq r \leq q $ and $\theta $ satisfying $1/q= \theta/6 + (1-\theta)/r$. Hence $\theta= \frac{6(q-r)}{q(6-r)}$. Observe that $\theta \in (0,1)$ if $q<6$. Applying the Young inequality with exponents $2/(\theta q)$ and $2/(2-\theta q)$ we infer \begin{equation} \label{added-label} C_{ 0}\int_0^t \| w \|_{L^q(\Omega)}^q\, \mathrm{d} s \leq \frac{\tilde c_1}2\int_0^t \int_\Omega |\nabla w|^2 \, \mathrm{d} x \, \mathrm{d} s +C\int_0^t \| w\|_{L^r(\Omega)}^{2q(1-\theta)/(2-q\theta)} \, \mathrm{d} s + C' \int_0^t \| w\|_{L^r(\Omega)}^{q} \, \mathrm{d} s. 
\end{equation} We then plug \eqref{added-label}\, into \eqref{calc2.2}, and obtain \begin{equation} \label{added-label2} \frac{\tilde c_1}2\int_0^t \int_\Omega |\nabla w|^2 \, \mathrm{d} x \, \mathrm{d} s \leq C_0+C\int_0^t \| w\|_{L^r(\Omega)}^{2q(1-\theta)/(2-q\theta)} \, \mathrm{d} s + C' \int_0^t \| w\|_{L^r(\Omega)}^{q} \, \mathrm{d} s. \end{equation} Hence, we choose $1\leq r \leq 2/(\kappa+\alpha)$ so that for almost all $t\in (0,T)$ \begin{equation} \label{r-estimate} \| w(t) \|_{L^r(\Omega)} = \left( \int_\Omega \max\{ \vartheta(t)^{r(\kappa+\alpha)/2} ,1\} \, \mathrm{d} x \right)^{1/r} \leq C\left( \|\vartheta(t)\|_{L^1(\Omega)} + |\Omega|\right) \leq C' \end{equation} where we have used the bound for $ \| \vartheta\|_{L^\infty (0,T;L^1(\Omega))}$ from estimate \eqref{est1}. Observe that the inequalities \[ \begin{cases} \theta q \leq 2 \ \Leftrightarrow \ 6\frac{q-r}{6-r} \leq 2 \ \Leftrightarrow \ q \leq 2 +\frac23 r,\\ r \leq \frac2{\kappa+\alpha} \end{cases} \] lead to $q\leq 2 +\frac{4}{3(\kappa+\alpha)}$ which is still compatible with \eqref{1st-restr-alpha}, since $\frac{\kappa-1}{\kappa+\alpha}<1$. Inserting \eqref{r-estimate} into \eqref{added-label2}\, we ultimately deduce $ \int_0^t \int_\Omega |\nabla w|^2 \, \mathrm{d} x \, \mathrm{d} s \leq C$. Taking also \eqref{added-label} and \eqref{r-estimate} into account we then conclude \color{black} $\int_0^t\|w\|_{L^q(\Omega)}^q\,\mathrm ds \leq C$. By using this as well as estimates \eqref{calc2.1} and \eqref{eqn:thetaWEst} we see that \begin{equation} \label{additional-info} \begin{aligned} c \int_0^t \int_\Omega \vartheta^{\kappa+\alpha - 2} |\nabla \vartheta|^2 \, \mathrm{d} x \, \mathrm{d} s = \int_0^t \int_\Omega |\nabla (\vartheta^{(\kappa+\alpha)/2})|^2 \, \mathrm{d} x \, \mathrm{d} s\leq C. 
\end{aligned} \end{equation} From \eqref{additional-info} and the strict positivity of $\vartheta$ (see \eqref{teta-pos})\, it follows that $ \int_0^t \int_\Omega |\nabla \vartheta|^2 \, \mathrm{d} x \, \mathrm{d} s \leq C, $ provided that $\kappa +\alpha-2 \geq 0$. Observe that, since $\kappa>1$ we can choose $\alpha \in (0, 1)$ such that this inequality holds. Hence, in view of \color{black} estimate \eqref{est1} and applying Poincar\'e inequality, we gather \begin{equation} \label{crucial-est3.2} \| \vartheta \|_{L^{2} (0,T; H^1(\Omega))} \leq C . \end{equation} With the very same calculations as in \cite[Sec.\ 3]{RocRos14} we also obtain \begin{equation}\label{estetainterp} \|\vartheta\|_{L^q(Q)}\leq C\quad\hbox{with }q=8/3 \quad \hbox{if } d=3, \quad q=3 \quad \hbox{if } d=2\, \end{equation} \color{black} interpolating between estimate \eqref{crucial-est3.2} and estimate \eqref{est1} for $\|\vartheta\|_{L^\infty (0,T; L^1(\Omega))}$ and using the Gagliardo-Nirenberg inequality \eqref{gn-ineq}. Furthermore, we observe \begin{equation} \label{quoted-later-ohyes} \int_\Omega |\nabla \vartheta^{(\kappa -\alpha)/2}|^2 \, \mathrm{d} x = c \int_\Omega \vartheta^{\kappa-\alpha-2} |\nabla \vartheta|^2 \, \mathrm{d} x \leq \frac c{{\underline\vartheta}^{2\alpha}}\int_{\Omega} \vartheta^{\kappa+\alpha-2} |\nabla \vartheta|^2 \, \mathrm{d} x \leq C, \end{equation} thanks to the positivity property \eqref{teta-pos} and estimate \eqref{additional-info}. Resorting to a nonlinear version of the Poincar\'e inequality (cf.\ e.g.\ \eqref{poincare-type}), we then infer \begin{equation} \label{necessary-added} \| \vartheta^{(\kappa -\alpha)/2} \|_{L^2 (0,T; H^1(\Omega))}, \, \| \vartheta^{(\kappa +\alpha)/2} \|_{L^2 (0,T; H^1(\Omega))} \leq C. \end{equation} \paragraph{\bf Third estimate:} We test \eqref{e:teta} by $1$, integrate in time, and subtract the resulting relation from the total energy balance \eqref{calc1}. 
We thus obtain \begin{equation} \label{mech-energ-est} \begin{aligned} & \int_0^t\int_\Omega |c_t|^2 \, \mathrm{d} x \, \mathrm{d} s +\int_\Omega \tfrac1p |\nabla c(t)|^p + \phi(c(t))\, \mathrm{d} x + \int_0^t\int_\Omega m(c,z)|\nabla \mu|^2 \, \mathrm{d} x \, \mathrm{d} s+ \\ & \quad +\int_0^t\int_\Omega |z_t|^2 \, \mathrm{d} x \, \mathrm{d} s +\int_\Omega \tfrac1p |\nabla z(t)|^p + I_{[0,+\infty)}(z(t)) + \sigma(z(t)) \, \mathrm{d} x \\ & \quad + \frac 12\int_\Omega |\mathbf{u}_t(t)|^2\, \mathrm{d} x +\int_0^t\int_\Omega a(c,z) \mathbb{V} \varepsilon(\mathbf u_t)\colon \varepsilon(\mathbf u_t) \, \mathrm{d} x \, \mathrm{d} s + \int_\Omega W(c(t),\varepsilon(\mathbf u(t)),z(t)) \, \mathrm{d} x \\ & = \int_\Omega \tfrac1p |\nabla c_0|^p + \phi(c_0) \, \mathrm{d} x +\int_\Omega \tfrac1p |\nabla z_0|^p + I_{[0,+\infty)}(z_0) + \sigma(z_0) \, \mathrm{d} x + \frac12 \int_\Omega|\mathbf v_0|^2\, \mathrm{d} x \\ & \quad + \int_\Omega W(c_0,\varepsilon(\mathbf u_0),z_0) \, \mathrm{d} x + \int_0^t\int_\Omega\vartheta \left(\rho \dive \mathbf{u}_t+c_t+z_t\right)\, \mathrm{d} x \, \mathrm{d} s+\int_0^t \int_\Omega {\bf f}\cdot \mathbf{u}_t\, \mathrm{d} x \, \mathrm{d} s\\ &\quad+\int_0^t\int_{\partial\Omega}({\boldsymbol{\sigma}} { \bf n \color{black}})\cdot\mathbf{d}_t\, \mathrm{d} S\,\mathrm ds. \end{aligned} \end{equation} Observe that the \color{black} first, second, third, and fourth integral terms on the right-hand side are bounded thanks to conditions \eqref{h:initial} on $(c_0,z_0,\mathbf{u}_0,\mathbf{v}_0)$. As in the First estimate we deduce boundedness of the last and the last but one \color{black} integral terms on the right-hand side. 
Since $\phi$, $I_{[0,+\infty)} +\sigma$, and $W$ are bounded from below, exploiting \eqref{ellipticity}, \eqref{data-a}, and \eqref{eqn:assbV} \color{black} to deduce that $\int_0^t\int_\Omega a( c, z) \mathbb{V} \varepsilon(\mathbf u_t)\colon \varepsilon(\mathbf u_t) \, \mathrm{d} x \, \mathrm{d} s \geq c \int_0^t\int_\Omega |\varepsilon(\mathbf u_t)|^2 \, \mathrm{d} x \, \mathrm{d} s$, and using \color{black} \eqref{hyp-m} to deduce that $\int_0^t\int_\Omega m(c,z)|\nabla\mu|^2\,\mathrm dx\,\mathrm ds\geq m_0\int_0^t\int_\Omega|\nabla\mu|^2\,\mathrm dx\,\mathrm ds$, we find that \begin{equation} \label{dissip-est} \begin{aligned} &\int_0^t\int_\Omega(|c_t|^2+|z_t|^2+|\nabla\mu|^2+|\varepsilon(\mathbf u_t)|^2)\, \mathrm{d} x \, \mathrm{d} s \leq C+\int_0^t\int_\Omega\vartheta \left(\rho \dive \mathbf{u}_t+c_t+z_t\right)\, \mathrm{d} x \, \mathrm{d} s. \end{aligned} \end{equation} Then, we can estimate the integral term \color{black} on the right-hand side by \[ \varrho\int_0^t \int_\Omega \left( |\varepsilon(\mathbf u_t)|^2 + |c_t|^2 + |z_t|^2 \right) \, \mathrm{d} x \, \mathrm{d} s + C_\varrho \int_0^t \int_\Omega |\vartheta|^2 \, \mathrm{d} x \, \mathrm{d} s, \] for a sufficiently small constant $\varrho>0$, in such a way as to absorb the first integral term into the left-hand side of \eqref{dissip-est}. Exploiting \eqref{crucial-est3.2} on $\vartheta$, we thus conclude, also with the aid of Korn's inequality and condition \eqref{dirichlet-data} on the boundary value $\mathbf{d}$, \color{black} \begin{equation}\label{est5} \|c_t\|_{L^2(Q)} + \| \nabla \mu \|_{L^2(Q;\mathbb{R}^d)} + \|z_t\|_{L^2(Q)}+\|\mathbf{u}_t\|_{L^2(0,T; H^1(\Omega;\mathbb{R}^d))} \leq C\,. \end{equation} Furthermore, taking into account the previously proved bound \eqref{est1}, we also gather \begin{equation} \label{est5-added} \| z \|_{L^\infty (0,T;W^{1,p}(\Omega))} +\| \mathbf u \|_{H^1(0,T;H^1(\Omega;\mathbb{R}^d))}\leq C. 
\end{equation} \paragraph{\bf Fourth estimate:} We test \eqref{e:u} by $-\mathrm{div}(\mathbb{V}\varepsilon(\mathbf{u}_t))$ and integrate in time. This leads to \begin{equation} \label{added-4-clarity} \begin{aligned} & -\int_0^t \int_\Omega \mathbf{u}_{tt}\,\cdot\, \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s + \int_0^t \int_{\Omega} \dive( a(c,z)\mathbb{V}\varepsilon(\mathbf{u}_t)) \,\cdot\, \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s \\ & =- \int_0^t \int_{\Omega} \dive(\pd{\varepsilon}(c,\varepsilon(\mathbf u),z)) \,\cdot\, \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s + \rho \int_0^t \int_\Omega \nabla \vartheta \cdot \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s \\ & \quad - \int_0^t \int_\Omega \mathbf{f}\,\cdot \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s\,. \end{aligned} \end{equation} The following calculations \color{black} are based on \cite[Sec.\ 5]{RocRos14}, to which we refer for details. However, in the present case we have to take care of the non-homogeneous Dirichlet boundary condition for $\mathbf u$. The first term on the left-hand side of \eqref{added-4-clarity} gives \begin{align} \label{est-added-0} \begin{aligned} &-\int_0^t \int_\Omega \mathbf{u}_{tt}\cdot \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s\\ &\qquad=-\int_0^t\int_{\partial\Omega}\mathbf{u}_{tt}\cdot\big(\mathbb{V}\varepsilon(\mathbf{u}_t) { \bf n \color{black}} \big)\, \mathrm{d} S\,\mathrm ds +\int_\Omega \frac12 \varepsilon(\mathbf{u}_t (t)):\mathbb{V} \varepsilon(\mathbf{u}_t (t)) \, \mathrm{d} x -\int_\Omega \frac12 \varepsilon(\mathbf{u}_t (0)):\mathbb{V} \varepsilon(\mathbf{u}_t (0)) \, \mathrm{d} x. \end{aligned} \end{align} On the boundary cylinder $\partial\Omega\times(0,T)$ we find $\mathbf{u}_{tt}=\mathbf{d}_{tt}$ a.e. 
\color{black} (note that not necessarily $\varepsilon(\mathbf{u}_{t})=\varepsilon(\mathbf{d}_{t})$ a.e. on $\partial\Omega\times(0,T)$) which yields by using the trace theorem and Young's inequality ($\delta>0$ will be chosen later) \begin{align*} \Big|\int_0^t\int_{\partial\Omega}\mathbf{u}_{tt}\cdot\big(\mathbb{V}\varepsilon(\mathbf{u}_t) { \bf n \color{black}} \big)\, \mathrm{d} S\,\mathrm ds\Big| ={}&\Big|\int_0^t\int_{\partial\Omega}\mathbf{d}_{tt}\cdot\big(\mathbb{V}\varepsilon(\mathbf{u}_t) { \bf n \color{black}}\big)\, \mathrm{d} S\,\mathrm ds\Big|\\ \leq{}& \delta\|\mathbf{u}_t\|_{L^2(0,T;H^2(\Omega;\mathbb{R}^d))}^2+C_\delta\|\mathbf{d}_{tt}\|_{L^2(0,T;H^{1}(\Omega;\mathbb{R}^d))}^2. \end{align*} The last term on the right-hand side can be estimated by using \eqref{dirichlet-data}. For the second term on the left-hand side of \eqref{added-4-clarity} we find \begin{equation} \label{est-to-fill-2} \begin{aligned} &\int_0^t \int_{\Omega} \mathbf{\dive} ( a(c,z)\mathbb{V}\varepsilon(\mathbf{u}_t)) \,\cdot\, \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s\\ &=\int_0^t\int_\Omega a(c,z) \mathbf{\dive} (\mathbb{V} \varepsilon(\mathbf{u}_t))\,\cdot\,\mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t))\, \mathrm{d} x \, \mathrm{d} s +\int_0^t\int_\Omega (\nabla a(c,z)\cdot \mathbb{V} \varepsilon(\mathbf{u}_t))\,\cdot\,\mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t))\, \mathrm{d} x \, \mathrm{d} s\\ &\geq c\int_0^t \| \mathbf{u}_t \|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s-\|\mathbf{d}_t\|_{L^2(0,T;H^2(\Omega;\mathbb{R}^d))}^2+I_1, \end{aligned} \end{equation} where the second inequality follows from \eqref{H2reg}. The second term on the right-hand side is bounded due to \eqref{dirichlet-data}. 
We move $I_1$ to the right-hand side of \eqref{added-4-clarity} and estimate \begin{equation} \label{est-to-fill-bis} \begin{aligned} |I_1|&=\left| \int_0^t\int_\Omega \left(\color{black}\nabla a(c,z)\mathbb{V} \varepsilon({\mathbf{u}_t})\right)\color{black}\,\cdot\,\mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s \right| \\ &\leq C\int_0^t\|\nabla a(c,z)\|_{L^{d+\zeta}(\Omega;\mathbb{R}^d)}\|\varepsilon({\mathbf{u}_t})\|_{L^{d^{\star}-\eta}(\Omega;\mathbb{R}^{d\times d})}\|{\mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t))}\|_{L^2(\Omega; \mathbb{R}^d)}\, \mathrm{d} s\\ &\leq \delta \int_0^t\|\mathbf{u}_t\|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s +C_\delta\int_0^t \|\nabla a(c,z)\|_{L^{d+\zeta}(\Omega;\mathbb{R}^d)}^2 \|\varepsilon({\mathbf{u}_t})\|_{L^{d^{\star}-\eta}(\Omega;\mathbb{R}^{d\times d})}^2 \, \mathrm{d} s \\ &\leq \delta \int_0^t\|\mathbf{u}_t\|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s +C_\delta\varrho^2\int_0^t \big(\|c\|_{W^{1,p}(\Omega)}^2+ \|z\|_{W^{1,p}(\Omega)}^2\big)\|\mathbf{u}_t\|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s\\ &\quad+C_\delta C_\varrho\int_0^t \big(\|c\|_{W^{1,p}(\Omega)}^2+ \|z\|_{W^{1,p}(\Omega)}^2\big)\|\mathbf{u}_t\|_{L^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s. \end{aligned} \end{equation} In the first line, we have chosen $\zeta>0$ fulfilling $p\geq d+\zeta$, and $\eta>0$ such that $\tfrac1{d+\zeta} + \tfrac{1}{d^\star -\eta} + \tfrac12 \leq 1$, with $d^{\star}$ from \eqref{dstar}, in order to apply the H\"older inequality. Moreover, we have exploited \eqref{data-a}, giving $\|\nabla a( c, z)\|_{L^{d+\zeta}(\Omega;\mathbb{R}^d)} \leq C(\|c\|_{W^{1,p}(\Omega)}+\|z\|_{W^{1,p}(\Omega)})$, as well as \eqref{interp} to estimate $\|\varepsilon({\mathbf{u}_t})\|_{L^{d^{\star}-\eta}(\Omega;\mathbb{R}^{d\times d})}$. Finally, $\delta $ and $\varrho$ are positive constants that we will choose later, accordingly determining $C_\delta, \, C_\varrho>0$ via the Young inequality. 
For the right-hand side of \eqref{added-4-clarity} we proceed as follows \begin{equation} \label{est-to-fill-1} \begin{aligned} & -\int_0^t \int_{\Omega} \dive(\pd{\varepsilon}(c,\varepsilon(\mathbf u), z)) \,\cdot\, \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s\\ & = -\int_0^t\int_\Omega \pd{\varepsilon c}(c,\varepsilon(\mathbf u),z) \nabla c \,\cdot\, \mathbf{\dive}(\mathbb{V}\varepsilon(\mathbf{u}_t))\, \mathrm{d} x \, \mathrm{d} s -\int_0^t\int_\Omega \pd{\varepsilon z}(c,\varepsilon(\mathbf u),z) \nabla z \,\cdot\, \mathbf{\dive}(\mathbb{V}\varepsilon(\mathbf{u}_t))\, \mathrm{d} x \, \mathrm{d} s\\ &\quad -\int_0^t\int_\Omega \big(\pd{\varepsilon \varepsilon}(c,\varepsilon(\mathbf u),z) :\hspace*{-0.2em}\cdot\,\nabla (\varepsilon(\mathbf u))\big)\,\cdot\, \mathbf{\dive}(\mathbb{V}\varepsilon(\mathbf{u}_t))\, \mathrm{d} x \, \mathrm{d} s \\ &\leq C_4 \int_0^t\int_\Omega\big(|\nabla c|+|\nabla z|\big)\big(|\varepsilon(\mathbf u)|+1\big)|\dive(\mathbb{V}\varepsilon(\mathbf u_t))|\,\mathrm dx\,\mathrm ds +C_4\int_0^t\int_\Omega|\nabla(\varepsilon(\mathbf u))||\dive(\mathbb{V}\varepsilon(\mathbf u_t))|\,\mathrm dx\,\mathrm ds\\ &\leq C_4 \int_0^t \left( \|\nabla c\|_{L^{d+\zeta}(\Omega;\mathbb{R}^d)}+\|\nabla z\|_{L^{d+\zeta}(\Omega;\mathbb{R}^d)}\right) \big(\|\varepsilon({\bf u})\|_{L^{d^{\star}-\eta} (\Omega;\mathbb{R}^{d\times d})}+1\big)\|{\mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t))}\|_{L^2(\Omega;\mathbb{R}^d)}\, \mathrm{d} s\\ &\quad+ C_4'\int_0^t\|\mathbf u\|_{H^2(\Omega;\mathbb{R}^d)}\|\mathbf u_t\|_{H^2(\Omega;\mathbb{R}^d)} \, \mathrm{d} s\\ &\leq \sigma\int_0^t\|\mathbf u_t\|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s +C_\sigma \int_0^t \left( \left( \|c\|_{W^{1,p}(\Omega)}^2 + \|z\|_{W^{1,p}(\Omega)}^2+1\right)\left( \|\mathbf u\|_{H^2(\Omega; \mathbb{R}^d)}^2+1 \right) \|\mathbf u\|_{H^2(\Omega;\mathbb{R}^d)}^2\right) \, \mathrm{d} s\,. 
\end{aligned} \end{equation} Here, the positive constants $\zeta$ and $\eta$ again fulfill $p\geq d+\zeta$ and $\tfrac1{d+\zeta} + \tfrac{1}{d^\star -\eta} + \tfrac12 \leq 1$, and we have exploited inequality \eqref{interp} with a constant $\sigma$ that we will choose later, and some $C_\sigma>0$. Moreover, we have used the structural assumption \eqref{eqn:assumptionW} on $W$ (cf.\ also \eqref{later-ref}), \color{black} and estimates \eqref{est1} and \eqref{est5-added}, yielding $\| c \|_{L^\infty(Q)} + \| z \|_{L^\infty(Q)} \leq C$, whence $$ \|b(c,z) \|_{L^{\infty}(Q)}+\|b_{,c}(c,z) \|_{L^{\infty}(Q)}+\|b_{,z}(c,z) \|_{L^{\infty}(Q)} + \|\varepsilon^*(c)\|_{L^{\infty}(Q)}+\|(\varepsilon^*)'(c)\|_{L^{\infty}(Q)}\leq C. $$ Finally, we estimate \begin{equation} \label{est-to-fill-3} \left| \rho \int_0^t \int_\Omega \nabla \vartheta \cdot \mathbf{\dive} (\mathbb{V}\varepsilon(\mathbf{u}_t)) \, \mathrm{d} x \, \mathrm{d} s \right| \leq \eta \int_0^t\|\mathbf{u}_t\|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s+C_\eta\int_0^t \|\nabla\vartheta\|_{L^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s \end{equation} for some positive constant $\eta$ to be fixed later and for some $C_\eta>0$. 
Combining estimates \eqref{est-added-0}--\eqref{est-to-fill-3} with \eqref{added-4-clarity}, taking into account the previously proved estimates \eqref{est1}, \eqref{crucial-est3.2}, and exploiting \eqref{bulk-force} on $\mathbf{f}$ to estimate the last term on the right-hand side of \eqref{added-4-clarity}, we obtain \begin{align*} &\frac{\nu_0\color{black}}{2} \int_\Omega |\varepsilon(\mathbf{u}_t (t) ) |^2 \, \mathrm{d} x +c\int_0^t \| \mathbf{u}_t \|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s\\ &\qquad\leq C\int_\Omega|\varepsilon( \mathbf{v}^0 \color{black})|^2 \, \mathrm{d} x + C\| \mathbf{f}\|_{L^2 (0,T;L^2 (\Omega;\R^d))}^2 +C\| \mathbf{d}\|_{H^1(0,T;H^2(\Omega;\mathbb{R}^d))\cap H^2(0,T;H^1(\Omega;\mathbb{R}^d))}^2\\ &\qquad\quad+\frac{c}2 \int_0^t \| \mathbf{u}_t \|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s +C\left(1+\|\mathbf{u}^0\color{black}\|_{H^2(\Omega;\mathbb{R}^d)}^2+\int_0^t\int_0^s\|\mathbf{u}_t\|_{H^2(\Omega;\mathbb{R}^d)}^2\, \, \mathrm{d} r \, \mathrm{d} s\right)\,, \end{align*} with $\nu_0$ from \eqref{ellipticity} (cf.~also \eqref{eqn:assbV}), \color{black} where we have used the fact that $\int_0^t\|\mathbf{u}\|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} s \leq \|\mathbf{u}^0\|_{H^2(\Omega;\mathbb{R}^d)}^2+\int_0^t\int_0^s \|\mathbf{u}_t\|_{H^2(\Omega;\mathbb{R}^d)}^2 \, \mathrm{d} r \, \mathrm{d} s $ and chosen $\delta$, $\varrho$, $\sigma$, and $\eta$ sufficiently small. Therefore, using the standard Gronwall lemma and conditions \eqref{data_u}--\eqref{data_v} on the initial data $\mathbf{u}^0$ and $\mathbf{v}^0$, \color{black} we conclude \begin{equation} \label{palla} \| \mathbf{u}_t \|_{ L^{2}(0,T; H^2(\Omega;\mathbb{R}^d))\cap L^{\infty}(0, T; H^1(\Omega;\mathbb{R}^d)) } \leq C \quad \text{whence} \quad \| \mathbf{u} \|_{ L^{\infty}(0,T; H^2(\Omega;\mathbb{R}^d))} \leq C. \end{equation} By comparison in \eqref{e:u} we also get \begin{equation} \| \mathbf{u}_{tt} \|_{L^{2}(0,T; L^2 (\Omega;\R^d))} \leq C. 
\label{utt-comparison} \end{equation} In the end, taking into account the form \eqref{eqn:assumptionW} of $W$, we infer from \eqref{est5-added} and \eqref{palla}, taking into account the continuous embedding $H^2(\Omega;\mathbb{R}^d)\subset W^{1,d^\star}(\Omega;\mathbb{R}^d)$, that \begin{equation} \label{est-for-Ws} \| \pd{c}(c,\varepsilon(\mathbf u),z) \|_{L^\infty (0,T; L^2(\Omega))} +\| \pd{z}(c,\varepsilon(\mathbf u),z) \|_{L^\infty (0,T; L^2(\Omega))}\leq C. \end{equation} \paragraph{\bf Fifth estimate:} Recall that, for the time being we suppose $\widehat\beta \in \mathrm{C}^1(\mathbb{R})$, and we will use the notation $\phi'= \beta +\gamma'$. It follows from \eqref{e:c} and the no-flux boundary conditions on $c$ that $\Xint-_\Omega c_t \, \mathrm{d} x = 0$ a.e.\ in $(0,T)$, hence there exists $\mathfrak{m}_0 \in \mathbb{R}$ with \begin{equation} \label{constant-mean} \Xint-_\Omega c(t) \, \mathrm{d} x =\mathfrak{m}_0 \quad \text{for all } t \in [0,T]. \end{equation} Now, from \eqref{e:mu} we deduce that \begin{equation} \label{mean-mu} \Xint-_\Omega \mu \, \mathrm{d} x = \Xint-_\Omega \phi'(c) \, \mathrm{d} x + \Xint-_\Omega W_{,c}(c,\varepsilon(\mathbf u),z) \, \mathrm{d} x - \Xint-_\Omega \vartheta \, \mathrm{d} x \quad \text{a.e. in\;} \, (0,T). \end{equation} Thanks to estimates \eqref{crucial-est3.2} and \eqref{est-for-Ws}, we have that \begin{equation} \label{used-here} \| \textstyle{\Xint-_\Omega \vartheta \, \mathrm{d} x} \|_{L^2(0,T)} + \left\| \Xint-_\Omega W_{,c}(c,\varepsilon(\mathbf u),z) \, \mathrm{d} x \right\|_{L^\infty (0,T)} \leq C. \end{equation} Therefore, in order to estimate $\Xint-_\Omega \mu \, \mathrm{d} x$ it is sufficient to gain a bound for $\Xint-_\Omega \phi'(c) \, \mathrm{d} x$. We shall do so by testing \eqref{e:mu} by $c- \Xint-_\Omega c \, \mathrm{d} x = c - \mathfrak{m}_0 $. 
This gives for a.a.\ $t\in (0,T)$ \begin{equation} \label{clever-c} \begin{aligned} &\int_\Omega |\nabla c(t)|^p \, \mathrm{d} x + \int_\Omega \beta(c (t)) (c(t)-\mathfrak{m}_0) + \gamma'(c (t)) (c(t)-\mathfrak{m}_0) \, \mathrm{d} x\\ &=\int_\Omega \big(\vartheta(t) - W_{,c}(c(t),\varepsilon(\mathbf u(t)),z(t))\big) (c(t)-\mathfrak{m}_0) \, \mathrm{d} x +\int_\Omega \left(\mu(t) -\Xint-_\Omega \mu(t) \, \mathrm{d} x \right) ( c(t)- \mathfrak{m}_0 )\, \mathrm{d} x\\ &\quad-\int_\Omega c_t(t)c(t)\,\mathrm dx\\ &\leq C \left( \|\vartheta(t)\|_{L^2(\Omega)}+ \| W_{,c}(c(t),\varepsilon(\mathbf u(t)),z(t)) \|_{L^2(\Omega )} \right) \| c(t)\|_{L^2(\Omega)} + \| \nabla \mu(t)\|_{L^2(\Omega)} \| \nabla c(t)\|_{L^2(\Omega)}\\ &\quad+\|c_t(t)\|_{L^1(\Omega)}\|c(t)\|_{L^\infty(\Omega)} \end{aligned} \end{equation} where for the first equality we have used that $ (\Xint-_\Omega \mu(t) \, \mathrm{d} x )( \int_\Omega (c(t)- \mathfrak{m}_0 )\, \mathrm{d} x ) =0$ and $\mathfrak{m}_0\int_\Omega c_t(t)\,\mathrm dx=0$, and for the second one the Poincar\'e inequality for the second integral. Now, observe that \begin{equation} \label{added-4-gamma} \int_\Omega \gamma'(c (t)) (c(t)-\mathfrak{m}_0) \, \mathrm{d} x \geq -C \end{equation} since, by the $L^\infty (0,T;W^{1,p}(\Omega))$-estimate for $c$ and the fact that $p>d$, we have \begin{equation} \label{gamma'-bounded} \| \gamma'(c) \|_{L^\infty (Q)} \leq C\,. \end{equation} Combining \eqref{clever-c} and \eqref{added-4-gamma} with \eqref{mir-zelik}, yielding \begin{equation} \label{danke_zelik} \exists\, C_{\mathfrak{m}_0},\,C_{\mathfrak{m}_0}'>0 \ \ \text{for a.a. 
} t \in (0,T)\, : \quad \int_\Omega | \beta(c(t))| \, \mathrm{d} x \leq C_{\mathfrak{m}_0} \int_\Omega \beta(c (t)) (c(t)-\mathfrak{m}_0) \, \mathrm{d} x + C_{\mathfrak{m}_0}', \end{equation} and taking into account estimates \eqref{crucial-est3.2}, \eqref{est5}, \eqref{est5-added}, and \eqref{est-for-Ws}, we conclude that $\left\|\beta(c) \right\|_{L^2 (0,T; L^1(\Omega))} \leq C$, whence $\left\|\phi'(c) \right\|_{L^2 (0,T; L^1(\Omega))} \leq C.$ Then, arguing by comparison in \eqref{mean-mu} and taking into account \eqref{used-here} we ultimately conclude $\| \Xint-_\Omega \mu\, \mathrm{d} x \|_{L^2(0,T)} \leq C$. Combining this with \eqref{est5-added} and using the Poincar\'e inequality we infer that \begin{equation} \label{est-for-mu} \| \mu\|_{L^2(0,T;H^1(\Omega))} \leq C. \end{equation} \paragraph{\bf Sixth estimate:} We now argue by comparison in \eqref{e:mu} and take into account estimates \eqref{crucial-est3.2}, \eqref{est5}, \color{black} \eqref{est-for-Ws}, and \eqref{est-for-mu}, as well as \eqref{gamma'-bounded}. Then we conclude that \[ \| \Delta_p(c) +\eta \|_{L^2(0,T; L^2(\Omega))} \leq C. \] Now, in view of the monotonicity of the function $\beta$, it is not difficult to deduce from the above estimate that \begin{equation} \| \Delta_p(c)\|_{L^2(0,T; L^2(\Omega))}+ \| \eta \|_{L^2(0,T; L^2(\Omega))} \leq C. \end{equation} \paragraph{\bf Seventh estimate:} We test \eqref{e:teta} by $\frac{w}{\vartheta}$, with $w$ a test function in $ W^{1,d+\epsilon}(\Omega)$ with $\epsilon>0 $, which then ensures $w\in L^\infty(\Omega)$. 
Thus, using the place-holders \begin{align*} &H := - c_t - z_t -\rho\mathrm{div}(\mathbf{u}_t),\\ &J:= \frac1\vartheta (g+ a(c,z) \varepsilon(\mathbf{u}_t):\mathbb{V} \varepsilon(\mathbf{u}_t) + |c_t|^2 + |z_t|^2 + m(c,z)|\nabla \mu|^2), \end{align*} we obtain that \[ \begin{aligned} & \left| \int_\Omega \partial_t \log(\vartheta) w \, \mathrm{d} x \right| \\ & = \left| \int_\Omega \left( H w - \frac{\mathsf{K}(\vartheta)}\vartheta \nabla \vartheta \cdot \nabla w - \frac{\mathsf{K}(\vartheta)}{\vartheta^2} |\nabla\vartheta|^2 w + Jw \right) \, \mathrm{d} x +\int_{\partial\Omega} h \frac{w}{\vartheta} \, \mathrm{d} S \right| \\ & \leq \left|\int_\Omega H w \, \mathrm{d} x \right| + \left| \int_\Omega \frac{\mathsf{K}(\vartheta)}\vartheta \nabla \vartheta \cdot \nabla w \, \mathrm{d} x \right| + \left| \int_\Omega \frac{\mathsf{K}(\vartheta)}{\vartheta^2} |\nabla\vartheta|^2 w \, \mathrm{d} x \right| + \left|\int_\Omega J w \, \mathrm{d} x \right| + \left|\int_{\partial\Omega} h\frac{w}\vartheta \, \mathrm{d} S\right| \\ & \doteq I_1+I_2+I_3+I_4 +I_5. \end{aligned} \] From estimate \eqref{est5} we deduce that $\|H\|_{L^2(0,T; L^2(\Omega))} \leq C$, therefore \[ |I_1| \leq \mathcal{H}(t) \|w \|_{L^2(\Omega)} \quad \text{ with } \mathcal{H}(t)= \| H(\cdot,t)\|_{L^2(\Omega)} \in L^2(0,T). \] Analogously, also in view of \eqref{heat-source}, of \eqref{teta-pos} and of estimate \eqref{est5}, we infer that \[ |I_4| \leq \frac{1}{\underline\vartheta}\mathcal{J}(t) \|w\|_{L^\infty(\Omega)} \qquad \text{with } \mathcal{J}(t) := \| J(\cdot,t)\|_{L^1(\Omega)} \in L^1(0,T). \] Moreover, $ |I_5| \leq \frac{1}{\underline\vartheta} \|h(t)\|_{L^2(\partial \Omega)} \| w\|_{L^2(\partial \Omega)} $, with $ \|h(t)\|_{L^2(\partial \Omega)} \in L^1(0,T)$ thanks to \eqref{dato-h}. In order to estimate $I_2$ and $I_3$ we develop the very same calculations as in the proof of \cite[Sec.\ 3, \emph{Sixth estimate}]{RocRos14}. 
Referring to the latter paper for all details, we mention here that, exploiting the growth condition \eqref{hyp-K} on $\mathsf{K}$, the positivity of $\vartheta$ \eqref{teta-pos}, and the H\"older inequality, we have \[ \begin{aligned} & |I_2| \leq \frac C{\underline\vartheta} \mathcal{O}(t) \| \nabla w\|_{L^2(\Omega;\mathbb{R}^d)} + C \widetilde{\mathcal{O}}(t) \| \nabla w\|_{L^{{d+\epsilon}}(\Omega;\mathbb{R}^d)} \\ & \quad \text{with } \begin{cases} \mathcal{O}(t) := \| \nabla \vartheta(t) \|_{L^2(\Omega;\mathbb{R}^d)} \in L^2(0,T) & \text{by \eqref{crucial-est3.2},} \\ \widetilde{ \mathcal{O}}(t) \color{black}:= \| \vartheta(t)^{(\kappa +\alpha-2)/2} \nabla \vartheta (t) \|_{L^2(\Omega;\mathbb{R}^d)} \|\vartheta(t)^{(\kappa -\alpha)/2} \|_{L^{d^\star-\eta}(\Omega)} \in L^1(0,T) & \text {by \eqref{additional-info}, \eqref{crucial-est3.2}, \eqref{necessary-added},} \end{cases} \end{aligned} \] with $\tfrac1{d+\epsilon} + \tfrac{1}{d^\star -\eta} +\tfrac12 \leq 1 $. With analogous arguments, we find \[ \begin{aligned} & |I_3| \leq \frac C{{\underline \vartheta }^2} \mathcal{O}(t)^2 \| w\|_{L^\infty (\Omega)} + C \overline{\mathcal{O}}(t) \|w \|_{L^\infty(\Omega)} \\ & \text{with } \overline{\mathcal{O}}(t)= \int_\Omega \vartheta(t)^{\kappa+\alpha-2} |\nabla \vartheta(t)|^2 \, \mathrm{d} x + \int_\Omega |\nabla \vartheta(t)|^2 \, \mathrm{d} x \in L^1(0,T)\ \text{ by \eqref{additional-info} and \eqref{crucial-est3.2}.} \end{aligned} \] All in all, we infer that there exists a positive function $\mathcal{C} \in L^1(0,T)$ such that $ | \int_\Omega \partial_t \log(\vartheta(t)) w \, \mathrm{d} x | \leq \mathcal{C} (t) \|w\|_{W^{1,d+\epsilon}(\Omega)} $ for a.a.\ \color{black} $ t \in (0,T). $ Hence, \begin{equation}\label{est6} \|\partial_t\log(\vartheta)\|_{L^1(0,T; (W^{1,d+\epsilon}(\Omega)'))} \leq C. 
\end{equation} \paragraph{ \bf Eighth estimate [$ {\boldsymbol \kappa} {\boldsymbol \in} { \bf (1,5/3)}$ if ${\bf d=3}$ and $\boldsymbol{\kappa} {\boldsymbol \in} {\bf (1,2)}$ if ${ \bf d=2}$]: \color{black}} We multiply \eqref{e:teta} by a test function $w \in W^{1,\infty}(\Omega)$ (which e.g.\ holds if $w \in W^{2,d+\epsilon}(\Omega)$ for $\epsilon>0$) and find \[ \begin{aligned} \left| \int_\Omega \vartheta_t w \, \mathrm{d} x \right| \leq \left|\int_\Omega L w \, \mathrm{d} x \right| + \left| \int_\Omega \mathsf{K}(\vartheta) \nabla \vartheta \cdot \nabla w \, \mathrm{d} x \right| + \left|\int_{\partial\Omega} hw \, \mathrm{d} S\right| \doteq I_1+I_2+I_3, \end{aligned} \] where we have set $$ L= -c_t\vartheta - z_t \vartheta -\rho\vartheta \mathrm{div}(\mathbf{u}_t)+g+ a(c,z)\varepsilon(\mathbf{u}_t):\mathbb{V} \varepsilon(\mathbf{u}_t) +|c_t|^2 + |z_t|^2 + m(c,z)|\nabla \mu|^2. $$ Therefore, \[ |I_1| \leq \mathcal{L}(t) \|w\|_{L^\infty (\Omega)} \quad \text{with } \mathcal{L}(t):=\|L(t)\|_{L^1(\Omega)} \in L^1(0,T), \quad |I_3| \leq \| h(t) \|_{L^2(\partial \Omega)} \| w\|_{L^2(\partial \Omega)} \text{ with } h\in L^1(0,T) \] thanks to \eqref{heat-source}, \eqref{crucial-est3.2}, and \eqref{est5} for $I_1$, and \eqref{dato-h} for $I_3$. We estimate $I_2$ by proceeding exactly in the same way as for \cite[Sec.\ 3, \emph{Seventh estimate}]{RocRos14}. Namely, taking into account once again the growth condition \eqref{hyp-K} on $\mathsf{K}$, we find \begin{equation} \label{citata-dopo-ehsi} |I_2|\leq C\| \vartheta^{(\kappa-\alpha+2)/2} \|_{L^2(\Omega)} \|\vartheta^{(\kappa+\alpha-2)/2} \nabla \vartheta\|_{L^2(\Omega;\mathbb{R}^d)} \|\nabla w\|_{L^\infty (\Omega;\mathbb{R}^d)} + C \| \nabla \vartheta\|_{L^2(\Omega;\mathbb{R}^d)} \|\nabla w\|_{L^2 (\Omega;\mathbb{R}^d)}. 
\end{equation} Observe that, since $\kappa <\frac53$ if $d=3$, and $\kappa <2$ if $d=2$, and $\alpha$ can be chosen arbitrarily close to $1$, from estimate \eqref{estetainterp} we have that $\vartheta^{(\kappa-\alpha+2)/2}$ is bounded in $L^2(0,T; L^2(\Omega))$. Thus, also taking into account \eqref{crucial-est3.2}, we conclude that $|I_2|\leq C \mathcal{L}^*(t) \|\nabla w \|_{L^\infty (\Omega)} $ for some $\mathcal{L}^* \in L^1(0,T)$. Hence, \begin{equation} \label{bv-esti-temp} \|\vartheta_t\|_{L^1(0,T; W^{1,\infty}(\Omega)')} \leq C. \end{equation} \paragraph{\bf Ninth estimate:} We test \eqref{e:c} by $\Delta\mu$ and integrate in time. It follows \begin{align} \label{eqn:est9} \int_0^t\int_\Omega\dive\big(m(c,z)\nabla\mu\big)\Delta\mu\,\mathrm dx\,\mathrm ds =\int_0^t\int_\Omega c_t\Delta\mu\,\mathrm dx\,\mathrm ds. \color{black} \end{align} The left-hand side is estimated \color{black} below by exploiting Hypotheses (II) and the boundedness $\|c\|_{L^\infty(Q)}+\|z\|_{L^\infty(Q)}\leq C$, viz. \begin{align*} \int_0^t\int_\Omega\dive\big(m(c,z)\nabla\mu\big)\Delta\mu\,\mathrm dx\,\mathrm ds &\geq\int_0^t\int_\Omega\big(\nabla m(c,z)\cdot\nabla\mu\big)\Delta\mu\,\mathrm dx\,\mathrm ds +m_0\int_0^t\int_\Omega|\Delta\mu|^2\,\mathrm dx\,\mathrm ds\\ &\geq-C\int_0^t\int_\Omega(|\nabla c|+|\nabla z|)|\nabla\mu||\Delta\mu|\,\mathrm dx\,\mathrm ds +m_0\int_0^t\int_\Omega|\Delta\mu|^2\,\mathrm dx\,\mathrm ds. 
\end{align*} By using the interpolation inequality \eqref{interp2} and by using analogous calculations as in the \textit{Fourth estimate}, we find by Young's inequality \begin{align*} &\int_0^t\int_\Omega(|\nabla c|+|\nabla z|)|\nabla\mu||\Delta\mu|\,\mathrm dx\,\mathrm ds\\ &\qquad\leq C\int_0^t\big(\|\nabla c\|_{L^{d+\zeta}(\Omega;\mathbb{R}^d)}+\|\nabla z\|_{L^{d+\zeta}(\Omega;\mathbb{R}^d)}\big) \|\nabla\mu\|_{L^{d^*-\eta}(\Omega;\mathbb{R}^d)}\|\Delta\mu\|_{L^2(\Omega)}\,\mathrm ds\\ &\qquad\leq C\big(\|\nabla c\|_{L^\infty(0,T;L^{p}(\Omega;\mathbb{R}^d))}+\|\nabla z\|_{L^\infty(0,T;L^{p}(\Omega;\mathbb{R}^d))}\big)\int_0^t\|\nabla\mu\|_{L^{d^*-\eta}(\Omega;\mathbb{R}^d)}\|\Delta\mu\|_{L^2(\Omega)}\,\mathrm ds\\ &\qquad\leq C'\int_0^t\big(\varrho\|\nabla\mu\|_{H^{1}(\Omega;\mathbb{R}^d)}+C_\varrho\|\nabla\mu\|_{L^2(\Omega;\mathbb{R}^d)}\big)\|\Delta\mu\|_{L^2(\Omega;\mathbb{R}^d)}\,\mathrm ds\\ &\qquad\leq \varrho C'C_\delta\int_0^t\|\nabla\mu\|_{H^{1}(\Omega;\mathbb{R}^d)}^2\,\mathrm ds +C' C_\varrho C_\delta\int_0^t\|\nabla\mu\|_{L^{2}(\Omega;\mathbb{R}^d)}^2\,\mathrm ds +\delta C'\int_0^t\|\Delta\mu\|_{L^2(\Omega)}^2\,\mathrm ds. \end{align*} By choosing suitable $\delta>0$ and $\varrho>0$, we see that \begin{align*} &\int_0^t\int_\Omega\big(|\nabla c|+|\nabla z|\big)|\nabla\mu||\Delta\mu|\,\mathrm dx\,\mathrm ds \leq \epsilon\int_0^t\|\mu\|_{H^{2}(\Omega)}^2\,\mathrm ds +C_\epsilon\int_0^t\|\nabla\mu\|_{L^{2}(\Omega;\mathbb{R}^d)}^2\,\mathrm ds. \end{align*} All in all, we find from the above estimates \begin{align*} \int_0^t\|\Delta\mu\|_{L^2(\Omega)}^2 \,\mathrm ds \color{black} \leq \epsilon\int_0^t\|\mu\|_{H^2(\Omega)}^2\,\mathrm ds+C_\epsilon\int_0^t\|c_t\|_{L^2(\Omega)}^2 \,\mathrm ds \color{black} +C_\epsilon\int_0^t\|\nabla\mu\|_{L^{2}(\Omega;\mathbb{R}^d)}^2\,\mathrm ds, \end{align*} where the second and the third term on the right-hand side are bounded by \eqref{est5} for fixed $\epsilon>0$. 
By the $H^2$-elliptic regularity estimate for homogeneous Neumann problems, i.e. \begin{align*} \|\mu\|_{H^2(\Omega)}^2\leq C\big(\|\Delta \mu\|_{L^2(\Omega)}^2+\|\mu\|_{H^1(\Omega)}^2\big), \end{align*} we conclude by choosing $\epsilon>0$ sufficiently small and by using the boundedness of $\|\mu\|_{L^2(0,T;H^1(\Omega))}$ in \eqref{est5} that \begin{align} \|\mu\|_{L^2(0,T;H^2(\Omega))}\leq C. \end{align} \hfill $\square$ \section{\bf Time discretization and regularizations} \label{s:5} In this section we will introduce and motivate a \textit{thermodynamically consistent time-discretization scheme} for system \eqref{eqn:PDEsystem} and devote a large part of Sec. \ref{ss:5.2} to the proof that it admits solutions. Next, in Sec.\ \ref{ss:5.3} we will derive the energy and entropy inequalities fulfilled by the discrete solutions, and, starting from them, we will obtain a series of a priori estimates on the approximate solutions. \subsection{Setup of the time-discrete system} \label{ss:5.1} We consider an equidistant partition of $[0,T]$, with time-step $\tau>0$ and nodes \begin{align} t_\tau^k:=k\tau, \label{time-nodes} \end{align} $k=0,\ldots,K_\tau$, and we approximate the data $\mathbf{f}$, $g$, and $h$ by local means, i.e. setting for all $k=1,\ldots,K_{\tau}$ \begin{equation} \label{local-means} \ftau{k}:= \frac{1}{\tau}\int_{t_\tau^{k-1}}^{t_\tau^k} \mathbf{f}(s)\, \mathrm{d} s\,, \qquad \gtau{k}:= \frac{1}{\tau}\int_{t_\tau^{k-1}}^{t_\tau^k} g(s) \, \mathrm{d} s\,, \qquad \htau{k}:= \frac{1}{\tau}\int_{t_\tau^{k-1}}^{t_\tau^k} h(s) \, \mathrm{d} s\,. \end{equation} In what follows, for a given $K_\tau$-tuple $(v_{\tau}^k)_{k=1}^{K_\tau}$ the time-discrete derivative is denoted by \[ D_{\tau,k}(v) =\frac{v_{\tau}^k-v_{\tau}^{k-1}}{\tau} \quad \text{so that} \quad D_{\tau,k}(D_{\tau,k}(v)) = \frac{v_{\tau}^k-2v_{\tau}^{k-1} + v_{\tau}^{k-2}}{\tau^2}. 
\] Before stating the complete time-discrete scheme in Problem \ref{def:time-discrete}, we are going to introduce its main ingredients in what follows. \color{black} \paragraph{\bf Regularization of the coefficient functions depending on $\bf c$ \color{black}} In the following we will analyze a specially chosen time-discretization scheme for system \eqref{eqn:PDEsystem}. To ensure suitable coercivity properties in the time-discrete system needed for existence of solutions we utilize the following $\omega$-regularizations which will eventually vanish as $\omega\downarrow 0$: \begin{itemize} \item[--] First of all, we will replace the maximally monotone operator $\beta$ (the derivative of the convex part of the potential $\phi$ (see Hypothesis (I)) by its Yosida regularization $\beta_\omega\in \mathrm{C}^0(\mathbb{R})$ with Yosida index $\omega\in(0,\infty)$. This will be crucial to render rigorously the \emph{Fifth a priori estimate} on the time-discrete level, cf. the calculations in Sec.\ \ref{ss:5.4}. Observe that the Yosida approximation $\widehat{\beta}_\omega\in C^1(\mathbb{R})$ of $\widehat\beta$, fulfilling $\widehat{\beta}_\omega' = \beta_\omega$, is still convex, and that $\beta_\omega(0)=0$. \color{black} For notational consistency we set $\phi_\omega:=\widehat{\beta}_\omega+\gamma$. \item[--] Let $\{\mathcal R_\omega\}_{ \omega>0\color{black}}\subseteq C^2(\mathbb{R})\cap W^{2,\infty}(\mathbb{R})$ be a family of functions (we can think of ``smoothed truncations'') such that: \begin{align} \forall M>0\quad\exists \omega_0>0\qquad\forall \omega\in(0,\omega_0),\;c\in(-M,M): \qquad \mathcal R_\omega(c)=c. \label{Rtrunc} \end{align} They have the role to somehow provide for the information that $c$ is bounded, which is not supplied by the concentration potential $\phi$, defined on all of $\mathbb{R}$. In turn, this information is crucial in order to make some of the following calculations rigorous. 
The limit passage as $\omega \downarrow 0$ will be possible thanks to an a priori bound for $c$ in $L^\infty (Q)$, cf.\ Sec.\ \ref{s:6} ahead. \color{black} \par We define the following regularizations for the elastic energy density: \begin{align*} &W^\omega(c,\varepsilon,z):=W(\mathcal R_\omega(c),\varepsilon,z). \end{align*} and observe that for fixed $\omega>0$ and fixed $\varepsilon\in\mathbb{R}_{sym}^{d\times d}$ and $z\in\mathbb{R}$ (cf.\ also \eqref{later-ref}): \color{black} \begin{align} \label{eqn:WtauEst} |W^\omega(c,\varepsilon,z)|+|W_{,c}^\omega(c,\varepsilon,z)|+|W_{,cc}^\omega(c,\varepsilon,z)|\leq C \qquad\text{ uniformly in }c\in\mathbb{R}. \end{align} \end{itemize} Throughout \underline{this section} we neglect the subscript $\omega$ on the solutions $c$, $\mu$, $z$, $\vartheta$ and $\mathbf{u}$ for the sake of readability. \paragraph{\bf Convex-concave \color{black} splitting of the coefficient functions} Let us mention in advance how all the various nonlinear terms in \eqref{eqn:PDEsystem} will be coped with in the discrete system \eqref{PDE-discrete}, which is in fact carefully designed in such a way as to ensure the validity of the \emph{discrete total energy inequality}, cf.\ the forthcoming Lemma \ref{l:energy-est}. To this aim, it will be crucial to employ the \emph{convex-concave splitting} of the functions $c\mapsto W^\omega(c,\varepsilon,z)$, $z \mapsto W^\omega(c,\varepsilon,z)$, $z\mapsto\sigma(z)$, as well as the specific splitting \eqref{specific-splitting-phi} below (cf.\ also \eqref{decomposition}) for $\phi_\omega$. Recall that, a convex-concave decomposition of some real-valued $\mathrm{C}^2(I)$-function $\psi$ with bounded second derivative on an interval $I$ may be canonically given by $\psi = \conv{\psi} + \conc{\psi}$, with \begin{align} \label{eqn:splitting} &\conv{\psi}(x):=\psi(x)+\frac12\Big(\max_{y\in I}|\psi''(y)|\Big)x^2, &&\conc{\psi}(x):=-\frac12\Big(\max_{y\in I}|\psi''(y)|\Big)x^2. 
\end{align} Therefore, we will proceed as follows: \begin{itemize} \item[--] The nonlinear contribution $\sigma'(z)$ in \eqref{e:z} will be discretized via the convex-concave splitting \eqref{eqn:splitting} on $I=[0,1]$: \begin{align*} \sigma'(z)\;\text{ via }\;(\conv{\sigma})'(z_\tau^k)+(\conc{\sigma})'(z_\tau^{k-1}). \end{align*} \item[--] For the time-discrete version of the term $\pd{c}(c,\varepsilon(\mathbf u),z)$ in \eqref{e:mu} and $\pd{z}(c,\varepsilon(\mathbf u),z)$ in \eqref{e:z} we will resort to partial convex-concave splittings of $ W^\omega$. To denote them, we will use the symbols \eqref{eqn:splitting}, combined with subscripts \color{black} to denote the variable with respect to which the splitting is computed. Therefore, we set \begin{subequations} \label{eqn:convConcSplittingWc} \begin{align} &{\breve{W}_{1}^\omega}(c,\varepsilon,z):= W^\omega(c,\varepsilon,z)+\frac12\Big ( \sup_{\widetilde c \in \mathbb{R}}| W_{,cc}^\omega(\widetilde c,\varepsilon,z)|\Big)c^2,\\ &{\invbreve{W}_{1}^\omega}(c,\varepsilon,z):=-\frac12\Big(\sup_{\widetilde c \in \mathbb{R}}| W_{,cc}^\omega(\widetilde c,\varepsilon,z)|\Big)c^2,\\ &{\breve{W}_{3}^\omega}(c,\varepsilon,z):=W^\omega(c,\varepsilon,z)+\frac12\Big ( \sup_{\widetilde z \in [0,1]}| W_{, zz\color{black}}^\omega(c,\varepsilon,\widetilde z)|\Big)z^2,\\ &{\invbreve{W}_{3}^\omega}(c,\varepsilon,z):=-\frac12\Big(\sup_{\widetilde z \in [0,1]}|W_{, zz}\color{black}^\omega(c,\varepsilon,\widetilde z)|\Big)z^2. \end{align} \end{subequations} Note that these functions are well-defined for fixed $\omega>0$ due to \eqref{eqn:WtauEst}. The splitting of $ W^\omega$ with respect to $\varepsilon(\mathbf u)$ is not needed due to the convexity of $ W^\omega$ with respect to $\varepsilon(\mathbf u)$ by the structural assumption \eqref{eqn:assumptionW} and the non-negativity of $b$ in Hypothesis (V). 
We easily see that $$ W^\omega={\breve{W}_{1}^\omega}+{\invbreve{W}_{1}^\omega}={\breve{W}_{3}^\omega}+{\invbreve{W}_{3}^\omega} $$ and that \begin{align*} &&&&&{\breve{W}_{1}^\omega}(\cdot,\varepsilon,z)\text{ is convex on $\mathbb{R} $}, &&{\invbreve{W}_{1}^\omega}(\cdot,\varepsilon,z)\text{ is concave on } \mathbb{R} \hspace*{2.2em}\bigg\}\text{ for all fixed }\varepsilon,z,\\ &&&&& W^\omega(c,\cdot,z)\text{ is convex on $\mathbb{R}_{sym}^{d\times d}$} &&\hspace*{14.0em}\bigg\}\text{ for all fixed }c,z,\\ &&&&&{\breve{W}_{3}^\omega}(c,\varepsilon,\cdot)\text{ is convex on $[0,1]$}, &&\hspace*{0.07em}{\invbreve{W}_{3}^\omega}(c,\varepsilon,\cdot)\text{ is concave on }[0,1]\quad\bigg\}\text{ for all fixed }c,\varepsilon. \end{align*} We will replace the terms $W_{,c}$, $W_{,\varepsilon}$, and $W_{,z}$ in system \eqref{eqn:PDEsystem} by their \color{black} time-discretized and regularized versions: \color{black} \begin{align*} &&&&&\pd{c}(c,\varepsilon(\mathbf u),z)&&\text{ via }&&\breve{W}_{1,c}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})+\invbreve{W}_{1,c}^\omega(c_\tau^{k-1},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1}),&&&&&&\\ &&&&&\pd{\varepsilon}(c,\cdot,z)&&\text{ via }&&\pd{\varepsilon}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^k),z_\tau^k),\\ &&&&&\pd{z}(c,\varepsilon(\mathbf u),z)&&\text{ via }&&\breve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^k)+\invbreve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1}). 
\end{align*} By exploiting convexity and concavity estimates this time-discretization scheme leads to the crucial estimate \begin{align} \begin{aligned} &\Big(\breve{W}_{1,c}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})+\invbreve{W}_{1,c}^\omega(c_\tau^{k-1},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})\Big)(c_\tau^{k}-c_\tau^{k-1})\\ &+\pd{\varepsilon}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^k),z_\tau^k):\varepsilon(\ub_\tau^k-\ub_\tau^{k-1})\\ &+\Big(\breve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^k)+\invbreve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})\Big)(z_\tau^k-z_\tau^{k-1})\\ &\qquad\geq W^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})- W^\omega(c_\tau^{k-1},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})\\ &\qquad\quad+ W^\omega(c_\tau^{k},\varepsilon(\ub_\tau^k),z_\tau^k)- W^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^k)\\ &\qquad\quad+ W^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^k)- W^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})\\ &\qquad\geq W^\omega(c_\tau^{k},\varepsilon(\ub_\tau^k),z_\tau^k)- W^\omega(c_\tau^{k-1},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1}), \end{aligned} \label{eqn:convConcWall} \end{align} which will be used later in the proof of the discrete total energy inequality. \item[--] We will discretize the (formally written) term $\phi'(c) = \beta(c) +\gamma'(c)$ in \eqref{e:mu} in the following way: As mentioned above the maximally monotone operator $\beta$ is replaced by its Yosida regularization $\beta_\omega\in \mathrm{C}^0(\mathbb{R})$. Hence, in view of the $\lambda_\gamma$-convexity of $\gamma$ (cf. 
Remark \ref{rmk:l-convex-splitting}), the functions \color{black} \begin{equation} \label{specific-splitting-phi} \conv{\phi}_\omega(c): = \widehat{\beta}_\omega(c) + \lambda_\gamma \frac{c^2}2 \quad \text{and} \quad \conc{\phi}(c):=\gamma(c) - \lambda_\gamma \frac{c^2}2 \end{equation} provide a convex-concave decomposition of $\phi_\omega:= \widehat{\beta}_\omega + \gamma$. Thus, we will approximate \begin{align*} \phi'(c)\;\text{ via }\;(\conv{\phi}_\omega)'(c_\tau^{k})+(\conc{\phi})'(c_\tau^{k-1}) \qquad \text{with } \conv{\phi}_\omega,\, \conc{\phi} \text{ given by \eqref{specific-splitting-phi}.} \end{align*} \end{itemize} \paragraph{\bf Statement of the time-discrete problem and existence result} In the following we are going to describe the time-discrete problem formally. Later on the precise spaces and a weak notion of solution \color{black} will be fixed. The time-discrete problem (formally) reads as follows: \begin{problem} \upshape \label{def:time-discrete} Let $\omega>0$ and $\tau>0$ be given. 
Find functions $\{(c_\tau^{k}, \mu_\tau^{k},z_\tau^k,\vartheta_\tau^k)\}_{k=0}^{K_\tau}$ and $\{\ub_\tau^k\}_{k=-1}^{K_\tau}$ which satisfy for all $k\in\{1,\ldots,K_\tau\}$ the following time-discrete version of \eqref{eqn:PDEsystem}: \begin{subequations} \label{PDE-discrete} \begin{itemize} \item[(i)] Cahn-Hilliard system: \begin{align} \label{eqn:discr1} D_{\tau,k}(c)={}&\dive\big(m(c_\tau^{k-1},z_\tau^{k-1})\nabla\mu_\tau^{k}\big),\\ \notag \mu_\tau^{k}={}&-\Delta_p(c_\tau^{k})+(\conv{\phi}_\omega)'(c_\tau^{k}) +(\conc{\phi})'(c_\tau^{k-1}) +\breve{W}_{1,c}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})\\ &+\invbreve{W}_{1,c}^\omega(c_\tau^{k-1},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})-\vartheta_\tau^k+D_{\tau,k}(c), \label{eqn:discr2} \end{align} \item[(ii)] damage equation: \begin{equation} \begin{aligned} \label{eqn:discr3} &D_{\tau,k}(z)-\Delta_p(z_\tau^k)+\ell_\tau^{k} +\zeta_\tau^{k} + (\conv{\sigma})'(z_\tau^k)+ (\conc{\sigma})'(z_\tau^{k-1}) \\ & =-\breve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^k) - \invbreve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})+\vartheta_\tau^k \end{aligned} \end{equation} with \begin{align*} &\ell_\tau^{k}\in \partial I_{[0,\infty)}\big(z_\tau^k\big),\qquad \zeta_\tau^{k}\in \partial I_{(-\infty,0]}\big(D_{\tau,k}(z)\big), \end{align*} \item[(iii)] temperature equation: \begin{equation} \label{eqn:discr4} \begin{aligned} &D_{\tau,k}(\vartheta) - \mathrm{div}(\mathsf{K}(\vartheta_\tau^k)\nabla \vartheta_\tau^k) \color{black} +D_{\tau,k}(c)\vartheta_\tau^k+D_{\tau,k}(z)\vartheta_\tau^k+\rho\vartheta_\tau^k\dive(D_{\tau,k}(\mathbf u))\\ &=g_\tau^k+|D_{\tau,k}(c)|^2+|D_{\tau,k}(z)|^2+m(c_\tau^{k-1},z_\tau^{k-1})|\nabla\mu_\tau^{k}|^2 \\ & \qquad \qquad \qquad + a(c_\tau^{k-1},z_\tau^{k-1})\varepsilon(D_{\tau,k}(\mathbf u)):\mathbb{V}\varepsilon(D_{\tau,k}(\mathbf u)), \end{aligned} \end{equation} \item[(iv)] balance of forces: \begin{align} &D_{\tau,k}(D_{\tau,k}(\mathbf 
u)) -\dive\Big( a(c_\tau^{k-1},z_\tau^{k-1})\mathbb{V}\varepsilon(D_{\tau,k}(\mathbf u)) + W_{,\varepsilon}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^k),z_\tau^k) -\rho\vartheta_\tau^k\mathds 1\Big)=\bold f_\tau^k, \label{eqn:discr5} \end{align} \end{itemize} \end{subequations} supplemented with the initial data \begin{align} &\hspace*{9.3em}\left. \begin{matrix} c_\tau^0=c^0,\qquad& z_\tau^0=z^0,\qquad& \vartheta_\tau^0=\vartheta^0,\qquad\\ \mathbf u_\tau^0=\mathbf u^0,\qquad& \mathbf u_\tau^{-1}=\mathbf u^0-\tau\mathbf v^0\qquad \end{matrix} \right\} &&\text{a.e. in }\Omega\label{discre-initial-cond} \end{align} and the boundary data \begin{align} &\left. \begin{matrix} \nabla c_\tau^{k}\cdot { \bf n \color{black}}=0,\qquad& m(c_\tau^{k-1},z_\tau^{k-1})\nabla\mu_\tau^{k}\cdot { \bf n \color{black}}=0,\qquad& \nabla z_\tau^k\cdot { \bf n \color{black}}=0,\qquad\\ \mathsf{K}(\vartheta_\tau^k)\nabla\vartheta_\tau^k\cdot{ \bf n \color{black}}=h_\tau^k,\qquad& \ub_\tau^k=\mathbf{d}_\tau^k \end{matrix} \right\} &&\text{a.e. on }\partial\Omega.\label{discre-boundary-cond} \end{align} \end{problem} \begin{remark} \label{remark:discProbl} \upshape A few comments on Problem \ref{def:time-discrete} are in order: \color{black} \begin{itemize} \item[(i)] It will turn out that a solution of the time-discrete problem always satisfies the constraints: \begin{align} \label{discre-constraints} &&&z_\tau^k\in[0,1],&&D_{\tau,k}(z)\leq 0, &&\vartheta_\tau^k\geq\underline\vartheta\quad(\text{for some }\underline\vartheta>0) &&\text{a.e. in }\Omega \end{align} as long as the initial data satisfy \eqref{h:initial}. 
\item[(ii)] Observe that the scheme is fully implicit and, in particular, the discrete temperature equation \eqref{eqn:discr4} is coupled with \eqref{eqn:discr2}, \eqref{eqn:discr3}, and \eqref{eqn:discr5} via the implicit term $\vartheta_\tau^k$ featuring in $D_{\tau,k}(c)\vartheta_\tau^k$, $D_{\tau,k}(z)\vartheta_\tau^k$, and $ \rho \color{black} \, \vartheta_\tau^k\dive(D_{\tau,k}(\mathbf u))$. Indeed, having $\vartheta_\tau^k$ implicit in these terms is crucial for the argument we will develop later on for proving the positivity of $\vartheta_\tau^k$, cf.\ the proof of Lemma \ref{l:positivityThetaDiscr}. \color{black} \item[(iii)] The subgradients $\ell_\tau^{k}$ and $\zeta_\tau^{k}$ account for non-negativity as well as irreversibility constraints for $z$. In the pointwise formulation we obtain by the sum rule for $z_\tau^{k-1} \not= 0$ and by direct calculations for $z_\tau^{k-1}=0$ \color{black} $$ \partial I_{[0,\infty)}\big(z_\tau^k\big) +\partial I_{(-\infty,0]}\big(D_{\tau,k}(z)\big) = \partial I_{[0,\infty)}\big(z_\tau^k\big) +\partial I_{(-\infty,z_\tau^{k-1}]}\big(z_\tau^k\big) =\partial I_{[0,z_\tau^{k-1}]}(z_\tau^k) $$ and, consequently, the double inclusion in (ii) may be replaced by the single inclusion \begin{align*} &D_{\tau,k}(z)-\Delta_p(z_\tau^k)+\xi_\tau^{k} + (\conv{\sigma})'(z_\tau^k)+ (\conc{\sigma})'(z_\tau^{k-1}) \\ & =-\breve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^k) - \invbreve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})+\vartheta_\tau^k \end{align*} with \begin{align*} &\xi_\tau^{k}\in \partial I_{[0,z_\tau^{k-1}]}(z_\tau^k). \end{align*} \item[(iv)] By assuming the additional growth assumptions \begin{align*} &\sigma(0)\leq \sigma(z),\qquad b(c,0)\leq b(c,z)\text{ for all }c\in\mathbb{R},z\in\mathbb{R} \text{ with }z<0, \end{align*} it is possible to prove a maximum principle for equation \eqref{eqn:discr3} which ensures $z_\tau^k\geq 0$ as long as $z^0\geq 0$. 
In this case the subdifferential term $ \partial I_{[0,\infty)}(z_\tau^k) $ \color{black} in equation \eqref{eqn:discr3} may be dropped. For details we refer to \cite[Proposition 5.5]{KRZ}. \end{itemize} \end{remark} We can now state our existence result for Problem \ref{def:time-discrete}, where we also fix the concept of weak solution to system \eqref{PDE-discrete}. With this aim, let us also \color{black} introduce the nonlinear operator $\mathcal A^k:X\to H^1(\Omega)'$, with \begin{align} & X:=\Big\{\theta\in H^1(\Omega)\;:\;\int_\Omega\mathsf{K}(\theta)\nabla\theta\cdot\nabla v\,\mathrm dx \text{ is well-defined for all }v\in H^1(\Omega)\Big\}, \notag \\ & \big\langle\mathcal A^k(\theta),v\big\rangle_{H^1}:=\int_\Omega \mathsf{K}(\theta)\nabla\theta\cdot\nabla v\,\mathrm dx -\int_{\partial\Omega}h_\tau^k v\,\mathrm dx. \label{A-operator} \end{align} \begin{proposition} \label{prop:exist-discr} Assume \textbf{Hypotheses (I)--(V)}, as well as \eqref{hyp:data} on $(\mathbf{f},g,h)$ and \eqref{h:initial} on $(c^0,z^0,\vartheta^0,\mathbf{u}^0,\mathbf{v}^0)$. \color{black} Then, for every $\omega>0$ and $\tau>0$ Problem \ref{def:time-discrete} admits a weak solution \begin{align} \label{eqn:regDiscSol} \{(c_\tau^{k}, \mu_\tau^{k},z_\tau^k,\vartheta_\tau^k,\ub_\tau^k)\}_{k=1}^{K_\tau}\subseteq W^{1,p}(\Omega)\times H_N^2(\Omega)\times W^{1,p}(\Omega)\times H^1(\Omega)\times H^2(\Omega;\mathbb{R}^d)\color{black} \end{align} in the following sense: \begin{itemize} \item[--] \eqref{eqn:discr1} and \eqref{eqn:discr5} are fulfilled a.e. 
in $\Omega$, with the boundary conditions $ \nabla c_\tau^{k}\cdot n=0$ and $ \ub_\tau^k=\mathbf{d}_\tau^k$ a.e.\ on $\partial\Omega$, \color{black} \item[--] \eqref{eqn:discr2} is fulfilled in $W^{1,p}(\Omega)'$, \item[--] \eqref{eqn:discr4} is fulfilled in $H^1(\Omega)'$, in the form \[ \begin{aligned} &D_{\tau,k}(\vartheta)+\mathcal A^k(\vartheta_\tau^k)+D_{\tau,k}(c)\vartheta_\tau^k+D_{\tau,k}(z)\vartheta_\tau^k+\rho\vartheta_\tau^k\dive(D_{\tau,k}(\mathbf u))\\ &=g_\tau^k+|D_{\tau,k}(c)|^2+|D_{\tau,k}(z)|^2+m(c_\tau^{k-1},z_\tau^{k-1})|\nabla\mu_\tau^{k}|^2 \\ & \qquad \qquad \qquad + a(c_\tau^{k-1},z_\tau^{k-1})\varepsilon(D_{\tau,k}(\mathbf u)):\mathbb{V}\varepsilon(D_{\tau,k}(\mathbf u)), \end{aligned} \] \color{black} \item[--] \eqref{eqn:discr3} is reformulated as (cf. Remark \ref{remark:discProbl} (iii)) \begin{equation} \begin{aligned} &D_{\tau,k}(z)-\Delta_p(z_\tau^k)+\xi_\tau^{k} + (\conv{\sigma})'(z_\tau^k)+ (\conc{\sigma})'(z_\tau^{k-1})\\ \label{eqn:discr3b} &=-\breve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^k) - \invbreve{W}_{3,z}^\omega(c_\tau^{k},\varepsilon(\ub_\tau^{k-1}),z_\tau^{k-1})+\vartheta_\tau^k \end{aligned} \end{equation} and fulfilled in $W^{1,p}(\Omega)'$ with $\xi_\tau^{k}\in \partial I_{Z_\tau^{k-1}}(z_\tau^k)$ where \begin{equation} \label{eqn:set_z} Z_\tau^{k-1}:=\{z\in W^{1,p}(\Omega)\,|\,0\leq z\leq z_\tau^{k-1}\},\color{black} \end{equation} \item[--] the initial conditions \eqref{discre-initial-cond} and the boundary conditions \eqref{discre-boundary-cond} \color{black} are satisfied, \item[--] the constraints \eqref{discre-constraints} are satisfied. \end{itemize} \end{proposition} We will prove Proposition \ref{prop:exist-discr} in the ensuing section by performing a double passage to the limit in a carefully devised approximation of system \eqref{PDE-discrete}, depending on two additional parameters $\nu$ and $M$. 
\subsection{Proof of Proposition \ref{prop:exist-discr}} \label{ss:5.2} \noindent We will split the proof of Prop.\ \ref{prop:exist-discr} in several steps and obtain a series of intermediate results. Our argument is based on a double approximation procedure and two consecutive limit passages. More precisely, we approximate system \eqref{PDE-discrete} by \begin{enumerate} \item adding the higher order terms \begin{align*} &&&&&&&+\nu\dive\big(|\nabla \mu_\tau^{k}|^{\varrho-2}\nabla\mu_\tau^{k}\big)-\nu\mu_\tau^{k} &&\text{to the right-hand side of the discrete Cahn-Hilliard equation \eqref{eqn:discr1}},\\ &&&&&&&+\nu|c_\tau^{k}|^{\varrho-2}c_\tau^{k} &&\text{to the right-hand side of the discrete Cahn-Hilliard equation \eqref{eqn:discr2}},\\ &&&&&&&+\nu|z_\tau^k|^{\varrho-2}z_\tau^{k} &&\text{to the left-hand side of the discrete damage equation \eqref{eqn:discr3}},\\ &&&&&&&-\nu\dive\big(|\varepsilon(\ub_\tau^k-\mathbf{d}_\tau^k)|^{\varrho-2}\varepsilon(\ub_\tau^k-\mathbf{d}_\tau^k) \big) \color{black} &&\text{to the left-hand side of the discrete momentum equation \eqref{eqn:discr5}} \end{align*} with $\nu>0$ and $\varrho>4$. In this way, the quadratic growth of the terms on the right-hand side of the temperature equation will be compensated and coercivity properties of the elliptic operators involved in the time-discrete scheme \color{black} ensured. \item Truncating the heat conduction function $\mathsf{K}$ and replacing it with a bounded $\mathsf{K}_M$ with $M\in\mathbb{N}$. In this way the elliptic operator in the discrete heat equation will be defined on $H^1(\Omega)$, with values in $H^1(\Omega)'$, but we will of course lose \color{black} the enhanced estimates on the temperature variable provided by the coercivity properties of $\mathsf{K}$. That is why we will have to accordingly \color{black} truncate all occurrences of $\vartheta$ in the quadratic terms. 
\end{enumerate} Let us mention in advance that this double approximation, leading to system \eqref{discr-syst-appr} later on, shall be devised in such a way as to allow us to prove the existence of solutions to \eqref{discr-syst-appr}, by resorting to a result from the theory of elliptic systems featuring pseudomonotone operators, cf.\ \cite{Rou05}. \color{black} \textbf{A caveat on notation:} the solutions to the approximate discrete system \eqref{discr-syst-appr} at the $k$-th time step, with \underline{given} $S_{\tau}^{k-1}:=(c_{\tau}^{k-1}, z_{\tau}^{k-1}, \mathbf{u}_{\tau}^{k-1}, \vartheta_{\tau}^{k-1})$ and $\mathbf{u}_\tau^{k-2}$ , will depend on the parameters $\tau$, $\nu$ and $M$ (and on $\omega$ which we omit at the moment). Therefore, we should denote them by $ S_{\tau,\nu,M}^k:= (c_{\tau,\nu,M}^k,\mu_{\tau,\nu,M}^k, z_{\tau,\nu,M}^k, \vartheta_{\tau,\nu,M}^k, \mathbf u_{\tau,\nu,M}^k)$. However, to increase readability, we will simply write $c^k$, $\mu^k$, $z^k$, $\vartheta^k$ and $\mathbf u^k$ and use the notation $c^k_M, \ldots, \mathbf u_M^k$ ($c^k_\nu, \ldots, \mathbf u_\nu^k$, respectively), only upon addressing the limit passage as $M\to\infty$ (as $\nu \downarrow 0$, respectively). \paragraph{\bf Outline of the proof of Proposition \ref{prop:exist-discr}:} \color{black} For given $\tau>0$, the construction of the solution quintuples \color{black} $S_{\tau,\nu,M}^k$ and the limit passages as $M\to\infty$ and as $\nu \downarrow 0$ \color{black} are performed recursively over $k=1,\ldots,K_\tau$ in the following order: \begin{align*} &\qquad\vdots &&\qquad\vdots &&\quad\vdots &&\qquad\vdots &&\quad\vdots &&\quad\vdots &&\quad\vdots\\ &(S_{\tau}^{k-2},\mathbf u_{\tau}^{k-3}) &&\xmapsto[\text{Step 1}]{\text{pseudo-mon. op. 
theory}} &&S_{\tau,\nu,M}^{k-1} &&\xrightarrow[\text{Step 2}]{\;M\to\infty\;} &&S_{\tau,\nu}^{k-1} &&\xrightarrow[\text{Step 3}]{\;\nu\downarrow0\;} &&S_{\tau}^{k-1}\\ &(S_{\tau}^{k-1},\mathbf u_{\tau}^{k-2}) &&\xmapsto[\text{Step 1}]{\text{pseudo-mon. op. theory}} &&S_{\tau,\nu,M}^{k} &&\xrightarrow[\text{Step 2}]{\;M\to\infty\;} &&S_{\tau,\nu}^{k} &&\xrightarrow[\text{Step 3}]{\;\nu\downarrow0\;} &&S_{\tau}^{k}\\ &(S_{\tau}^{k},\mathbf u_{\tau}^{k-1}) &&\xmapsto[\text{Step 1}]{\text{pseudo-mon. op. theory}} &&S_{\tau,\nu,M}^{k+1} &&\xrightarrow[\text{Step 2}]{\;M\to\infty\;} &&S_{\tau,\nu}^{k+1} &&\xrightarrow[\text{Step 3}]{\;\nu\downarrow0\;} &&S_{\tau}^{k+1}\\ &\qquad\vdots &&\qquad\vdots &&\quad\vdots &&\qquad\vdots &&\quad\vdots &&\quad\vdots &&\quad\vdots \end{align*} The construction of $S_{\tau,\nu,M}^{k}$ will be tackled in Subsection \ref{sss:4.2.1}, the limit passage as $M\to\infty$ to $S_{\tau,\nu}^k$ in Subsection \ref{sss:4.2.2.}, and the one as $\nu \downarrow 0$ to $S_\tau^k$ in Subsection \ref{sss:4.2.3.}. Throughout all of them, we will work under the assumptions of Proposition \ref{prop:exist-discr}, and omit to explicitly invoke them in the following statements. \color{black} \subsubsection{\textbf{Step 1: Existence and uniform estimates of the time-discrete system with ${\boldsymbol \nu}$- and ${\bf M}$-regularization.}} \label{sss:4.2.1} \noindent From now on let $\nu>0$, $\varrho>4$ and $M\in\mathbb{N}$. Let \begin{equation} \label{def-k-m} \mathsf{K}_M(r):= \left\{ \begin{array}{ll} \mathsf{K}(0) & \text{if } r <0, \\ \mathsf{K}(r) & \text{if } 0\leq r \leq M, \\ \mathsf{K}(M) & \text{if } r >M \end{array} \right. 
\end{equation} and accordingly we \color{black} introduce the quasilinear operator $\mathcal{A}_M^k$ in analogy to \eqref{A-operator}\color{black}: \begin{equation} \label{M-operator} \mathcal{A}_M^k: H^1(\Omega) \to H^1(\Omega)' \ \text{ defined by } \pairing{}{H^1(\Omega)}{\mathcal{A}_M^k(\theta)}{v}:= \int_\Omega \mathsf{K}_M(\theta) \nabla \theta \cdot \nabla v \, \mathrm{d} x - \int_{\partial \Omega} \htau{k}v \, \mathrm{d} S \end{equation} Observe that, thanks to \eqref{hyp-K} there still holds $\mathsf{K}_M(r) \geq c_{0} $ for all $r \in \mathbb{R}$, and therefore by the trace theorem \begin{equation} \label{ellipticity-retained} \pairing{}{H^1(\Omega)}{ \mathcal{A}_M^{k} (\theta)}{\theta} \geq \tilde{c}_0 \|\nabla \theta\|_{L^2(\Omega)}^2-c_1\|\theta\|_{L^2(\Omega)}^2 -c_1\|h_\tau^k\|_{L^2(\partial\Omega)}^2 \qquad \text{for all } \theta \in H^1(\Omega). \end{equation} We also introduce the truncation operator $\mathcal{T}_M : \mathbb{R} \to \mathbb{R}$ \begin{equation} \label{def-truncation-m} \mathcal{T}_M(r):= \left\{ \begin{array}{ll} 0 & \text{if } r <0,\\ r & \text{if } 0\leq r \leq M, \\ M & \text{if } r >M. \end{array} \right. 
\end{equation} The $(\nu,M)$-regularized time-discrete system at time step $k$ reads as follows: \begin{subequations} \label{discr-syst-appr} \begin{align} &D_k(c)=\dive\Big(m(c^{k-1},z^{k-1})\nabla\mu^k\Big)+\nu\dive\Big(|\nabla\mu^k|^{\varrho-2}\nabla\mu^k\Big)-\nu\mu^k, \label{discr-syst-appr-c}\\ &\mu^k=-\Delta_p(c^k)+(\conv{\phi}_\omega)'(c^k)+(\conc{\phi})'(c^{k-1})+\breve{W}_{1,c}^\omega(c^k,\varepsilon(\mathbf u^{k-1}), z^{k-1})+\invbreve{W}_{1,c}^\omega(c^{k-1},\varepsilon(\mathbf u^{k-1}), z^{k-1})\notag\\ &\qquad -\mathcal T_M(\vartheta^k)+D_k(c)+\nu|c^k|^{\varrho-2}c^k, \label{discr-syst-appr-mu}\\ &D_k(z)-\Delta_p(z^k)+\xi^k+(\conv{\sigma})'(z^k) + (\conc{\sigma})'(z^{k-1})+\nu|z^k|^{\varrho-2}z^k\notag\\ &\quad=-\breve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^k)-\invbreve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^{k-1})+\mathcal T_M(\vartheta^k) \quad\text{with }\xi^k\in \partial I_{[0,z^{k-1}]}(z^k), \label{discr-syst-appr-z}\\ &D_k(\vartheta) + \mathcal{A}_M^k(\vartheta^k)+D_k(c)\mathcal T_M(\vartheta^k)+D_k(z)\mathcal T_M(\vartheta^k)+\rho\mathcal T_M(\vartheta^k)\dive(D_k(\mathbf u))\notag\\ &\quad=g^k+|D_k(c)|^2+|D_k(z)|^2+ a(c^{k-1},z^{k-1})\varepsilon(D_k(\mathbf u)):\mathbb{V}\varepsilon(D_k(\mathbf u)) +m(c^{k-1},z^{k-1})|\nabla\mu^k|^2, \label{discr-syst-appr-teta}\\ &D_k(D_k(\mathbf u))-\dive\Big(a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(D_k(\mathbf u)) + W_{,\varepsilon}^\omega(c^{k},\varepsilon(\mathbf u^k),z^k) \color{black}-\rho\mathcal T_M(\vartheta^k)\mathds 1 \Big)\notag\\ &\qquad-\nu\dive\Big(|\varepsilon(\mathbf u^k-\mathbf{d}^k)|^{\varrho-2}\varepsilon(\mathbf u^k-\mathbf{d}^k)\Big)=\bold f^k, \label{discr-syst-appr-u} \end{align} \end{subequations} supplemented with the previously given boundary conditions. 
\color{black} Please note that the functions $c^{k}$, $\mu^k,z^k$, $\vartheta^k$ and $\mathbf u^k$ depend on $M$, $\nu$, $\tau$ and $\omega$ whereas the functions from the previous time steps $c^{k-1}$, $\mu^{k-1},z^{k-1}$, $\vartheta^{k-1}$, $\mathbf u^{k-1}$ and $\mathbf u^{k-2}$ only depend on $\tau$ and $\omega$ and do \textbf{not} depend on $M$ and $\nu$. We are now in the position to prove existence of weak solutions for system \eqref{discr-syst-appr} by resorting to an existence result for pseudomonotone operators from \cite{Rou05}, which is in turn based on a fixed point argument. \color{black} \begin{lemma}[Existence of the time-discrete system for $\nu>0$ and $M\in\mathbb{N}$] \label{l:exist-approx-discr} Let $\omega>0$, $\tau>0$, $k\in\{1,\ldots,K_\tau\}$, $\nu>0$ and $M\in\mathbb{N}$ be given. We assume that \begin{align*} (c^{k-1},\mu^{k-1},z^{k-1},\vartheta^{k-1},\mathbf u^{k-1},\mathbf u^{k-2})\in W^{1,p}(\Omega)\times H^{2}(\Omega)\times W^{1,p}(\Omega)\times H^1(\Omega)\times H^2(\Omega;\mathbb{R}^d)\times H^2(\Omega;\mathbb{R}^d). \end{align*} Then\, there exists a weak solution \begin{align*} (c^k, \mu^k,z^k,\vartheta^k,\mathbf u^k)\in W^{1,p}(\Omega)\times W^{1,\varrho}(\Omega)\times W^{1,p}(\Omega)\times H^1(\Omega)\times W^{1,\varrho}(\Omega;\mathbb{R}^d) \end{align*} to system \eqref{discr-syst-appr} at time step $k$\, in the following sense: \begin{itemize} \item[--] \eqref{discr-syst-appr-c} is fulfilled in $W^{1,\varrho}(\Omega)'$, \item[--] \eqref{discr-syst-appr-mu} is fulfilled in $W^{1,p}(\Omega)'$, \item[--] \eqref{discr-syst-appr-z} is fulfilled in $W^{1,p}(\Omega)'$ with $\xi^k\in \partial I_{Z^{k-1}}(z^k)$, \item[--] \eqref{discr-syst-appr-teta} is fulfilled in $H^{1}(\Omega)'$, \item[--] \eqref{discr-syst-appr-u} is fulfilled in $W_0^{1,\varrho}(\Omega;\mathbb{R}^d)'$, \item[--] the initial conditions \eqref{discre-initial-cond} and the boundary condition $\mathbf{u}^k=\mathbf{d}^k$ a.e. 
on $\partial\Omega$ are satisfied, \item[--] the constraints \eqref{discre-constraints} are satisfied. \end{itemize} \end{lemma} \begin{proof} Our approach for finding a solution to \eqref{discr-syst-appr} for a given $k$ is to rewrite the system as \begin{align} \label{label:inclusion2} 0\in \mathbf A(c^k,\mu^k,z^k,\vartheta^k,\mathbf u^k-\mathbf{d}^k)+ \partial\Psi(c^k,\mu^k,z^k,\vartheta^k,\mathbf u^k -\mathbf{d}^k \color{black}), \end{align} where $\mathbf A$ is a (to be specified) pseudomonotone and coercive operator and $\partial\Psi$ is \color{black} the subdifferential of a (to be specified) proper, convex and l.s.c. potential $\Psi$. Note that both the operator $\mathbf A$ as well as $\Psi$ will depend on the discrete functions obtained in previous time step $k-1$, but we choose not to highlight this for notational simplicity. To be more precise, we introduce the space $$ \mathbf{X} \color{black}:=W^{1,p}(\Omega)\times W^{1,\varrho}(\Omega)\times W^{1,p}(\Omega)\times H^1(\Omega)\times W_0^{1,\varrho}(\Omega;\mathbb{R}^d) $$ and the announced operator \begin{align*} &\mathbf A= \begin{bmatrix} A_1\\A_2\\A_3\\A_4\\A_5 \end{bmatrix} : \mathbf{X} \color{black}\to \mathbf{X}' \color{black} \end{align*} given component-wise by \begin{align*} A_1(c,\mu,z,\vartheta,\widetilde\mathbf u)={}&-\mu-\Delta_p(c) +\nu |c|^{\varrho-2}c +(\conv{\phi}_\omega)'(c) +(\conc{\phi})'(c^{k-1}) +\breve{W}_{1,c}^\omega(c,\varepsilon(\mathbf{u}^{k-1}), z^{k-1})\\ &+\invbreve{W}_{1,c}^\omega(c^{k-1},\varepsilon(\mathbf{u}^{k-1}), z^{k-1}) -\mathcal T_M(\vartheta)+(c-c^{k-1})\tau^{-1},\\ A_2(c,\mu,z,\vartheta,\widetilde\mathbf u)={}&-\dive(m(c^{k-1},z^{k-1})\nabla\mu)-\nu\dive(|\nabla\mu|^{\varrho-2}\nabla\mu)+\nu\mu+(c-c^{k-1})\tau^{-1},\\ A_3(c,\mu,z,\vartheta,\widetilde\mathbf u)={}&-\Delta_p(z)+\nu|z|^{\varrho-2}z+(z-z^{k-1})\tau^{-1}+(\conv{\sigma})'(\mathcal T(z))+(\conc{\sigma})'(z^{k-1})\\ &\quad +\breve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),\mathcal 
T(z))+\invbreve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^{k-1})-\mathcal T_M(\vartheta),\\ \end{align*} \begin{align*} A_4(c,\mu,z,\vartheta,\widetilde\mathbf u)={}& \mathcal{A}_M^k(\vartheta)+(\vartheta-\vartheta^{k-1})\tau^{-1}+(c-c^{k-1})\tau^{-1}\mathcal T_M(\vartheta) +(z-z^{k-1})\tau^{-1}\mathcal T_M(\vartheta) \\ & \quad +\rho\mathcal T_M(\vartheta)\dive(\widetilde\mathbf{u}+\mathbf{d}^k-\mathbf u^{k-1})\tau^{-1}-g_\tau^k -|(c-c^{k-1})\tau^{-1}|^2-|(z-z^{k-1})\tau^{-1}|^2 \\ & \quad -a(c^{k-1},z^{k-1})\varepsilon((\widetilde\mathbf{u}+\mathbf{d}^k-\mathbf u^{k-1})\tau^{-1}):\mathbb{V}\varepsilon((\widetilde\mathbf u+\mathbf{d}^k-\mathbf u^{k-1})\tau^{-1}) -m(c^{k-1},z^{k-1})|\nabla\mu|^2, \\ A_5(c,\mu,z,\vartheta,\widetilde\mathbf u)={}&(\widetilde\mathbf u+\mathbf{d}^k-2\mathbf u^{k-1}+\mathbf u^{k-2})\tau^{-2} -\nu\dive\big(|\varepsilon(\widetilde\mathbf u)|^{\varrho-2}\varepsilon(\widetilde\mathbf u)\big)\\ &\quad-\dive\Big(a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon((\widetilde\mathbf u+\mathbf{d}^k-\mathbf u^{k-1})\tau^{-1} ) \color{black}+ W_{,\varepsilon}^\omega( c^{k},\varepsilon(\widetilde\mathbf u+\mathbf{d}^k),\mathcal T(z)) -\rho\mathcal T_M(\vartheta)\mathds 1\Big)\\ &\quad-\bold f_\tau^k, \end{align*} where we make use of the truncation operator $\mathcal T$ \begin{align*} &\mathcal T(z):= \begin{cases} 0&\text{if }z < 0,\\ z&\text{if }0 < z < 1,\\ 1&\text{if }z > 1. \end{cases} \end{align*} The potential $\Psi: \mathbf{X} \color{black} \to (-\infty,+\infty]$ featuring in \eqref{label:inclusion2} is \color{black} given by \begin{align*} &\Psi(c,\mu,z,\vartheta,\widetilde\mathbf u):=I_{Z^{k-1}}(z)= \begin{cases} 0&\text{if }0\leq z\leq z^{k-1}\text{ a.e. in }\Omega,\\ \infty&\text{else,} \end{cases} \end{align*} where the set $ Z^{k-1}$ is defined in \eqref{eqn:set_z}. 
\color{black} We remark that for solutions of \eqref{label:inclusion2} the truncation operator $\mathcal T$ will disappear in the resulting system since $ \mathrm{dom}(\partial\Psi)\subseteq \{(c,\mu,z,\vartheta,\widetilde\mathbf{u})\in \mathbf{X} \color{black} \,|\, 0\leq z\leq 1\text{ a.e. in }\Omega\}. $ It is merely used as an auxiliary construction to ensure coercivity of the operator $\mathbf A$. Furthermore, the boundary values for the displacement variable are shifted to $0$ in order to obtain a vector space structure for the domain $\mathbf{X}$ of $\mathbf A$. \color{black} As a result, we have to add $\mathbf{d}^k$ to the displacement $\widetilde\mathbf{u}$ of the solution afterwards. In the following we are going to verify coercivity of $\mathbf A$. To this end, we will estimate $\langle \mathbf A(\boldsymbol{x}),\boldsymbol{x}\rangle_{ \mathbf{X}\color{black}}$ for every $\boldsymbol{x}=(c,\mu,z,\vartheta,\widetilde\mathbf{u})\in \mathbf{X}\color{black}$ from below: \begin{equation} \label{to-refer-to-later} \begin{aligned} \langle \mathbf A(\boldsymbol{x}),\boldsymbol{x}\rangle_{ \mathbf{X}\color{black}}=\big\langle \mathbf A(c,\mu,z,\vartheta,\widetilde\mathbf{u}),(c,\mu,z,\vartheta,\widetilde\mathbf{u})\big\rangle_{ \mathbf{X} \color{black}} ={}& \big\langle A_1(c,\mu,z,\vartheta,\widetilde\mathbf{u}),c\big\rangle_{W^{1,p}(\Omega)} +\big\langle A_2(c,\mu,z,\vartheta,\widetilde\mathbf{u}),\mu\big\rangle_{W^{1,\varrho}(\Omega)}\\ &+\big\langle A_3(c,\mu,z,\vartheta,\widetilde\mathbf{u}),z\big\rangle_{W^{1,p}(\Omega)} +\big\langle A_4(c,\mu,z,\vartheta,\widetilde\mathbf{u}),\vartheta\big\rangle_{H^1(\Omega)}\\ &+\big\langle A_5(c,\mu,z,\vartheta,\widetilde\mathbf{u}),\widetilde\mathbf{u}\big\rangle_{W^{1,\varrho}(\Omega;\mathbb{R}^d)}\\ &=:I_1+I_2+\ldots+I_5. 
\end{aligned} \end{equation} We now estimate the partial derivatives $\breve{W}_{1,c}^\omega$ and $\invbreve{W}_{1,c}^\omega$ of ${\breve{W}_{1}^\omega}$ and ${\invbreve{W}_{1}^\omega}$ w.r.t.\ $c$, i.e.\ \color{black} \begin{align*} &\breve{W}_{1,c}^\omega(c,\varepsilon(\mathbf u),z)= W_{,c}^\omega(c,\varepsilon(\mathbf u),z)+\big(\sup_{\widetilde{c}\in \mathbb{R}}| W_{,cc}^\omega(\widetilde c,\varepsilon(\mathbf u),z)|\big)\,c,\\ &\invbreve{W}_{1,c}^\omega(c,\varepsilon(\mathbf u),z)=-\big(\sup_{\widetilde{c}\in \mathbb{R}}| W_{,cc}^\omega(\widetilde c,\varepsilon(\mathbf u),z)|\big)\,c. \end{align*} Taking into account \eqref{eqn:convConcSplittingWc} and Hypothesis (V) (cf.\ also \eqref{later-ref}), \color{black} we obtain \begin{align} &\left| \breve{W}_{1,c}^\omega(c,\varepsilon(\mathbf u),z)\right| \leq C(|c| +1 )(1+|\varepsilon(\mathbf u)|^2), \label{est-quoted-5.1}\\ &\left| \invbreve{W}_{1,c}^\omega(c,\varepsilon(\mathbf u),z)\right| \leq C|c|(1+|\varepsilon(\mathbf u)|^2) \label{est-quoted-5.2} \end{align} We can also verify that \color{black} \begin{align} \label{oh-yes-quote} & \left| W_{,\varepsilon}^\omega(c,\varepsilon(\mathbf u),z)\right| \leq C(1+|\varepsilon(\mathbf u)|), \end{align} and \begin{align} \label{est-quoted-5.3} &\left| \breve{W}_{3,z}^\omega(c,\varepsilon(\mathbf u),z)\right| \leq C(1+|\varepsilon(\mathbf u)|^2),\\ \label{est-quoted-5.4} &\left| \invbreve{W}_{3,z}^\omega(c,\varepsilon(\mathbf u),z) \right| \leq C(1+|\varepsilon(\mathbf u)|^2)\,. \end{align} Estimates \eqref{est-quoted-5.1}--\eqref{est-quoted-5.4} are valid \color{black} for all $c\in\mathbb{R}$, $z\in[0,1]$ and $\mathbf u\in\mathbb{R}^d$, and fixed $C>0$. Taking also the boundedness properties \[ \mathcal T(z),\;z^{k-1}\in{ }[0,1]\qquad\;\;\text{a.e. in }\Omega,\qquad \mathcal T_M(\vartheta)\in{} [0,M]\qquad\text{a.e. 
in }\Omega \] into account, we obtain \begin{align*} \breve{W}_{1,c}^\omega(c,\varepsilon(\mathbf{u}^{k-1}), z^{k-1}) \geq{}& -C(|c| +1 )(1+|\varepsilon(\mathbf{u}^{k-1})|^2),\\ \invbreve{W}_{1,c}^\omega(c^{k-1},\varepsilon(\mathbf{u}^{k-1}), z^{k-1}) \geq{}& -C|c^{k-1}|(1+|\varepsilon(\mathbf{u}^{k-1})|^2), \\ W_{,\varepsilon}^\omega(c,\varepsilon(\widetilde\mathbf{u}+\mathbf{d}^k),\mathcal T(z)) \geq{}&-C(1+|\varepsilon(\widetilde\mathbf{u})|^2+|\varepsilon(\mathbf{d}^k)|^2),\\ \breve{W}_{3,z}^\omega( c,\varepsilon(\mathbf u^{k-1}),\mathcal T(z)) \geq{}& -C(1+|\varepsilon(\mathbf u^{k-1})|^2),\\ \invbreve{W}_{3,z}^\omega( c,\varepsilon(\mathbf u^{k-1}),z^{k-1}) \geq{}& -C(1+|\varepsilon(\mathbf u^{k-1})|^2). \end{align*} Together with Young's inequality and estimates \eqref{est-quoted-5.1}--\eqref{est-quoted-5.4}, \color{black} a calculation reveals for the terms $I_1,\ldots,I_5$ from \eqref{to-refer-to-later} the following bounds (hereafter, we will write $\|\cdot\|_{L^p}$ in place of $\|\cdot\|_{L^p(\Omega)}$ for shorter notation and we will denote by $\delta$ a positive constant, to be chosen later, and by $C_\delta>0$ a constant depending on $\delta$): \color{black} \begin{align*} I_1={}&\|\nabla c\|_{L^p}^p+\nu\|c\|_{L^\varrho}^\varrho +\tau^{-1}\|c\|_{L^2}^2-\tau^{-1}\int_\Omega c^{k-1} c\,\mathrm dx-\int_\Omega\mu c\,\mathrm dx\\ &+\int_\Omega\Big(\beta_\omega(c)+\lambda_\gamma c +\gamma'(c^{k-1})-\lambda_\gamma c^{k-1} +\breve{W}_{1,c}^\omega(c,\varepsilon(\mathbf{u}^{k-1}), z^{k-1})\Big)c\,\mathrm dx\\ &+\int_\Omega\Big(\invbreve{W}_{1,c}^\omega(c^{k-1},\varepsilon(\mathbf{u}^{k-1}), z^{k-1})-\mathcal T_M(\vartheta)\Big)c\,\mathrm dx\\ \geq{}&\|\nabla c\|_{L^p}^p+\nu\|c\|_{L^\varrho}^\varrho -\delta\|\mu\|_{L^2}^2 -C_\delta\|c\|_{L^2}^2 -C_\delta\|\varepsilon(\mathbf{u}^{k-1})\|_{L^4}^4 -C_\delta,\\ I_2={}&\int_\Omega m(c^{k-1},z^{k-1})|\nabla\mu|^2\,\mathrm dx +\nu\|\nabla\mu\|_{L^\varrho}^\varrho+\nu\|\mu\|_{L^2}^2+\tau^{-1}\int_\Omega 
(c-c^{k-1})\mu\,\mathrm dx\\ \geq{}& \nu\|\nabla\mu\|_{L^\varrho}^\varrho+\nu\|\mu\|_{L^2}^2-\delta\|\mu\|_{L^2}^2 -C_\delta\|c\|_{L^2}^2-C_\delta\\ I_3={}&\|\nabla z\|_{L^p}^p+\nu\|z\|_{L^\varrho}^\varrho+\tau^{-1}\|z\|_{L^2}^2-\tau^{-1}\int_\Omega z^{k-1} z\,\mathrm dx\\ &+\int_\Omega\Big((\conv{\sigma})'(\mathcal T(z))+(\conc{\sigma})'(z^{k-1}) +\breve{W}_{3,z}^\omega( c,\varepsilon(\mathbf u^{k-1}),\mathcal T(z))+\invbreve{W}_{3,z}^\omega( c,\varepsilon(\mathbf u^{k-1}),z^{k-1})-\mathcal T_M(\vartheta) \Big)z\,\mathrm dx\\ \geq{}&\|\nabla z\|_{L^p}^p+\nu\|z\|_{L^\varrho}^\varrho-\delta\|z\|_{L^2}^2 -C_\delta\|\varepsilon(\mathbf u^{k-1})\|_{L^4}^4-C_\delta,\\ I_4={}&\int_\Omega \mathsf{K}_M(\vartheta)|\nabla \vartheta|^2\,\mathrm dx -\int_{\partial \Omega} \htau{k}\vartheta \,\mathrm dx +\tau^{-1}\|\vartheta\|_{L^2}^2-\tau^{-1}\int_\Omega\vartheta^{k-1}\vartheta\,\mathrm dx\\ &+\tau^{-1}\int_\Omega\Big((c-c^{k-1})+(z-z^{k-1}) +\rho\dive(\widetilde\mathbf{u}+\mathbf{d}^k-\mathbf u^{k-1})\Big)\mathcal T_M(\vartheta)\vartheta\,\mathrm dx -\int_\Omega g^k\vartheta\,\mathrm dx\\ &-\int_\Omega\Big(|(c-c^{k-1})\tau^{-1}|^2+|(z-z^{k-1})\tau^{-1}|^2+ a(c^{k-1},z^{k-1})\varepsilon\Big(\frac{\widetilde\mathbf{u}+\mathbf{d}^k-\mathbf u^{k-1}}{\tau}\Big):\mathbb{V}\varepsilon\Big(\frac{\widetilde\mathbf{u}+\mathbf{d}^k-\mathbf u^{k-1}}{\tau}\Big)\Big)\vartheta\,\mathrm dx\\ &-\int_\Omega m(c^{k-1},z^{k-1})|\nabla\mu|^2\vartheta\,\mathrm dx\\ \geq{}&c_0\|\nabla\vartheta\|_{L^2}^2+\tau^{-1}\|\vartheta\|_{L^2}^2 -\delta\|\vartheta\|_{H^1}^2-C_\delta\|h^k\|_{H^{1/2}(\partial\Omega)}^2 -C_\delta\|\vartheta^{k-1}\|_{L^2}^2 -C_\delta\|c\|_{L^4}^4 -C_\delta\|z\|_{L^4}^4 -C_\delta\|\varepsilon(\widetilde\mathbf{u})\|_{L^4}^4\\ &-C_\delta\|\varepsilon(\mathbf{d}^k)\|_{L^4}^4 -C_\delta\| c^{k-1} \|_{L^4}^4 - C_\delta \| z^{k-1} \|_{L^4}^4 -C_\delta\|\varepsilon(\mathbf u^{k-1})\|_{L^4}^4 -C_\delta\|\nabla\mu\|_{L^4}^4 -C_\delta\|g^k\|_{L^2}^2 -C_\delta,\\ \end{align*} \begin{align*} 
I_5={}&\nu\|\varepsilon(\widetilde\mathbf{u})\|_{L^\varrho}^\varrho +\tau^{-2}\|\widetilde\mathbf{u}\|_{L^2}^2 +\tau^{-2}\int_\Omega(\mathbf{d}^k-2\mathbf u^{k-1}+\mathbf u^{k-2})\cdot\widetilde\mathbf{u}\,\mathrm dx +\tau^{-1}\int_\Omega a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(\widetilde\mathbf{u}):\varepsilon(\widetilde\mathbf{u})\,\mathrm dx\\ &+\tau^{-1}\int_\Omega a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(\mathbf{d}^k-\mathbf u^{k-1}):\varepsilon(\widetilde\mathbf{u})\,\mathrm dx +\int_\Omega W_{,\varepsilon}^\omega(c,\varepsilon( \widetilde\mathbf{u} \color{black}+\mathbf{d}^k),\mathcal T(z)):\varepsilon(\widetilde\mathbf u)\,\mathrm dx\\ &-\int_\Omega \rho\mathcal T_M(\vartheta)\dive(\widetilde\mathbf{u})\,\mathrm dx -\int_\Omega \mathbf{f}^k\cdot\widetilde\mathbf{u}\,\mathrm dx\\ \geq{}&\nu\|\varepsilon(\widetilde\mathbf{u})\|_{L^\varrho}^\varrho+\tau^{-2}\|\widetilde\mathbf{u}\|_{L^2}^2 -\delta\|\widetilde\mathbf{u}\|_{H^1}^2-C_\delta\|\mathbf u^{k-1}\|_{H^1}^2-C_\delta\|\mathbf{d}^k\|_{H^1}^2-C_\delta\|\mathbf u^{k-2}\|_{L^2}^2 -C_\delta\|\mathbf{f}^k\|_{L^2}^2-C_\delta. \end{align*} In conclusion, choosing $\delta>0$ sufficiently small in such a way as to absorb the negative terms multiplied by $\delta$ into suitable positive contributions, \color{black} we obtain constants $c',C>0$ such that \begin{align*} \langle \mathbf A(\boldsymbol{x}),\boldsymbol{x}\rangle_{ \mathbf{X}\color{black}} \geq{}& c'\Big(\|\nabla c\|_{L^p}^p +\|c\|_{L^\varrho}^\varrho +\|\nabla\mu\|_{L^\varrho}^\varrho +\|\mu\|_{L^2}^2 +\|\nabla z\|_{L^p}^p +\|z\|_{L^\varrho}^\varrho +\|\nabla\vartheta\|_{L^2}^2+\|\vartheta\|_{L^2}^2\Big)\\ &+c'\Big(\|\varepsilon(\widetilde\mathbf{u})\|_{L^\varrho}^\varrho +\|\widetilde\mathbf{u}\|_{L^2}^2\Big)-C \end{align*} which leads to coercivity of $\mathbf A$ by using Korn's inequality. The pseudomonotonicity follows from standard arguments in the theory of quasilinear elliptic equations, cf.\ \cite[Chapter 2.4]{Rou05}. 
By virtue of the existence theorem in \cite[Theorem 5.15]{Rou05} together with \cite[Lemma 5.17]{Rou05}, we find an $\boldsymbol{x}\in \mathbf{X} \color{black}$ solving \eqref{label:inclusion2}. Thus a solution of \eqref{label:inclusion2} proves the claim. \end{proof} We now derive the incremental energy inequality satisfied by the solutions to system \eqref{discr-syst-appr}. This will be the starting point for the derivation of all a priori estimates allowing us to pass to the limit, first as $M\to\infty$ and then $\nu \to 0$. \begin{lemma}[ Incremental energy inequality for the approximate discrete system] \label{l:energy-est} Let $(c^k,\mu^k,z^k,\vartheta^k,\mathbf u^k)$ be the\, weak solution to system \eqref{discr-syst-appr} at time step $k$ according to Lemma \ref{l:exist-approx-discr}. Then, for every $M\in\mathbb{N}$ and $\nu>0$ the following energy inequality holds: \begin{equation} \label{discr-total-ineq} \begin{aligned} &\mathscr{E}_\omega(c^k,z^k,\vartheta^k,\mathbf u^k,\mathbf v^k) +\frac\nu\varrho\|c^k\|_{L^\varrho(\Omega)}^\varrho +\frac\nu\varrho\|z^k\|_{L^\varrho(\Omega)}^\varrho +\frac\nu\varrho\|\varepsilon(\mathbf u^k)\|_{L^\varrho(\Omega)}^\varrho +\nu\tau\Big(\|\nabla\mu^k\|_{L^\varrho(\Omega;\mathbb{R}^d)}^\varrho +\|\mu^k\|_{L^2}^2\Big)\\ &\leq\mathscr{E}_\omega(c^{k-1},z^{k-1},\vartheta^{k-1},\mathbf u^{k-1},\mathbf v^{k-1}) +\frac\nu\varrho\|c^{k-1}\|_{L^\varrho(\Omega)}^\varrho +\frac\nu\varrho\|z^{k-1}\|_{L^\varrho(\Omega)}^\varrho +\frac\nu\varrho\|\varepsilon(\mathbf u^{k-1})\|_{L^\varrho(\Omega)}^\varrho\\ &\qquad+\tau\Big(\int_\Omega g^k\,\mathrm dx+ \int_{\partial\Omega} h^k \,\mathrm dx +\int_\Omega \bold f^k \cdot\mathbf v^k\,\mathrm dx\Big)\\ &\qquad+\tau\int_\Omega D_k(\mathbf v)\cdot D_k(\mathbf{d})\,\mathrm dx +\tau\int_\Omega a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(\mathbf v^k):\varepsilon(D_k(\mathbf{d}))\,\mathrm dx\\ &\qquad+\tau\int_\Omega 
W_{,\varepsilon}^\omega(c^{k},\varepsilon(\mathbf{u}^k),z^k):\varepsilon(D_k(\mathbf{d}))\,\mathrm dx -\tau\int_\Omega\rho\mathcal T_M(\vartheta^k)\dive(D_k(\mathbf{d}))\,\mathrm dx -\tau\int_\Omega \bold f^k \cdot D_k(\mathbf{d})\,\mathrm dx \end{aligned} \end{equation} where we set \color{black} $\mathbf v^k:=D_k(\mathbf u)$ and denote by $\mathscr{E}_\omega$ the approximation of the total energy $\mathscr{E}$ from \eqref{total-energy} obtained by replacing $\phi$ with $\phi_\omega = \widehat{\beta}_\omega +\gamma$ and $W$ with $W^\omega$. \end{lemma} \begin{proof} The convex-concave splitting gives rise to the following crucial estimates (cf.\ also \eqref{eqn:convConcWall}): \begin{subequations} \label{eqn:convConcEst} \begin{align} &\Big((\conv{\phi}_\omega)'(c^k)+(\conc{\phi})'(c^{k-1})\Big)(c^k-c^{k-1})\geq\phi_\omega(c^k)-\phi_\omega(c^{k-1}),\\ &\Big((\conv{\sigma})'(z^k)+(\conc{\sigma})'(z^{k-1})\Big)(z^k-z^{k-1})\geq\sigma(z^k)-\sigma(z^{k-1}),\\ &\Big(\breve{W}_{1,c}^\omega(c^k,\varepsilon(\mathbf u^{k-1}), z^{k-1})+\invbreve{W}_{1,c}^\omega(c^{k-1},\varepsilon(\mathbf u^{k-1}), z^{k-1})\Big)(c^k-c^{k-1})\notag\\ &\quad + W_{,\varepsilon}^\omega( c^{k},\varepsilon(\mathbf u^k),z^k):\varepsilon(\mathbf u^k-\mathbf u^{k-1}) + \Big(\breve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^k)+\invbreve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^{k-1})\Big)(z^k-z^{k-1})\notag \\ & \geq W^\omega(c^{k},\varepsilon(\mathbf u^k),z^k)- W^\omega(c^{k-1},\varepsilon(\mathbf u^{k-1}),z^{k-1}) \,.
\color{black} \end{align} \end{subequations} Moreover, we will make use of standard convexity estimates: \begin{subequations} \label{eqn:stdConvEst} \begin{align} &|\nabla c^k|^{p-2}\nabla c^k\cdot\nabla (c^k-c^{k-1}) \geq \frac 1p|\nabla c^k|^p-\frac 1p|\nabla c^{k-1}|^p,\\ &|c^k|^{\varrho-2}c^k (c^k-c^{k-1}) \geq \frac 1\varrho|c^k|^\varrho-\frac 1\varrho|c^{k-1}|^\varrho,\\ &|\nabla z^k|^{p-2}\nabla z^k\cdot\nabla (z^k-z^{k-1}) \geq \frac 1p|\nabla z^k|^p-\frac 1p|\nabla z^{k-1}|^p,\\ &|z^k|^{\varrho-2}z^k (z^k-z^{k-1}) \geq \frac 1\varrho|z^k|^\varrho-\frac 1\varrho|z^{k-1}|^\varrho,\\ &|\varepsilon(\mathbf u^k)|^{\varrho-2}\varepsilon(\mathbf u^k):\varepsilon(\mathbf u^k-\mathbf u^{k-1}) \geq \frac 1\varrho|\varepsilon(\mathbf u^k)|^\varrho-\frac 1\varrho|\varepsilon(\mathbf u^{k-1})|^\varrho,\\ &\Big(\mathbf u^k-2\mathbf u^{k-1}+\mathbf u^{k-2}\Big)\cdot(\mathbf u^k-\mathbf u^{k-1})\geq \frac12|\mathbf u^k-\mathbf u^{k-1}|^2-\frac12|\mathbf u^{k-1}-\mathbf u^{k-2}|^2. \end{align} \end{subequations} To obtain the energy estimate, we test the time-discrete system \eqref{discr-syst-appr} as follows: \begin{align*} & \text{\eqref{discr-syst-appr-c}}\times(c^{ k}-c^{ k-1})\;+\; \text{\eqref{discr-syst-appr-mu}}\times\tau\mu^{ k}\;+\; \text{\eqref{discr-syst-appr-z}}\times(z^{k}-z^{ k-1})\;+\; \text{\eqref{discr-syst-appr-teta}}\times\tau\;\\ &+\text{\eqref{discr-syst-appr-u}}\times(\mathbf u^{ k}-\mathbf u^{ k-1}-(\mathbf{d}^k-\mathbf{d}^{k-1})) \end{align*} and exploit estimates \eqref{eqn:convConcEst} and \eqref{eqn:stdConvEst}. \end{proof} \begin{remark} We note that in comparison with the calculations in the \textit{First estimate} in Section \ref{s:4}, where we assumed spatial $H^2$-regularity for $\mathbf{u}$, we cannot test the weak formulation \eqref{discr-syst-appr-u} with $\mathbf u^k-\mathbf u^{k-1}$ because the boundary values of $\mathbf u^k-\mathbf u^{k-1}$ are not necessarily $0$. 
\end{remark} \begin{lemma}[Positivity of $\vartheta^k$] \label{l:positivityThetaDiscr} There exists a constant \color{black} $\underline\vartheta>0$, independent of $\omega$, $\tau$, $k$, $M$ and $\nu$, such that $\vartheta^k\geq\underline\vartheta$ a.e. in $\Omega$. \end{lemma} \begin{proof} The proof is carried out in two steps: At first we show non-negativity of $\vartheta^k$ and then, in the second step, strict positivity is shown as claimed. \begin{itemize} \item[]\textit{Step 1:} Testing the discrete heat equation \eqref{discr-syst-appr-teta} with $-(\vartheta^k)^-:=\min\{\vartheta^k,0\}$ shows after integration over $\Omega$: \begin{align*} &\int_\Omega \frac{1}{\tau}\underbrace{\vartheta^k(-(\vartheta^k)^-)}_{=|(\vartheta^k)^-|^2}\underbrace{-\frac{1}{\tau}\vartheta^{k-1}(-(\vartheta^k)^-)}_{\geq 0} +\Big(D_k(c)+D_k(z)+\rho\dive(D_k(\mathbf u))\Big)\underbrace{\mathcal T_M(\vartheta^k)(-(\vartheta^k)^-)}_{=0}\,\mathrm dx\\ &=\int_\Omega\underbrace{\Big(g^k+|D_k(c)|^2+|D_k(z)|^2+ a(c^{k-1},z^{k-1})\varepsilon(D_k(\mathbf u)):\mathbb{V}\varepsilon(D_k(\mathbf u))\Big) }_{\geq 0}\underbrace{(-(\vartheta^k)^-)}_{\leq 0}\,\mathrm dx\\ &\quad+\int_\Omega\underbrace{ m(c^{k-1},z^{k-1})|\nabla\mu^{k}|^2}_{\geq 0}\underbrace{(-(\vartheta^k)^-)}_{\leq 0}\,\mathrm dx. \end{align*} Here we have merely used the information that $\vartheta^{k-1}\geq 0$ a.e. in $\Omega$. We obtain $$ \int_\Omega|(\vartheta^k)^-|^2\,\mathrm dx\leq 0 $$ and thus $\vartheta^k\geq 0$ a.e. in $\Omega$. \item[]\textit{Step 2:} The proof follows the very same lines as the argument developed in \cite[Lemma 4.4 - Step 3]{RocRos14}, hence we will just outline it and refer to \cite{RocRos14} for all details.
Namely, repeating the arguments formally developed in Sec.\ \ref{s:4} (cf.\ \eqref{formal-positivity}), we deduce from \eqref{discr-syst-appr-teta} that there exists $C>0$ such that \[ \int_\Omega D_k(\vartheta)w \, \mathrm{d} x + \int_\Omega \mathsf{K}_M(\vartheta^k) w \, \mathrm{d} x \geq - C \int_\Omega (\vartheta^k)^2 w \, \mathrm{d} x \qquad \text{for every }w \in W_{+}^{1,2}(\Omega)\,. \] Then, we compare the functions $(\vartheta^k)_{k=1}^{K_\tau}$ with the solutions $(v^k)_{k=1}^{K_\tau}$ of the finite difference equation $\frac{v_k-v_{k-1}}{\tau} = - C v_k^2$, with $v_0 = \vartheta_*$, and we conclude that $\vartheta^k \geq v_k $ a.e.\ in $\Omega$. Finally, with a comparison argument we prove that \[ \vartheta^k \geq v_k \geq \frac{\vartheta_*}{1+CT\vartheta_*} \doteq \underline\vartheta \quad \text{a.e. in\;}\, \Omega \qquad\text{for all } k =1,\ldots,K_\tau. \] \end{itemize} \end{proof} Lemma \ref{l:energy-est} and Lemma \ref{l:positivityThetaDiscr} give rise to the following uniform estimates: \begin{lemma} \label{lemma:firstEstDiscr} The following estimates hold uniformly in $\nu>0$ and $M\in\mathbb{N}$: \begin{subequations} \label{est-5.7} \begin{align} &\|c^k\|_{W^{1,p}(\Omega)}+\|z^k\|_{W^{1,p}(\Omega)}+\|\mathbf v^k\|_{L^2(\Omega;\mathbb{R}^d)}+\|\vartheta^k\|_{L^1(\Omega)}\leq C, \label{est-5.7.1}\\ &\nu^{\frac1\varrho}\|c^k\|_{L^\varrho(\Omega)}+\nu^{\frac1\varrho}\|z^k\|_{L^\varrho(\Omega)}+\nu^{\frac1\varrho}\|\varepsilon(\mathbf u^k)\|_{L^\varrho(\Omega;\mathbb{R}^{d\times d})}\leq C, \label{est-5.7.2}\\ &\nu\tau\Big(\|\nabla\mu^k\|_{L^\varrho(\Omega)}^\varrho+\|\mu^k\|_{L^2(\Omega)}^2\Big)\leq C. \label{est-5.7.3} \end{align} \end{subequations} \end{lemma} \begin{proof} In order to deduce estimates \eqref{est-5.7}, \color{black} it \color{black} suffices to estimate the terms of the $k$-th time step on the right-hand side of the incremental energy inequality \eqref{discr-total-ineq} from Lemma \ref{l:energy-est}. 
The following calculations are an adaptation of the calculations performed in the \textit{First estimate} in Section \ref{s:4}. \begin{itemize} \item[--] At first we observe by Young's inequality \begin{align*} &\tau\int_\Omega \mathbf{f}^k\cdot\mathbf v^k\,\mathrm dx \leq \delta\|\mathbf v^k\|_{L^2(\Omega;\mathbb{R}^d)}^2+C_\delta\|\mathbf{f}^k\|_{L^2(\Omega;\mathbb{R}^d)}^2,\\ &\tau\int_\Omega D_k(\mathbf v)\cdot D_k(\mathbf{d})\,\mathrm dx \leq \delta\|\mathbf v^k\|_{L^2(\Omega;\mathbb{R}^d)}^2 +\delta\|\mathbf v^{k-1}\|_{L^2(\Omega;\mathbb{R}^d)}^2+C_\delta\|D_k(\mathbf{d})\|_{L^2(\Omega;\mathbb{R}^d)}^2,\\ &-\tau\int_\Omega \mathbf{f}^k\cdot D_k(\mathbf{d})\,\mathrm dx \leq C\|\mathbf{f}^k\|_{L^2(\Omega;\mathbb{R}^d)}^2+C\|D_k(\mathbf{d})\|_{L^2(\Omega;\mathbb{R}^d)}^2. \end{align*} By choosing $\delta>0$ sufficiently small, the term $\delta\|\mathbf v^k\|_{L^2(\Omega;\mathbb{R}^d)}^2$ is absorbed by the left-hand side of \eqref{discr-total-ineq}. The remaining terms are bounded due to \eqref{hyp:data}. \item[--] We continue with the next term on the right-hand side of \eqref{discr-total-ineq} \color{black} by using that $\mathbf v^k=D_k(\mathbf{d})$ a.e.
on $\partial\Omega$, the trace theorem and Young's inequality \begin{align*} \qquad&\tau\int_\Omega a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(\mathbf v^k):\varepsilon(D_k(\mathbf{d}))\,\mathrm dx\\ &=-\tau\int_\Omega\mathbf v^k\cdot\dive\big(a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(D_k(\mathbf{d}))\big)\,\mathrm dx +\tau\int_{\partial\Omega} \mathbf v^k\cdot\big(a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(D_k(\mathbf{d}))n\big)\, \mathrm{d} S\\ &=-\tau\int_\Omega\mathbf v^k\cdot\Big(\big(a_{,c}(c^{k-1},z^{k-1})\nabla c^{k-1} +a_{,z}(c^{k-1},z^{k-1})\nabla z^{k-1}\big)\mathbb{V}\varepsilon(D_k(\mathbf{d}))\Big)\,\mathrm dx\\ &\quad-\tau\int_\Omega\mathbf v^k\cdot a(c^{k-1},z^{k-1})\dive\big(\mathbb{V}\varepsilon(D_k(\mathbf{d}))\big)\,\mathrm dx +\tau\int_{\partial\Omega} D_k(\mathbf{d})\color{black}\cdot\big(a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(D_k(\mathbf{d}))n\big)\, \mathrm{d} S\\ &\leq \delta\|\mathbf v^k\|_{L^2(\Omega;\mathbb{R}^d)}^2 +C_\delta\|\varepsilon(D_k(\mathbf{d}))\|_{L^\infty(\Omega;\mathbb{R}^{d\times d})}^2 \Big(\|a_{,c}(c^{k-1},z^{k-1})\|_{L^\infty(\Omega)}^2\|\nabla c^{k-1}\|_{L^2(\Omega;\mathbb{R}^d)}^2\\ &\hspace*{20em}+\|a_{,z}(c^{k-1},z^{k-1})\|_{L^\infty(\Omega)}^2\|\nabla z^{k-1}\|_{L^2(\Omega;\mathbb{R}^d)}^2\Big)\\ &\quad+C_\delta\|a(c^{k-1},z^{k-1})\|_{L^\infty(\Omega)}^2\|\tau\dive(\mathbb{V}\varepsilon(D_k(\mathbf{d})))\|_{L^2(\Omega;\mathbb{R}^d)}^2\\ &\quad+C\|D_k(\mathbf{d}^k)\|_{H^1(\Omega;\mathbb{R}^d)}^2\|a(c^{k-1},z^{k-1})\|_{L^\infty(\Omega)}\|\tau\varepsilon(D_k(\mathbf{d}))n\big)\|_{H^1(\Omega;\mathbb{R}^{d\times d})}^2. 
\end{align*} Taking Hypothesis (IV) and \eqref{dirichlet-data} into account, we ultimately find that \color{black} \begin{align*} &\tau\int_\Omega a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(\mathbf v^k):\varepsilon(D_k(\mathbf{d}))\,\mathrm dx\\ &\qquad\leq\delta\|\mathbf v^k\|_{L^2(\Omega;\mathbb{R}^d)}^2 +C_\delta\big(\|\nabla c^{k-1}\|_{L^2(\Omega;\mathbb{R}^d)}^2+\|\nabla z^{k-1}\|_{L^2(\Omega;\mathbb{R}^d)}^2+1\big). \end{align*} For small $\delta>0$ the first term on the right-hand side can be absorbed into \color{black} the left-hand side of \eqref{discr-total-ineq}. \item[--] Moreover, we estimate the $\tau\int W_{,\varepsilon}^\omega(\ldots):\ldots$-term on the right-hand side of \eqref{discr-total-ineq} as follows \begin{align*} \qquad&\tau\int_\Omega W_{,\varepsilon}^\omega(c^{k},\varepsilon(\mathbf{u}^k),z^k):\varepsilon(D_k(\mathbf{d}))\,\mathrm dx\\ & \quad\leq \delta\|b(c^{k},z^{k})\|_{L^\infty(\Omega)}\int_\Omega \underbrace{\frac 12b(c^{k},z^{k})\mathbb{C}(\varepsilon(\mathbf{u}^k)-\varepsilon^*(c)):(\varepsilon(\mathbf{u}^k)-\varepsilon^*(c))}_{=W(c^k,\varepsilon(\mathbf{u}^k),z^k)}\,\mathrm dx +C_\delta\|\varepsilon(D_k(\mathbf{d}))\|_{L^2(\Omega;\mathbb{R}^{d\times d})}^2, \end{align*} which can be absorbed by the left-hand side of \eqref{discr-total-ineq} for small $\delta>0$. \item[--] Finally, \begin{align*} &-\tau\int_\Omega\rho\mathcal T_M(\vartheta^k)\dive(D_k(\mathbf{d}))\,\mathrm dx \leq\tau\rho\|\dive(D_k(\mathbf{d}))\|_{L^\infty(\Omega)}\int_\Omega|\vartheta^k|\,\mathrm dx. \end{align*} \end{itemize} In the end, by \color{black} choosing $\tau>0$ small enough depending only on $\rho$ and the data $\mathbf{d}$, the right-hand side can be absorbed by the left-hand side of \eqref{discr-total-ineq} (recall that $\vartheta^k$ is positive). \end{proof} \begin{remark} We see that the calculation above takes advantage of the fact that the $W_{,\varepsilon}(\ldots)$-term in the discrete force balance equation is discretized fully implicitly.
\end{remark} \subsubsection{\textbf{Step 2: Limit passage $M\to\infty$.}} \label{sss:4.2.2.} \noindent In the following we focus on the limit passage $M\to\infty$ and keep $M$ as a subscript in $c_M^k$, $\mu_M^k$, $z_M^k$, $\mathbf u_M^k$ and $\vartheta_M^k$. By adapting the proof in \cite[Proof of Lemma 4.4 - Step 4]{RocRos14} to our situation, we obtain enhanced estimates for $(\vartheta_M^k)_M$. \begin{lemma} \label{lemma:theteEstDiscr} The following estimate holds uniformly in $M\in\mathbb{N}$: \begin{align} \label{eqn:thetaEstDiscr} \|\vartheta_M^{k}\|_{H^1(\Omega)}\leq C. \end{align} \end{lemma} \begin{proof} In \cite[Proof of Lemma 4.4 - Step 4]{RocRos14} estimate \eqref{eqn:thetaEstDiscr} is obtained in two steps which can be both applied in our case since the additional variable $c$ enjoys the same regularity properties and estimates as $z$. At first \eqref{discr-syst-appr-teta} is tested by $\mathcal T_M(\vartheta_M^{k})$ leading to the estimates \begin{align*} \|\mathcal T_M(\vartheta_M^{k})\|_{H^1(\Omega)}+\|\mathcal T_M \color{black} (\vartheta_M^{k})\|_{L^{3\kappa+6}(\Omega)}\leq C. \end{align*} Secondly, \eqref{discr-syst-appr-teta} is tested by $\vartheta_M^{k}$ leading to the claimed estimate \eqref{eqn:thetaEstDiscr}. \end{proof} \begin{lemma} \label{lemma:discrConvergence} For given $\nu>0$\, there exist functions \begin{align*} &c^k\in W^{1,p}(\Omega), \color{black} &&\mu^k\in W^{1,\varrho}(\Omega), &&z^k\in W^{1,p}(\Omega)\text{ with }z\in[0,1]\text{ a.e. in }\Omega, \\ &\vartheta^k\in H^1(\Omega)\text{ with }\vartheta^k\geq\underline\vartheta>0\text{ a.e. 
in }\Omega, &&\mathbf u^k\in W^{1,\varrho}(\Omega;\mathbb{R}^d) \end{align*} such that for a subsequence $M\to\infty$ \begin{subequations} \label{eqn:discrConv} \begin{align} &c_M^k \to c^k\text{ strongly in }W^{1,p}(\Omega),\label{eqn:strongConvCDiscr}\\ &\mu_M^k \to \mu^k\text{ strongly in }W^{1,\varrho}(\Omega),\label{eqn:strongConvMuDiscr}\\ &z_M^k \to z^k\text{ strongly in }W^{1,p}(\Omega),\label{eqn:strongConvZDiscr}\\ &\vartheta_M^k \rightharpoonup \vartheta^k\text{ weakly in }H^{1}(\Omega), \label{eqn:strongConvTDiscr}\\ &\mathbf u_M^k \to \mathbf u^k\text{ strongly in }W^{1,\varrho}(\Omega;\mathbb{R}^d).\label{eqn:strongConvUDiscr} \end{align} \end{subequations} \end{lemma} \begin{proof} First of all, observe that \color{black} the a priori estimates in Lemma \ref{lemma:firstEstDiscr} and Lemma \ref{lemma:theteEstDiscr} imply \eqref{eqn:discrConv} with weak instead of strong topologies. The strong convergence \eqref{eqn:strongConvZDiscr} may be shown by rewriting \eqref{discr-syst-appr-z} as a variational inequality \begin{equation} \begin{aligned} &-\int_\Omega|\nabla z_M^{k}|^{p-2}\nabla z_M^{k}\cdot \nabla (\zeta-z_M^{k})\,\mathrm dx \\ &\qquad\leq\int_\Omega\Big(\frac{z_M^{k}- z^{k-1} }{\tau}+(\conv{\sigma})'(z_M^k) + (\conc{\sigma})'(z^{k-1})+\nu|z_M^k|^{\varrho-2}z_M^k\Big)(\zeta-z_M^k)\,\mathrm dx\\ &\qquad\quad+\int_\Omega\Big(\breve{W}_{3,z}^\omega( c^{k}, \varepsilon(\mathbf u^{k-1}), z_M^{k}) +\invbreve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^{k-1})- \mathcal T_M(\vartheta_M^k)\Big)(\zeta-z_M^{k})\,\mathrm dx \label{eqn:varIneqZDiscr} \end{aligned} \end{equation} holding for all $\zeta\in W^{1,p}(\Omega)$ with $0\leq \zeta\leq z^{k-1}$ a.e. in $\Omega$. 
To proceed we can argue by recovery sequences: By now we know the following: \begin{align*} &0\leq z_M^k\leq z^{k-1}\\ &\qquad\downarrow\qquad\downarrow\qquad\text{ weakly in }W^{1,p}(\Omega)\text{ as }M\to\infty.\\ &0\leq z^k\;\leq z^{k-1} \end{align*} Due to the compact embedding $W^{1,p}(\Omega)\hookrightarrow \mathrm{C}^0(\overline\Omega)$, we find another sequence denoted by $\widetilde z_M^k$ such that \begin{align*} &\widetilde z_M^k\to z^k\text{ strongly in }W^{1,p}(\Omega)\text{ as }M\to\infty\quad\text{ and }\quad 0\leq\widetilde z_M^k\leq z^{k-1}. \end{align*} We may take, for instance, $\widetilde z_M^k:=\max\{z^k-\delta_M,0\}$ for suitable values $\delta_M>0$ with $\delta_M\to 0$ as $M\to\infty$. We test \eqref{eqn:varIneqZDiscr} with the admissible function $\zeta=\widetilde z_M^k$. Taking into account the already proved weak convergences \eqref{eqn:discrConv} as well as the growth properties of the functions $(\conv{\sigma})'$ and $\breve{W}_{3,z}^\omega$ (cf.\ \eqref{est-quoted-5.3}), we manage to pass to the limit on the right-hand side of \eqref{eqn:varIneqZDiscr} and conclude that \begin{align} \label{label-2-added} &\limsup_{M\to\infty}\int_\Omega-|\nabla z_M^k|^{p-2}\nabla z_M^k\cdot \nabla (\widetilde z_M^k-z_M^k)\,\mathrm dx\leq 0. \end{align} By exploiting the uniform $p$-convexity of the $\|\cdot\|_{L^p(\Omega)}^p$-function and strong $W^{1,p}(\Omega)$-convergence of the recovery sequence, from \eqref{label-2-added} we deduce that \color{black} $\|\nabla(\widetilde z_M^k-z_M^k)\|_{L^p(\Omega)}\to 0$ as $M\to\infty$. Together with $\|\nabla(\widetilde z_M^k-z^k)\|_{L^p(\Omega)}\to 0$, property \eqref{eqn:strongConvZDiscr} is shown. To prove the strong convergences \eqref{eqn:strongConvCDiscr}, \eqref{eqn:strongConvMuDiscr} and \eqref{eqn:strongConvUDiscr}, we use a $\limsup$--argument. 
We adapt the proof from \cite[Proof of Lemma 4.4 - Step 4]{RocRos14} to our situation: \begin{itemize} \item[--] Let $\Lambda\in L^{\varrho/(\varrho-1)}(\Omega;\mathbb{R}^d)$ be a weak cluster point of $|\nabla\mu_M^k|^{\varrho-2}\nabla\mu_M^k$. Testing \eqref{discr-syst-appr-c} with $\mu_M^k$ yields by exploiting a lower semicontinuity \color{black} argument \begin{align*} \limsup_{M\to\infty}\nu\int_\Omega|\nabla\mu_M^k|^{\varrho}\,\mathrm dx ={}&\limsup_{M\to\infty}\int_\Omega-\frac{c_M^k-c^{k-1}}{\tau}\mu_M^k-m(c^{k-1},z^{k-1})|\nabla\mu_M^k|^2-\nu|\mu_M^k|^2\,\mathrm dx\\ \leq{}&\int_\Omega-\frac{ c^k-c^{k-1}}{\tau}\mu^k-\liminf_{M\to\infty}\int_\Omega m(c^{k-1},z^{k-1})|\nabla\mu_M^k|^2\,\mathrm dx-\int_\Omega\nu|\mu|^2\,\mathrm dx\\ \leq{}&\int_\Omega-\frac{ c^k-c^{k-1}}{\tau}\mu^k-m(c^{k-1},z^{k-1})|\nabla\mu|^2-\nu|\mu|^2\,\mathrm dx. \end{align*} However, the right-hand side equals $\nu\int_\Omega\Lambda\cdot\nabla\mu\,\mathrm dx$ by passing to the limit $M\to\infty$ in \eqref{discr-syst-appr-mu} and testing the limit equation with $\mu$. In conclusion, taking into account the previously proved convergences we have that \begin{align*} \limsup_{M\to\infty}\int_\Omega|\nabla\mu_M^k|^{\varrho}\,\mathrm dx \leq \int_\Omega\Lambda\cdot\nabla\mu\,\mathrm dx, \end{align*} which results in \eqref{eqn:strongConvMuDiscr}. \item[--] Convergence \eqref{eqn:strongConvCDiscr} can be gained with a similar argument as above, whereas \eqref{eqn:strongConvUDiscr} can be shown as in \cite[Proof of Lemma 4.4 - Step 4]{RocRos14}. \end{itemize} \end{proof} We are now in the position to carry out the limit passage as $M\to\infty$ and conclude the existence of a solution to an intermediate approximate version of the time-discrete system \eqref{PDE-discrete}, only featuring the higher regularizing terms and the $\omega$-regularizations, i.e.\ \eqref{discr-syst-appr2} below. 
\color{black} \begin{lemma}[Existence of the time-discrete system for $\nu>0$ and $M\to\infty$] \label{lemma:5.7} Let the assumption from Lemma \ref{l:exist-approx-discr} be fulfilled. Then\, for every $\nu>0$\, there exists a weak solution \begin{align*} \{(c^k, \mu^k,z^k,\vartheta^k,\mathbf u^k)\}_{k=1}^{K_\tau}\subseteq W^{1,p}(\Omega)\times W^{1,\varrho}(\Omega)\times W^{1,p}(\Omega)\times H^1(\Omega)\times W^{1,\varrho}(\Omega;\mathbb{R}^d) \end{align*} to the following time-discrete PDE system: \begin{subequations} \label{discr-syst-appr2} \begin{align} &D_k(c)=\dive\Big(m(c^{k-1},z^{k-1})\nabla\mu^k\Big)+\nu\dive\Big(|\nabla\mu^k|^{\varrho-2}\nabla\mu^k\Big)-\nu\mu^k &&\text{in }W^{1,\varrho}(\Omega)', \label{discr-syst-appr-c2}\\ &\mu^k=-\Delta_p(c^k)+(\conv{\phi}_\omega)'(c^k)+(\conc{\phi})'(c^{k-1}) +\breve{W}_{1,c}^\omega(c^k,\varepsilon(\mathbf u^{k-1}), z^{k-1})\notag\\ &\qquad +\invbreve{W}_{1,c}^\omega(c^{k-1},\varepsilon(\mathbf u^{k-1}), z^{k-1})-\vartheta^k+D_k(c)+\nu|c^k|^{\varrho-2}c^k &&\text{in }W^{1,p}(\Omega)', \label{discr-syst-appr-mu2}\\ &D_k(z)-\Delta_p(z^k)+\xi^k+(\conv{\sigma})'(z^k) + (\conc{\sigma})'(z^{k-1})+\nu|z^k|^{\varrho-2}z^k\notag\\ &\quad=-\breve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^k)-\invbreve{W}_{3,z}^\omega( c^{k},\varepsilon(\mathbf u^{k-1}),z^{k-1})+\vartheta^k\notag\\ &\quad\text{with }\xi^k\in \partial I_{Z^{k-1}}(z^k) &&\text{in }W^{1,p}(\Omega)', \label{discr-syst-appr-z2}\\ &D_k(\vartheta) + \mathcal{A}^k(\vartheta^k)+D_k(c)\vartheta^k+D_k(z)\vartheta^k+\rho\vartheta^k\dive(D_k(\mathbf u))\notag\\ &\quad=g^k+|D_k(c)|^2+|D_k(z)|^2+ a(c^{k-1},z^{k-1})\varepsilon(D_k(\mathbf u)):\mathbb{V}\varepsilon(D_k(\mathbf u))\notag\\ &\qquad+m(c^{k-1},z^{k-1})|\nabla\mu^k|^2 &&\text{in }H^{1}(\Omega)', \label{discr-syst-appr-teta2}\\ &D_k(D_k(\mathbf u))-\dive\Big(a(c^{k-1},z^{k-1})\mathbb{V}\varepsilon(D_k(\mathbf u)) + W_{,\varepsilon}^\omega( c^{k},\varepsilon(\mathbf u^k),z^k)\big) -\rho\vartheta^k\mathds 
1 \Big)\notag\\ &\qquad-\nu\dive\Big(|\varepsilon(\mathbf u^k-\mathbf{d}^k)|^{\varrho-2}\varepsilon(\mathbf u^k-\mathbf{d}^k)\Big)=\mathbf{f}^k &&\text{in }W_0^{1,\varrho}(\Omega;\mathbb{R}^d)' \label{discr-syst-appr-u2} \end{align} \end{subequations} satisfying the initial conditions \eqref{discre-initial-cond}, the boundary condition $\mathbf{u}^k=\mathbf{d}^k$ a.e. on $\partial\Omega$ and the constraints \color{black} \eqref{discre-constraints}. \end{lemma} \begin{proof} At the beginning we notice that \begin{align} \label{eqn:TtetaConv} \mathcal T_M(\vartheta_M^k)\to\vartheta^k\text{ strongly in }L^{p^*-\epsilon}(\Omega)\text{ for all } \epsilon\in (0,p^*-1] \color{black} \text{ as }M\to\infty, \end{align} which follows from the pointwise convergence $\mathcal T_M(\vartheta_M^k)\to\vartheta^k$ as $M\to\infty$ a.e. in $\Omega$ and the \color{black} uniform boundedness of $\|\mathcal T_M(\vartheta_M^k)\|_{L^{p^*}(\Omega)}$ with respect to $M$. We see that with the help of Lemma \ref{lemma:discrConvergence} and \eqref{eqn:TtetaConv}, also taking into account the growth properties of $W_{,\varepsilon}^\omega$ (cf.\ \eqref{oh-yes-quote}), \color{black} we can pass to $M\to\infty$ along \color{black} a subsequence in \eqref{discr-syst-appr-c} for $c$ and \eqref{discr-syst-appr-u} for $\mathbf{u}$ \color{black} and obtain \eqref{discr-syst-appr-c2} and \eqref{discr-syst-appr-u2}, respectively. The limit passages for the remaining equations are carried out as follows: \begin{itemize} \item[--] It follows from $\| c_M^k \|_{W^{1,p}(\Omega)} \leq C$ and from the Lipschitz continuity of $\beta_\omega$ that $(\conv{\phi}_\omega)'(c_M^k) = \beta_\omega(c_M^k) + \lambda c_M^k$ is bounded, uniformly in $M$ and $k=1,\ldots,K_\tau$, in $L^\infty(\Omega)$. 
This and the growth properties of $\breve{W}_{1,c}^\omega$ and $\invbreve{W}_{1,c}^\omega$ (cf.\ \eqref{est-quoted-5.1}--\eqref{est-quoted-5.2}), \color{black} together with Lemma \ref{lemma:discrConvergence} and convergence \color{black} \eqref{eqn:TtetaConv}, enable us to pass to $M\to\infty$ in equation \eqref{discr-syst-appr-mu} for $\mu$. \color{black} We find \eqref{discr-syst-appr-mu2}. \item[--] The limit passage in equation \eqref{discr-syst-appr-z} for $z$ \color{black} is managed via the variational formulation \eqref{eqn:varIneqZDiscr}. To this end we pick an arbitrary test-function $\zeta\in W^{1,p}(\Omega)$ with $0\leq \zeta\leq z^{k-1}$ and construct the recovery sequence \begin{align*} \zeta_M:=\max\{z^{k-1}-\delta_M,0\} \end{align*} for suitable values $\delta_M>0$ with $\delta_M\to 0$ such that $0\leq \zeta_M\leq z^{k-1} $ is fulfilled for all $M\in\mathbb{N}$. Now, testing \eqref{eqn:varIneqZDiscr} with $\zeta_M$ and passing to $M\to\infty$ with the help of Lemma \ref{lemma:discrConvergence} and \eqref{eqn:TtetaConv} yields \eqref{discr-syst-appr-z2}. \item[--] By exploiting Lemma \ref{lemma:discrConvergence}, property \eqref{eqn:TtetaConv} and a comparison argument as done in \cite[Lemma 4.4, Step 3]{RocRos14} we find \begin{align*} \mathcal A_M^k(\vartheta_M^k)\rightharpoonup \mathcal A^k(\vartheta^k) \ \ \text{ weakly \color{black} in }H^1(\Omega)' \text{ as }M\to\infty. \end{align*} This allows us to pass to the limit $M\to\infty$ in equation \eqref{discr-syst-appr-teta} for $\vartheta$ \color{black} in order to obtain \eqref{discr-syst-appr-teta2}. \end{itemize} \end{proof} \subsubsection{\textbf{Step 3: Limit passage ${ \boldsymbol \nu } { \boldsymbol \downarrow }{\bf 0}$. \color{black}}} \label{sss:4.2.3.} \noindent We now address the limit passage $\nu \downarrow 0$ and denote by $(c_\nu^k,\mu_\nu^k,z_\nu^k,\mathbf{u}_\nu^k)_\nu$ the family of solutions to system \eqref{discr-syst-appr2} found in Lemma \ref{lemma:discrConvergence}. 
By lower semicontinuity, estimates \eqref{est-5.7} from Lemma \ref{lemma:firstEstDiscr} are thus inherited by the functions $(c_\nu^k,\mu_\nu^k,z_\nu^k,\mathbf{u}_\nu^k)_\nu$. \color{black} Furthermore, we obtain a uniform $H^1(\Omega)$-estimate for $(\vartheta_\nu^k)_\nu$. Indeed, since the higher order terms $$ \nu\dive\big(|\nabla \mu_\nu^k|^{\varrho-2}\nabla\mu_\nu^k\big)-\nu\mu_\nu^k, \ldots, -\nu\dive\big(|\varepsilon(\mathbf{u}_\nu^k-\mathbf{d}^k)|^{\varrho-2}\varepsilon(\mathbf{u}_\nu^k-\mathbf{d}^k)\big)\color{black} $$ vanish as $\nu \downarrow 0$, \color{black} we lose \color{black} the $L^2(\Omega)$-estimate for the right-hand side of the discrete temperature equation \eqref{discr-syst-appr-teta2}. Therefore, to prove this $H^1$-bound for $\vartheta_\nu^k$ we have to resort to the arguments from the proof of the \emph{Second a priori estimate} in Sec.\ \ref{s:4}, and in particular fully exploit the coercivity properties of the function $\mathsf{K}$. \color{black} \begin{lemma} \label{lemma:theteEstDiscr-nu} The following estimates hold uniformly in $\nu>0$: \begin{align} \label{eqn:thetaEstDiscr-nu} \|\vartheta_\nu^k \|_{H^1(\Omega)}\leq C,\qquad \|(\vartheta_\nu^k)^{(\kappa+\alpha)/2} \|_{H^1(\Omega)} \leq C_\alpha \quad\text{for all }\alpha\in(0,1). \end{align} \end{lemma} \begin{proof} We test \eqref{discr-syst-appr-teta2} by $(\vartheta_\nu^k)^{\alpha-1}$, with $\alpha \in (0,1)$.
With the very same calculations as for the \emph{Second a priori estimate} (cf.\ \eqref{all-in-all} and \color{black} also the proof of Prop.\ \ref{prop:aprio-discr} later on), we conclude \[ \begin{aligned} &c\int_\Omega \mathsf{K}(\vartheta_\nu^k) |\nabla( \vartheta_\nu^k)^{\alpha/2}|^2 \, \mathrm{d} x + c\int_\Omega \left( \left| \varepsilon\left( D_k ( \mathbf u_\nu^k \color{black})\right) \right|^2 + |\nabla \mu_\nu^k|^2 \right) \color{black} (\vartheta_\nu^k)^{\alpha-1}\, \mathrm{d} x + c \int_\Omega \left( \left| D_k( z_\nu^k \color{black} )\right|^2 + \left| D_k( c_\nu^k \color{black})\right|^2 \right) (\vartheta_\nu^k)^{\alpha-1}\, \mathrm{d} x\\ &\leq C + C\int_\Omega (\vartheta_\nu^k)^{\alpha+1} \, \mathrm{d} x\,. \end{aligned} \] Then, with the same arguments as in Sec.\ \ref{s:4}, we arrive at $\int_\Omega |\nabla (\vartheta_\nu^k)^{(\kappa+\alpha)/2}|^2 \, \mathrm{d} x \leq C$ for a constant independent of $\nu$. Ultimately, we conclude \eqref{eqn:thetaEstDiscr-nu}. \end{proof} By comparison arguments based on the \textit{Third estimate} we then \color{black} obtain uniform estimates for $(\mathbf u_\nu^k)_\nu$ and for $(\mu_\nu^k)_\nu$ with respect to $\nu$. \begin{lemma} The following estimates hold uniformly in $\nu>0$: \begin{align} \label{eqn:uMuEstDiscr-nu} \|\mathbf u_\nu^k \|_{H^1(\Omega;\mathbb{R}^d)} + \|\mu_\nu^k\|_{H^1(\Omega)}\leq C. \end{align} \end{lemma} \begin{proof} We proceed as in the \textit{Third estimate} in Section \ref{s:4}: we test the time-discrete heat equation \eqref{discr-syst-appr-teta2} with $\tau$ and subtract the resulting equation from the incremental energy inequality \eqref{discr-total-ineq} (the limit version $M\to\infty$). 
In particular we obtain boundedness with respect to $\nu$ of \begin{align*} \int_\Omega a(c^{k-1},z^{k-1})\frac{\varepsilon(\mathbf u_\nu^k-\mathbf u^{k-1})}{\tau}:\mathbb{V}\frac{\varepsilon(\mathbf u_\nu^k-\mathbf u^{k-1})}{\tau}\,\mathrm dx + \int_\Omega m(c^{k-1},z^{k-1})|\nabla\mu_\nu^k|^2\,\mathrm dx\leq C. \end{align*} Hence $\|\varepsilon(\mathbf u_\nu^k)\|_{L^2(\Omega;\mathbb{R}^{d\times d})}$ and $\|\nabla\mu_\nu^k\|_{L^2(\Omega)}$ are bounded in $\nu$. Korn's inequality applied to $\mathbf{u}_\nu^k-\mathbf{d}^k$ shows the first part of the claim, namely boundedness of $\|\mathbf u_\nu^k \|_{H^1(\Omega;\mathbb{R}^d)}$. The proof of the second part makes use of the Poincar\'e inequality. To this end boundedness of the spatial mean of $\mu_\nu^k$ has to be shown. Testing the time-discrete equation \eqref{discr-syst-appr-mu2} with $1/|\Omega|$ shows \begin{align*} \Xint-_\Omega\mu_\nu^k\,\mathrm dx={}&\Xint-_\Omega(\conv{\phi}_\omega)'(c_\nu^k)+(\conc{\phi})'(c^{k-1}) +\breve{W}_{1,c}^\omega(c_\nu^k,\varepsilon(\mathbf u^{k-1}),z^{k-1})+\invbreve{W}_{1,c}^\omega(c^{k-1},\varepsilon(\mathbf u^{k-1}),z^{k-1})\,\mathrm dx\\ &+\Xint-_\Omega-\vartheta_\nu^k+\frac{c_\nu^k-c^{k-1}}{\tau}+\nu|c_\nu^k|^{\varrho-2}c_\nu^k\,\mathrm dx. \end{align*} By the known boundedness properties of $(c_\nu^k)_\nu$, $(\mathbf u_\nu^k)_\nu$, $(z_\nu^k)_\nu$ and $(\vartheta_\nu^k)_\nu$, and the growth of $\breve{W}_{1,c}^\omega$, $\invbreve{W}_{1,c}^\omega$ (cf.\ \eqref{est-quoted-5.1}--\eqref{est-quoted-5.2}), \color{black} and of $(\conv{\phi}_\omega)'$ (affine-linear growth in $c$ due to Yosida approximation with parameter $\tau$), we then infer boundedness of $\Xint-_\Omega\mu_\nu^k$. Together with boundedness of $\|\nabla\mu_\nu^k\|_{L^2(\Omega)}$ we conclude \color{black} the second part of the claim by the Poincar\'e inequality. 
\end{proof} We then have the following counterpart to Lemma \ref{lemma:discrConvergence}, which reflects the lesser regularity of the solution components $\mu^k$ and $\mathbf{u}^k$ as a result of the limit passage as $\nu \downarrow 0$. Its proof is a straightforward adaptation of the argument developed for Lemma \ref{lemma:discrConvergence}. \begin{lemma} \label{lemma:discrConvergence-nu} There exist $(c^k,\mu^k, z^k,\vartheta^k,\mathbf u^k) \in W^{1,p}(\Omega) \times H^1(\Omega)\times W^{1,p}(\Omega) \times H^1(\Omega)\times H^1(\Omega;\mathbb{R}^d)$\color{black} and a (not relabeled) subsequence $\nu \downarrow 0$ such that convergences \eqref{eqn:strongConvCDiscr}, \eqref{eqn:strongConvZDiscr}--\eqref{eqn:strongConvTDiscr} hold, as well as \begin{subequations} \label{eqn:discrConv-nu} \begin{align} &\mu_\nu^k \to \mu^k\text{ strongly in }H^1(\Omega),\label{eqn:strongConvMuDiscr-nu}\\ &\nu|\nabla\mu_\nu^k|^{\varrho-2}\nabla\mu_\nu^k \to 0 \text{ strongly in } L^{\varrho/(\varrho-1)} (\Omega;\mathbb{R}^{d}),\label{conv-nu-muk-zero}\\ &\mathbf u_\nu^k \to \mathbf u^k\text{ strongly in }H^1(\Omega;\mathbb{R}^d),\label{eqn:strongConvUDiscr-nu}\\ & \nu |\varepsilon(\mathbf u_\nu^k-\mathbf{d}^k)|^{\varrho-2}\varepsilon(\mathbf u_\nu^k-\mathbf{d}^k) \to 0 \text{ strongly in } L^{\varrho/(\varrho-1)} (\Omega;\mathbb{R}^{d\times d}). \label{conv-nu-uk-zero} \end{align} \end{subequations} \end{lemma} We are now in the position to carry out the \underline{\textbf{limit passage as ${\boldsymbol \nu }{\boldsymbol \downarrow} {\bf 0}$ \color{black} in system \eqref{discr-syst-appr2}}}. The arguments for taking the limits in \eqref{discr-syst-appr-c2}, \eqref{discr-syst-appr-mu2}, \eqref{discr-syst-appr-z2}, and \eqref{discr-syst-appr-u2} are completely analogous to those developed in the proof of Lemma \ref{lemma:5.7}. Hence we only comment on the limit passage in the discrete heat equation \eqref{discr-syst-appr-teta2}. 
Estimate \eqref{eqn:thetaEstDiscr-nu} allows us to conclude that, up to a subsequence, $(\vartheta_\nu^k)^{(\kappa+\alpha)/2} \rightharpoonup (\vartheta^k)^{(\kappa+\alpha)/2}$ in $H^1(\Omega)$, hence $(\vartheta_\nu^k)^{(\kappa+\alpha)/2} \to (\vartheta^k)^{(\kappa+\alpha)/2}$ in $L^{6-\epsilon}(\Omega)$ for all $\epsilon>0$, whence, taking into account the growth condition on $\mathsf{K}$, that \[ \mathsf{K}(\vartheta_\nu) \to \mathsf{K}(\vartheta) \qquad \text{in } L^\gamma(\Omega) \quad \text{with } \gamma = \frac{(6-\epsilon)(\kappa+\alpha)}{2\kappa} \quad \text{for all } \epsilon>0. \] This allows us to pass to the limit in the term $\mathsf{K}(\vartheta_\nu) \nabla \vartheta_\nu$, tested against $v \in W^{1,s}(\Omega)$ for some sufficiently large \color{black} $s>0$. All in all, we infer that $(c,\mu,z,\vartheta,\mathbf{u},\chi)$ solves \color{black} system \eqref{PDE-discrete}, with \eqref{eqn:discr2} and \eqref{eqn:discr3} in $W^{1,p}(\Omega)'$, and with the discrete heat equation \eqref{eqn:discr4} understood in $W^{1,s}(\Omega)'$. In the next step, we will address enhancements of the \color{black} regularities of $\mathbf{u}$ and $\mu$. As a by-product we will obtain the discrete heat equation \eqref{eqn:discr4} understood in the $H^1(\Omega)'$-sense. \subsubsection{\textbf{Step 4: ${\bf H^2}$-regularity of ${\bf \mathbf u^k}$ and ${ \boldsymbol \mu^{\bf k}}$ \color{black} and conclusion of the proof of Prop.\ \ref{prop:exist-discr}}} \noindent To complete the \underline{\textbf{proof of Proposition \ref{prop:exist-discr}}} we have to improve the regularity of $\mathbf u^k$ and $\mu^k$. This is achieved by transforming the corresponding equations in a way that enables us to apply standard elliptic regularity results. \begin{lemma} \label{lemma:4.16} We get $\mu^k\in H_N^2(\Omega)$ and $\mathbf u^k\in H^2(\Omega; \mathbb{R}^d)\color{black}$ for the functions obtained in Lemma \ref{lemma:discrConvergence-nu}. 
\end{lemma} \begin{proof} We will use an iteration argument as in \cite{RocRos14,hr} (see also \cite{bm}) and sketch the proof for the case $d=3$, since the calculations for $d=2$ are completely analogous. \color{black} We already know that $\mu^k\in H^1(\Omega)$ satisfies the elliptic equation $$ \int_\Omega m(c^{k-1},z^{k-1})\nabla\mu^k\cdot\nabla w\,\mathrm dx=\int_\Omega -D_k(c)w\,\mathrm dx \qquad\text{for all }w\in H^1(\Omega). $$ Substituting $w=\frac{\zeta}{m(c^{k-1},z^{k-1})}\in H^1(\Omega)$ for an arbitrarily chosen test-function $\zeta\in H^1(\Omega)$ yields \begin{align*} \int_\Omega \nabla\mu^k\cdot\nabla\zeta\,\mathrm dx =\int_\Omega\Big(\frac{-D_k(c)}{m(c^{k-1},z^{k-1})}+\frac{m_{,c}(c^{k-1},z^{k-1})\nabla c^{k-1}+m_{,z}(c^{k-1},z^{k-1})\nabla z^{k-1}}{ m(c^{k-1},z^{k-1})\color{black}}\cdot\nabla\mu^k\Big)\zeta\,\mathrm dx \end{align*} valid for all $\zeta\in H^1(\Omega)$. Note that, due to Hypothesis (II) and the fact that \color{black} $c^{k-1},z^{k-1}\in W^{1,p}(\Omega)$ and $\nabla \mu^k\in L^2(\Omega;\mathbb{R}^d)$, the function in the bracket on the right-hand side is in $L^{2p/(2+p)}(\Omega)$. Applying a higher elliptic regularity result for homogeneous Neumann problems with $L^{2p/(2+p)}(\Omega)$-right-hand side proves $\mu^k\in W^{2,2p/(2+p)}(\Omega)$ and thus $\nabla\mu^k\in L^{6p/(6+p)}(\Omega;\mathbb{R}^d)$. Due to $p>3$ we end up with $\mu^k\in H_N^2(\Omega)$ after repeating this procedure finitely many times (cf. \cite[Proof of Lemma 4.1]{hr}). The proof for obtaining $\mathbf u^k\in H^2(\Omega;\mathbb{R}^d)$ from the elliptic equation \eqref{discr-syst-appr-u} in $H_0^1(\Omega;\mathbb{R}^d)'$ works as in \cite[Proof of Lemma 4.4 - Step 6]{RocRos14} (cf.\ also \cite{hr}), \color{black} with the exception that one needs to take the Dirichlet data $\mathbf{d}^k\in H^2(\Omega;\mathbb{R}^d)$ into account. 
This is the very point where we need to assume that $\mathbb{V}=\omega \mathbb{C}$ for some $\omega>0$ (cf.~\eqref{eqn:assbV}).\color{black} \end{proof} The enhanced regularity for $\mathbf u^k$ yields, by a comparison argument in \eqref{eqn:discr4}, that \eqref{eqn:discr4} not only holds in $W^{1,s}(\Omega)'$ for large $s>1$ but even in $H^1(\Omega)'$. Finally, we end up with a quintuple $\{(c_\tau^{k}, \mu_\tau^{k},z_\tau^k,\vartheta_\tau^k,\ub_\tau^k)\}_{k=1}^{K_\tau}\subseteq W^{1,p}(\Omega)\times H_N^2(\Omega)\times W^{1,p}(\Omega)\times H^1(\Omega)\times H^2(\Omega;\mathbb{R}^d)$ satisfying the assertion stated in Proposition \ref{prop:exist-discr}. \par This concludes the proof. \color{black} \hfill $\square$ \subsection{Discrete energy and entropy inequalities} \label{ss:5.3} We introduce the left-continuous and right-continuous piecewise constant, and the piecewise linear interpolants for a given sequence $\{\mathfrak{h}_\tau^k\}_{k={ 0}}^{K_\tau}$ on the nodes $\{t_\tau^k\}_{k=0}^{K_\tau}$ (see \ref{time-nodes}) by \begin{align*} \left. \begin{array}{llll} & \pwc {\mathfrak{h}}{\tau}: (0,T) \to B & \text{defined by} & \pwc {\mathfrak{h}}{\tau}(t): = \mathfrak{h}_\tau^k \\ & \upwc {\mathfrak{h}}{\tau}: (0,T) \to B & \text{defined by} & \upwc {\mathfrak{h}}{\tau}(t) := \mathfrak{h}_\tau^{k-1} \\ & \pwl {\mathfrak{h}}{\tau}: (0,T) \to B & \text{defined by} & \pwl {\mathfrak{h}}{\tau}(t): =\frac{t-t_\tau^{k-1}}{\tau} \mathfrak{h}_\tau^k + \frac{t_\tau^k-t}{\tau}\mathfrak{h}_\tau^{k-1} \end{array} \right\} \qquad \text{for $t \in (t_\tau^{k-1}, t_\tau^k]$.} \end{align*} Furthermore, we denote by $\pwc{\mathsf{t}}{\tau}$ and by $\upwc{\mathsf{t}}{\tau}$ the left-continuous and right-continuous piecewise constant interpolants associated with the partition, i.e. $\pwc{\mathsf{t}}{\tau}(t) := t_\tau^k$ if $t_\tau^{k-1}<t \leq t_\tau^k $ and $\upwc{\mathsf{t}}{\tau}(t):= t_\tau^{k-1}$ if $t_\tau^{k-1} \leq t < t_\tau^k $. 
Clearly, for every $t \in [0,T]$ we have $\pwc{\mathsf{t}}{\tau}(t) \downarrow t$ and $\upwc{\mathsf{t}}{\tau}(t) \uparrow t$ as $\tau\downarrow 0$. \begin{proposition} \label{prop:energyEntropyIneq} Let the assumptions of Proposition \ref{prop:exist-discr} be satisfied. Then the time-discrete solutions $\{(c_\tau^{k},\mu_\tau^{k},z_\tau^k,\vartheta_\tau^k,\ub_\tau^k)\}_{k=1}^{K_\tau}$ to Problem \ref{def:time-discrete} fulfill for all $0\leq s\leq t\leq T$ \begin{itemize} \item[(i)] the discrete entropy inequality \begin{equation} \label{discr-entropy-ineq} \begin{aligned} &\begin{aligned} \int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)} \int_\Omega (\log(\underline\vartheta_\tau) + \underline c_\tau+\underline z_\tau)\partial_t\varphi_\tau \, \mathrm{d} x \, \mathrm{d} r &-\rho \int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)} \int_\Omega \dive(\partial_t\mathbf{u}_\tau)\overline\varphi_\tau \, \mathrm{d} x \, \mathrm{d} r\\ &-\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)} \int_\Omega \mathsf{K}(\overline\vartheta_\tau) \nabla \log(\overline\vartheta_\tau) \cdot \nabla\overline\varphi_\tau \, \mathrm{d} x \, \mathrm{d} r \end{aligned}\\ &\begin{aligned} \leq \int_\Omega (\log(\overline\vartheta_\tau(t))+\overline c_\tau(t)+\overline z_\tau(t)){\overline\varphi_\tau(t)} \, \mathrm{d} x &-\int_\Omega (\log(\overline\vartheta_\tau(s))+\overline c_\tau (s)+\overline z_\tau(s)){\overline\varphi_\tau(s)} \, \mathrm{d} x\\ &-\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)} \int_\Omega \mathsf{K}(\overline \vartheta_\tau)|\nabla\log(\overline\vartheta_\tau)|^2\overline\varphi_\tau\, \mathrm{d} x \, \mathrm{d} r \end{aligned}\\ &\quad-\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)} \int_\Omega \left(\overline g_\tau +|\partial_t c_\tau|^2+ |\partial_t z_\tau|^2 +a(\underline c_\tau,\underline z_\tau) \varepsilon(\partial_t\mathbf{u}_\tau):\mathbb{V} 
\varepsilon(\partial_t\mathbf{u}_\tau) +m(\underline c_\tau,\underline z_\tau)|\nabla \overline \mu_\tau|^2\right)\frac{\overline\varphi_\tau}{\overline\vartheta_\tau} \, \mathrm{d} x \, \mathrm{d} r\\ &\quad-\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)} \int_{\partial\Omega}\overline h_\tau \frac{\overline\varphi_\tau}{\overline\vartheta_\tau} \, \mathrm{d} S \, \mathrm{d} r, \end{aligned} \end{equation} for all $\varphi \in \mathrm{C}^0 ([0,T]; W^{1,d+\epsilon}(\Omega)) \cap H^1 (0,T; L^{({d^\star})'}(\Omega))$ for some $\epsilon>0$, with $\varphi \geq 0$; \item[(ii)] the discrete total energy inequality \begin{align} \label{discr-energy-ineq} \begin{aligned} &\mathscr{E}_\omega(\overline c_\tau(t),\overline z_\tau(t),\overline\vartheta_\tau(t),\overline\mathbf u_\tau(t),\overline \mathbf v_\tau(t))\\ &\qquad\leq\mathscr{E}_\omega (\overline c_\tau(s),\overline z_\tau(s),\overline\vartheta_\tau(s),\overline\mathbf u_\tau(s),\overline \mathbf v_\tau(s))\\ &\qquad\quad+\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega \overline g_\tau\,\mathrm dx\,\mathrm dr +\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega\overline{\bold f}_\tau \cdot\overline \mathbf v_\tau\,\mathrm dx\,\mathrm dr +\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_{\partial\Omega}\overline h_\tau\,\mathrm dS\,\mathrm dr\\ &\qquad\quad+\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_{\partial\Omega}({\boldsymbol{\sigma}}_\tau { \bf n \color{black}})\cdot\partial_t\mathbf{d}_\tau\, \mathrm{d} S\,\mathrm dr \end{aligned} \end{align} with the discrete stress tensor \begin{align*} {\boldsymbol{\sigma}}_\tau:=a(\upwc c{\tau},\upwc z{\tau})\mathbb{V}\varepsilon(\partial_t \mathbf{u}_\tau) +W_{,\varepsilon}^\omega(\pwc c{\tau},\varepsilon(\pwc \mathbf{u}{\tau}),\pwc z{\tau}) -\rho\pwc\vartheta{\tau}\mathds 1. 
\end{align*} \end{itemize} \end{proposition} \begin{proof} $\,$ \begin{itemize} \item[To (i):] The proof is based on \cite[Proof of Proposition 4.8]{RocRos14}. Testing the time-discrete heat equation \eqref{eqn:discr4} for time step $k$ with $\frac{\varphi_\tau^k}{\vartheta_\tau^k}\in H^1(\Omega)$ shows \begin{align*} &\int_\Omega\bigg(g_\tau^k+|D_{\tau,k}(c)|^2+|D_{\tau,k}(z)|^2+m(c_\tau^{k-1},z_\tau^{k-1})|\nabla\mu_\tau^{k}|^2\bigg)\frac{\varphi_\tau^k}{\vartheta_\tau^k}\,\mathrm dx\\ &+\int_\Omega a(c_\tau^{k-1},z_\tau^{k-1})\varepsilon(D_{\tau,k}(\mathbf u)):\mathbb{V}\varepsilon(D_{\tau,k}(\mathbf u))\frac{\varphi_\tau^k}{\vartheta_\tau^k}\,\mathrm dx +\int_{\partial\Omega}h_\tau^k\frac{\varphi_\tau^k}{\vartheta_\tau^k}\,\mathrm dS\\ &\leq\int_\Omega\bigg(\mathsf{K}(\vartheta_\tau^k)\nabla\vartheta_\tau^k\cdot\nabla\frac{\varphi_\tau^k}{\vartheta_\tau^k} +\bigg(\frac{1}{\tau}\big(\log(\vartheta_\tau^k)-\log(\vartheta_\tau^{k-1})\big)+D_{\tau,k}(c)+D_{\tau,k}(z)+\rho\dive(D_{\tau,k}(\mathbf u))\bigg)\varphi_\tau^k\bigg)\,\mathrm dx \end{align*} by using the concavity estimate $$ \frac{\vartheta_\tau^k-\vartheta_\tau^{k-1}}{\vartheta_\tau^k} \leq\log(\vartheta_\tau^k)-\log(\vartheta_\tau^{k-1}). $$ Summing over $k=\frac{\overline{\mathsf t}_\tau(s)}\tau+1,\ldots,\frac{\overline{\mathsf t}_\tau(t)}\tau$ \color{black} and using discrete by-parts integration proves \eqref{discr-entropy-ineq}. \item[To (ii):] The total energy inequality is inherited from the incremental energy inequality \eqref{discr-total-ineq} of the $(M,\nu)$-regularized system in Lemma \ref{l:energy-est}. Indeed, let $0\leq s\leq t\leq T$. 
Passing to the limits $M\to\infty$ and $\nu\downarrow 0$ in \eqref{discr-total-ineq} by means of lower semicontinuity \color{black} arguments and then summing over $j=\frac{\overline{\mathsf t}_\tau(s)}{\tau}+1,\ldots,\frac{\overline{\mathsf t}_\tau(t)}{\tau}$ yields \begin{align*} &\mathscr{E}_\omega(\pwc c{\tau}(t),\pwc z{\tau}(t),\pwc \vartheta{\tau}(t),\pwc \mathbf{u}{\tau}(t),\pwc \mathbf v{\tau}(t))\\ &\qquad\leq\mathscr{E}_\omega (\pwc c{\tau}(s),\pwc z{\tau}(s),\pwc \vartheta{\tau}(s),\pwc \mathbf{u}{\tau}(s),\pwc \mathbf v{\tau}(s)) +\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\bigg(\int_\Omega \pwc g{\tau}\,\mathrm dx + \int_{\partial\Omega} \pwc h{\tau} \,\mathrm dS +\int_\Omega \pwc{\bold f}{\tau} \cdot\pwc\mathbf v{\tau}\,\mathrm dx\bigg)\,\mathrm dr\\ &\left. \begin{aligned} &\qquad\quad+\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega \partial_t \mathbf v_\tau\cdot \partial_t \mathbf{d}_\tau\,\mathrm dx\,\mathrm dr +\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega a(\upwc c{\tau},\upwc z{\tau})\mathbb{V}\varepsilon(\pwc \mathbf v{\tau}):\varepsilon(\partial_t \mathbf{d}_\tau)\,\mathrm dx\,\mathrm dr\\ &\qquad\quad+\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega W_{,\varepsilon}^\omega(\pwc c{\tau},\varepsilon(\pwc\mathbf{u}{\tau}),\pwc z{\tau}):\varepsilon(\partial_t\mathbf{d}_\tau)\,\mathrm dx\,\mathrm dr -\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega\rho\pwc\vartheta{\tau}\dive(\partial_t\mathbf{d}_\tau)\,\mathrm dx\,\mathrm dr\\ &\qquad\quad-\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega \pwc{\bold f}{\tau} \cdot \partial_t\mathbf{d}_\tau\,\mathrm dx\,\mathrm dr. 
\end{aligned} \right\} =:I_1 \end{align*} Finally, integration by parts in space together with \eqref{eqn:discr5} shows \begin{align*} I_1={}&\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega\Big(\underbrace{\partial_t \mathbf v_\tau -\dive\Big(a(\upwc c{\tau},\upwc z{\tau})\mathbb{V}\varepsilon(\pwc \mathbf v{\tau}) +W_{,\varepsilon}^\omega(\pwc c{\tau},\varepsilon(\pwc\mathbf{u}{\tau}),\pwc z{\tau}) -\rho\pwc\vartheta{\tau}\mathds 1\Big)-\pwc{\bold f}{\tau}}_{=0}\Big)\cdot \partial_t \mathbf{d}_\tau\,\mathrm dx\,\mathrm dr\\ &+\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_{\partial\Omega} \Big(a(\upwc c{\tau},\upwc z{\tau})\mathbb{V}\varepsilon(\pwc \mathbf v{\tau}) +W_{,\varepsilon}^\omega(\pwc c{\tau},\varepsilon(\pwc\mathbf{u}{\tau}),\pwc z{\tau}) -\rho\pwc\vartheta{\tau}\mathds 1\Big){ \bf n \color{black}} \cdot\partial_t \mathbf{d}_\tau\, \mathrm{d} S\,\mathrm dr\\ ={}&\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_{\partial\Omega}({\boldsymbol{\sigma}}_\tau { \bf n } \color{black})\cdot\partial_t\mathbf{d}_\tau\, \mathrm{d} S\,\mathrm dr. \end{align*} \end{itemize} \end{proof} \subsection{A priori estimates} \label{ss:5.4} The aim of this section is to customize the a priori estimates which we have developed \color{black} in Section \ref{s:4} \color{black} to the time-discrete setting described in Problem \ref{def:time-discrete}, for a time-discrete solution $(\overline c_\tau,\underline c_\tau, c_\tau,\overline\mu_\tau,\overline z_\tau,\underline z_\tau, z_\tau, \overline\vartheta_\tau, \underline\vartheta_\tau, \vartheta_\tau, \overline\mathbf u_\tau,\underline\mathbf u_\tau,\overline\mathbf v_\tau,\mathbf v_\tau)$ (recall that $\mathbf{v}^k = D_k (\mathbf{u})$ for all $k \in \{1, \ldots, K_\tau\}$). 
Let us mention in advance that, in this \color{black} time-discrete setting we are only able to estimate (cf.\ \eqref{thetaBound1} below) the \color{black} supremum of the total variation $\langle\log(\overline\vartheta_\tau),\varphi\rangle_{W^{1,d+\epsilon}}$ over all test-functions $\varphi\in W^{1,d+\epsilon}(\Omega)$ with $\|\varphi\|_{W^{1,d+\epsilon}(\Omega)}\leq 1$, which is a slightly weaker result than the \textbf{Seventh a priori estimate} in Section \ref{s:4} however strong enough to apply the compactness result proved in \color{black} \cite[Theorem A.5]{RocRos14}. \begin{proposition} \label{prop:aprio-discr} Let the assumptions of Proposition \ref{prop:exist-discr} be satisfied. Then the time-discrete solutions $\{(c_\tau^{k},\mu_\tau^{k},z_\tau^k,\vartheta_\tau^k,\ub_\tau^k)\}_{k=1}^{K_\tau}$ to Problem \ref{def:time-discrete} fulfill the following a priori estimates uniformly in $\omega>0$ and $\tau>0$: \begin{align} &\|\overline c_\tau\|_{L^\infty(0,T;W^{1,p}(\Omega))}+\|\underline c_\tau\|_{L^\infty(0,T;W^{1,p}(\Omega))}\leq C, \label{cBound1}\\ &\|c_\tau\|_{H^1(0,T;L^2(\Omega))\cap L^\infty(0,T;W^{1,p}(\Omega))}\leq C, \label{cBound2}\\ &\|\Delta_p(\overline c_\tau)\|_{L^2(0,T;L^{2}(\Omega))}\leq C, \label{cBound3}\\ &\|\overline\eta_\tau\|_{L^2(0,T;L^{2}(\Omega))}\leq C\qquad \text{with }\overline\eta_\tau:=\beta_\omega(\overline c_\tau), \label{etaBound}\\ &\|\overline\mu_\tau\|_{L^2(0,T;H^2(\Omega))}\leq C, \label{muBound}\\ &\|\overline z_\tau\|_{L^\infty(0,T;W^{1,p}(\Omega))}+\|\underline z_\tau\|_{L^\infty(0,T;W^{1,p}(\Omega))}\leq C, \label{zBound1}\\ &\|z_\tau\|_{H^1(0,T;L^2(\Omega))\cap L^\infty(0,T;W^{1,p}(\Omega))}\leq C, \label{zBound2}\\ &\|\overline\vartheta_\tau\|_{L^2(0,T;H^1(\Omega))\cap L^\infty(0,T;L^1(\Omega))}\leq C, \label{thetaBound1}\\ &\big\|(\overline\vartheta_\tau)^{\frac{\kappa+\alpha}{2}}\big\|_{L^2(0,T;H^1(\Omega))}\leq C_\alpha \text{ for all }\alpha\in(0,1), \label{thetaBound2}\\ 
&\|\log(\overline\vartheta_\tau)\|_{L^2(0,T;H^1(\Omega))}\leq C, \label{thetaBound3}\\ &\|\overline\mathbf u_\tau\|_{L^\infty(0,T;H^2(\Omega;\mathbb{R}^d))}+\|\underline\mathbf u_\tau\|_{L^\infty(0,T;H^2(\Omega;\mathbb{R}^d))}\leq C, \label{uBound1}\\ &\|\mathbf u_\tau\|_{H^1(0,T;H^2(\Omega;\mathbb{R}^d))\cap W^{1,\infty}(0,T;H^1(\Omega;\mathbb{R}^d))}\leq C, \label{uBound2}\\ &\|\mathbf v_\tau\|_{L^2(0,T;H^2(\Omega;\mathbb{R}^d))\cap H^1(0,T;L^2(\Omega;\mathbb{R}^d))}\leq C \label{vBound} \end{align} as well as \begin{align} \label{thetaBound4} &\sup_{\varphi\in W^{1,d+\epsilon}(\Omega), \|\varphi\|_{W^{1,d+\epsilon}(\Omega)} \color{black} \leq 1} \mathrm{Var}\big(\langle\log(\overline\vartheta_\tau),\varphi\rangle_{W^{1,d+\epsilon}};[0,T]\big) \leq C_\epsilon \quad\text{for all }\epsilon>0. \end{align} Under the additional assumption \eqref{range-k-admissible} we also have \begin{align} \label{thetaBoundAdd} \|\vartheta_\tau\|_{ \mathrm{BV}\color{black}([0,T];W^{2,d+\epsilon}(\Omega)')}\leq C_\epsilon \quad\text{for all }\epsilon>0. \end{align} \end{proposition} \begin{proof} The proof mainly follows the lines in Section \ref{s:4}. Besides this, the estimates for the time-discrete variables $z_\tau$, $\vartheta_\tau$ and $\mathbf u_\tau$ are based on \cite[Proof of Proposition 4.10]{RocRos14}. To avoid repetition we will refer to the estimates in Section \ref{s:4} when necessary. \begin{itemize} \item[(i)] The time-discrete total energy inequality from Proposition \ref{prop:energyEntropyIneq} (ii) implies the following estimates (see \textbf{First a priori estimate}): \begin{align*} \|\overline c_\tau\|_{L^\infty(0,T;W^{1,p}(\Omega))} +\|\nabla\overline z_\tau\|_{L^\infty(0,T;W^{1,p}(\Omega;\mathbb{R}^d))} +\|\overline\vartheta_\tau\|_{L^\infty(0,T;L^1(\Omega))} +\|\mathbf v_\tau\|_{L^\infty(0,T;L^2(\Omega;\mathbb{R}^d))} \color{black} \leq C. 
\end{align*} \item[(ii)] For the \textbf{Second a priori estimate}, testing the time-discrete heat equation \eqref{eqn:discr4} with $F'(\vartheta_\tau^k)=(\vartheta_\tau^k)^{\alpha-1}$, $\alpha\in(0,1)$, for the concave function $F(\vartheta):=\vartheta^\alpha/\alpha$, we obtain \begin{align*} &\int_\Omega\Big(g_\tau^k+|D_{\tau,k}(c)|^2+|D_{\tau,k}(z)|^2+m(c_\tau^{k-1},z_\tau^{k-1})|\nabla\mu_\tau^{k}|^2\Big)F'(\vartheta_\tau^k)\,\mathrm dx\\ &+\int_\Omega a(c_\tau^{k-1},z_\tau^{k-1})\varepsilon(D_{\tau,k}(\mathbf u)):\mathbb{V}\varepsilon(D_{\tau,k}(\mathbf u))F'(\vartheta_\tau^k)\,\mathrm dx +\int_{\partial\Omega}h_\tau^k F'(\vartheta_\tau^k)\,\mathrm dS\\ &\leq\int_\Omega\frac{F(\vartheta_\tau^k)-F(\vartheta_\tau^{k-1})}{\tau}+\mathsf{K}(\vartheta_\tau^k)\nabla\vartheta_\tau^k\cdot\nabla(F'(\vartheta_\tau^k))\,\mathrm dx\\ &\qquad+\int_\Omega\Big(D_{\tau,k}(c)+D_{\tau,k}(z)+\rho\dive(D_{\tau,k}(\mathbf u))\Big)\vartheta_\tau^k F'(\vartheta_\tau^k)\,\mathrm dx \end{align*} by using the concavity \color{black} estimate $(\vartheta_\tau^k-\vartheta_\tau^{k-1})F'(\vartheta_\tau^k)\leq F(\vartheta_\tau^k)-F(\vartheta_\tau^{k-1})$. Multiplying by $\tau$ and summing over $k=1,\ldots,\overline{\mathsf t}_\tau(t)/\tau$ shows for every $t\in(0,T]$ the precise time-discrete analogue to \eqref{eqn:secondEstPre}. With the same calculations as in Section \ref{s:4} we end up with \begin{align*} \|\overline\vartheta_\tau\|_{L^2(0,T;H^1(\Omega))}\leq C,\qquad \|(\overline\vartheta_\tau)^{(\kappa+\alpha)/2}\|_{L^2(0,T;H^1(\Omega))}\leq C_\alpha. 
\end{align*} \item[(iii)] By testing the time-discrete heat equation \eqref{eqn:discr4} with $\tau$, integrating over $\Omega$, summing over $k$ and subtracting the result from the total energy inequality \eqref{discr-energy-ineq} we obtain the \textbf{Third a priori estimate}: \begin{align*} \|\partial_t c_\tau\|_{L^2(Q)} + \|\nabla \overline\mu_\tau \|_{L^2(Q;\mathbb{R}^d)} + \|\partial_t z_\tau\|_{L^2(Q)}+\|\partial_t \mathbf{u}_\tau\|_{L^2(0,T; H^1(\Omega;\mathbb{R}^d))} \leq C \end{align*} as well as \begin{align*} \|\overline z_\tau \|_{L^\infty (0,T;W^{1,p}(\Omega))} +\|\overline \mathbf u_\tau \|_{L^\infty (0,T;H^{1}(\Omega;\mathbb{R}^d))} \leq C. \end{align*} \item[(iv)] The \textbf{Fourth a priori estimate} is obtained by testing the time-discrete force balance equation \eqref{eqn:discr5} by $-\tau\dive(\mathbb{V}\varepsilon(\mathbf v_\tau^k))$. The calculations in Section \ref{s:4} carry over to the time-discrete setting. However, let us point out that the discrete analogue of \eqref{est-added-0} is given by \color{black} the convexity estimate \begin{align*} -\int_0^{\overline{\mathsf t}_\tau(t)} \int_\Omega \partial_t \mathbf v_\tau\cdot \mathbf{\dive} (\mathbb{V}\varepsilon(\overline\mathbf v_\tau)) \, \mathrm{d} x \, \mathrm{d} s \geq{}& -\int_0^{\overline{\mathsf t}_\tau(t)} \int_{\partial\Omega}\partial_t \mathbf v_\tau\cdot (\mathbb{V}\varepsilon(\overline\mathbf v_\tau) { \bf n \color{black}}) \, \mathrm{d} S \, \mathrm{d} s\\ &+\int_\Omega \frac12 \varepsilon(\overline\mathbf v_\tau(t)):\mathbb{V} \varepsilon(\overline\mathbf v_\tau(t)) \, \mathrm{d} x -\int_\Omega \frac12 \varepsilon(\mathbf v^0):\mathbb{V} \varepsilon(\mathbf v^0) \, \mathrm{d} x. 
\end{align*} With analogous calculations we arrive at \begin{align*} \qquad&\|\mathbf u_\tau\|_{H^1(0,T;H^2(\Omega;\mathbb{R}^d))} +\|\overline\mathbf u_\tau\|_{L^\infty(0,T;H^2(\Omega;\mathbb{R}^d))}\leq C, \color{black}\\ &\|\mathbf v_\tau\|_{H^1(0,T;L^2(\Omega;\mathbb{R}^d))\cap L^\infty(0,T;H^1(\Omega;\mathbb{R}^d))\cap L^2(0,T;H^2(\Omega;\mathbb{R}^d))} +\|\overline\mathbf v_\tau\|_{L^\infty(0,T;H^1(\Omega;\mathbb{R}^d))\cap L^2(0,T;H^2(\Omega;\mathbb{R}^d))}\leq C. \end{align*} \item[(v)] For the \textbf{Fifth a priori estimate} we test \eqref{eqn:discr2} with $c_\tau^{k}-\mathfrak{m}_0$ where $\mathfrak{m}_0:=\Xint-_\Omega c^0\,\mathrm dx$. With exactly the same calculations as in Section \ref{s:4} we find \begin{align*} \|\overline\mu_\tau\|_{L^2(0,T;H^1(\Omega))}\leq C. \end{align*} \item[(vi)] A comparison in \eqref{eqn:discr2} as done in the \textbf{Sixth a priori estimate} gives \begin{align*} \| \Delta_p(\overline c_\tau)\|_{L^2(0,T; L^2(\Omega))} + \|\overline\eta_\tau \|_{L^2(0,T; L^2(\Omega))} \leq C. \end{align*} \item[(vii)] Estimate \eqref{thetaBound1} \color{black} can be shown by utilizing the calculations in \cite[Proof of Proposition 4.10 - Sixth estimate]{RocRos14} and additionally noticing that $\{\overline c_\tau\}_{\tau>0}$ is bounded in $ \mathrm{BV}\color{black}([0,T];L^2(\Omega))$ due to the \textit{Third estimate}. We thus obtain \eqref{thetaBound4}. \item[(viii)] The \textbf{Eighth a priori estimate} works as in Section \ref{s:4} and yields \eqref{thetaBoundAdd}. \item[(ix)] The \textbf{Ninth a priori estimate} works as in Section \ref{s:4} and yields \eqref{muBound}. \end{itemize} \end{proof} \begin{remark} We observe that \eqref{thetaBound4} implies the uniform bound \begin{align*} \|\log(\overline\vartheta_\tau)\|_{L^\infty(0,T;W^{1,d+\epsilon}(\Omega))}\leq C_\epsilon. 
\end{align*} Moreover, by interpolation we infer from \eqref{thetaBound1} that (see \eqref{estetainterp}) \begin{align*} \|\overline\vartheta_\tau\|_{L^h(Q)}\leq C \end{align*} with $h=8/3$ for $d=3$ and $h=3$ for $d=2$. \end{remark} \section{\bf Proof of Theorem \ref{thm:1}} \label{s:6} In this last section we are going to perform the limit passages as $\tau\downarrow 0$ and $\omega\downarrow 0$ in the time-discrete system \eqref{PDE-discrete}, for which the existence of solutions was proved in Proposition \ref{prop:exist-discr}. \color{black} This will lead us \color{black} to prove Theorem \ref{thm:1}. \subsection{Compactness} \label{ss:6.1} We shall adopt the notation from the previous section. In particular for fixed $\omega>0$ we let $(\overline c_\tau,\underline c_\tau, c_\tau,\overline\mu_\tau,\overline z_\tau,\underline z_\tau, z_\tau, \overline\vartheta_\tau, \underline\vartheta_\tau, \vartheta_\tau, \overline\mathbf u_\tau,\underline\mathbf u_\tau,\overline\mathbf v_\tau,\mathbf v_\tau)$ be a time-discrete solution on an equi-distant partition of $[0,T]$ with fineness $\tau>0$ according to Proposition \ref{prop:exist-discr}. \begin{lemma} \label{lemma:discr-conv} Let the assumptions from Proposition \ref{prop:exist-discr} be satisfied and $\omega>0$ be fixed. 
Then there exists a quintuple $(c,\mu,z,\vartheta,\mathbf u)$ satisfying \eqref{reg-c}--\eqref{reg-u} such that along a (not relabeled) subsequence, as $\tau \downarrow 0$, the following convergences hold: \color{black} \begin{align} &\pwc c{\tau},\,\upwc c{\tau} \stackrel{\star}{\rightharpoonup} c &&\text{ weakly-star in }L^\infty(0,T;W^{1,p}(\Omega)), \label{cConv1}\\ &c_\tau \stackrel{\star}{\rightharpoonup} c &&\text{ weakly-star in }L^\infty(0,T;W^{1,p}(\Omega))\cap H^{1}(0,T;L^2(\Omega)), \label{cConv2}\\ &\Delta_p(\pwc c{\tau}) \rightharpoonup \Delta_p(c) &&\text{ weakly in }L^2(0,T;L^2(\Omega)), \label{cConv3}\\ &\pwc c{\tau},\,\upwc c{\tau}\to c &&\text{ strongly in }L^s (0,T; W^{1,p}(\Omega))\text{ for all }s\in[1,\infty), \label{cConv4}\\ &\pwc \eta{\tau} \rightharpoonup \eta &&\text{ weakly in }L^2(0,T;L^2(\Omega))\text{ with }\eta=\beta_\omega(c)\text{ a.e. in }Q, \label{etaConv}\\ &\pwc \mu{\tau} \rightharpoonup \mu &&\text{ weakly in }L^2(0,T;H_N^2(\Omega)), \label{muConv}\\ &\pwc z{\tau},\,\upwc z{\tau} \stackrel{\star}{\rightharpoonup} z &&\text{ weakly-star in }L^\infty (0,T; W^{1,p}(\Omega)), \label{zConv1}\\ &z_\tau \stackrel{\star}{\rightharpoonup} z &&\text{ weakly-star in }L^\infty (0,T; W^{1,p}(\Omega)) \cap H^1 (0,T;L^2(\Omega)), \label{zConv2}\\ &\pwc z{\tau},\,\upwc z{\tau}\to z &&\text{ strongly in }L^\infty (0,T; X)\text{ for all $X$ such that }W^{1,p}(\Omega) \Subset X\subseteq L^2(\Omega), \label{zConv3}\\ &z_\tau \to z &&\text{ strongly in }\mathrm{C}^0([0,T]; X)\text{ for all $X$ such that }W^{1,p}(\Omega) \Subset X\subseteq L^2(\Omega), \label{zConv4}\\ &\pwc \vartheta{\tau}\rightharpoonup \vartheta &&\text{ weakly in }L^2 (0,T; H^1(\Omega)), \label{thetaConv1}\\ &\log(\pwc\vartheta{\tau}) \stackrel{\star}{\rightharpoonup} \log(\vartheta) &&\text{ weakly-star in } L^2 (0,T; H^1(\Omega)) \cap L^\infty (0,T; W^{1,d+\epsilon}(\Omega)') \quad \text{for every } \epsilon>0, \label{thetaConv2}\\ &\log(\pwc\vartheta{\tau}) \to 
\log(\vartheta) &&\text{ strongly in }L^2(0,T;L^s(\Omega)) \text{ for all $ s \in [1,6)$ if $d=3$, and all $s\in [1,\infty) $ if $d=2$,} \label{thetaConv3}\\ &\log(\pwc\vartheta{\tau}(t)) \rightharpoonup \log(\vartheta(t)) &&\text{ weakly in $H^1(\Omega)$ for almost all $t \in (0,T)$} \label{thetaConv4}\\ &&&\text{ (the chosen subsequence for $\tau\downarrow 0$ does not depend on $t$)},\notag\\ &\pwc\vartheta{\tau}\to \vartheta &&\text{ strongly in } L^q(\Omega\times (0,T)) \text{ for all }q\in [1,8/3)\text{ for }d=3\text{ and all }q\in [1, 3)\text{ if }d=2,\color{black} \label{thetaConv5} \end{align} \begin{align} &\pwc \mathbf{u}{\tau},\upwc \mathbf{u}{\tau} \stackrel{\star}{\rightharpoonup} \mathbf{u} &&\text{ weakly-star in }L^\infty(0,T;H^2(\Omega;\mathbb{R}^d)), \label{uConv1}\\ &\pwl \mathbf{u}{\tau} \stackrel{\star}{\rightharpoonup} \mathbf{u} &&\text{ weakly-star in }H^1(0,T;H^2(\Omega;\mathbb{R}^d))\cap W^{1,\infty}(0,T;H^1(\Omega;\mathbb{R}^d)), \label{uConv2}\\ &\pwc \mathbf{u}{\tau},\, \upwc \mathbf{u}{\tau} \to \mathbf{u} &&\text{ strongly in }L^\infty(0,T;X)\text{ for all $X$ such that }H^{2}(\Omega;\mathbb{R}^d) \Subset X\subseteq L^2(\Omega;\mathbb{R}^d), \label{uConv3}\\ &\pwl \mathbf{u}{\tau} \to \mathbf{u} &&\text{ strongly in }\mathrm{C}^0([0,T]; X \color{black})\text{ for all $X$ such that }H^{2}(\Omega;\mathbb{R}^d) \Subset X\subseteq L^2(\Omega;\mathbb{R}^d), \label{uConv4}\\ &\pwc \mathbf v{\tau} \rightharpoonup \mathbf{u}_{t} &&\text{ weakly in }L^2(0,T;H^2(\Omega;\mathbb{R}^d)), \label{vConv1}\\ &\mathbf v_\tau \rightharpoonup \mathbf{u}_t &&\text{ weakly in }H^1(0,T;L^2(\Omega;\mathbb{R}^d))\cap L^2(0,T;H^2(\Omega;\mathbb{R}^d)). 
\label{vConv2} \end{align} Under the additional assumption \eqref{range-k-admissible} we also have for all $\epsilon>0$ that $\vartheta\in \mathrm{BV} \color{black}([0,T]; W^{2,d+\epsilon}(\Omega)')$ and \begin{align} &\pwc \vartheta{\tau}\to \vartheta &&\text{ strongly in }L^2 (0,T; Y)\text{ for all $Y$ such that }H^{1}(\Omega) \Subset Y\subset W^{2,d+\epsilon}(\Omega)', \label{tetaConvAdd1}\\ &\pwc \vartheta{\tau}(t)\to \vartheta(t) &&\text{ strongly in }W^{2,d+\epsilon}(\Omega)'\text{ for all }t\in[0,T]. \label{tetaConvAdd2} \end{align} \end{lemma} \begin{proof} We immediately obtain \eqref{cConv1}, \eqref{cConv2}, \eqref{muConv}, \eqref{zConv1}, \eqref{zConv2}, \eqref{thetaConv1}, \eqref{uConv1}, \eqref{uConv2}, \eqref{vConv1} and \eqref{vConv2} from the estimates \eqref{cBound1}--\eqref{vBound} in Proposition \ref{prop:aprio-discr} by standard weak \color{black} compactness arguments. From the regularity result \cite[Thm.\ 2, Rmk.\ 3.5]{savare98}, we infer for every $1 \leq \delta\color{black}< \frac1p$ the enhanced regularity $\pwc c{\tau},\upwc c{\tau} \in L^2 (0,T; W^{1+\delta\color{black},p}(\Omega))$ together with the estimate \begin{align*} \|\pwc c{\tau}\|_{L^2(0,T;W^{1+\delta,p}(\Omega))}+\|\upwc c{\tau}\|_{L^2(0,T;W^{1+\delta,p}(\Omega))}\leq C_{\delta\color{black}}. \end{align*} In combination with \eqref{cBound1} and \eqref{cBound2}, \color{black} the application of the Aubin-Lions compactness theorem yields \eqref{cConv4}. Now we choose a subsequence $\tau\downarrow 0$ such that $\Delta_p(\pwc c{\tau})\rightharpoonup S$ in $L^2(Q)$ for an element $S\in L^2(Q)$ possible due to \eqref{cBound3}. Taking $\pwc c{\tau}\to c$ in $L^2(Q)$ into account, we may identify $S=\Delta_p(c)$ by the strong-weak closedness of the maximal monotone graph of $\Delta_p:L^2(Q)\to L^2(Q)$. We then conclude \eqref{cConv3}. 
Analogously, \eqref{etaConv} ensues from the strong-weak closedness of the graph of the maximal monotone operator (induced by $\beta_\omega$) $\beta_\omega : L^2(Q) \to L^2(Q) $. \color{black} In addition, \color{black} \eqref{zConv3}, \eqref{zConv4}, \eqref{uConv3} and \eqref{uConv4} follow from \eqref{zBound1}, \eqref{zBound2}, \eqref{uBound1} \color{black} and \eqref{uBound2} via Aubin-Lions compactness results (see \cite{simon}). It remains to show the convergences for $\pwc\vartheta{\tau}$ and $\log(\pwc\vartheta{\tau})$. Here we proceed as in \cite[Proof of Lemma 5.1]{RocRos14}. We use the boundedness properties \eqref{thetaBound3} and \eqref{thetaBound4}, and apply the compactness result \cite[Theorem A.5]{RocRos14} which is based on Helly's selection principle. We obtain a function $\lambda\in L^2(0,T;H^1(\Omega))\cap L^\infty(0,T;W^{1,d+\epsilon}(\Omega)')$ for all $\epsilon>0$ and a further, (again not relabeled), subsequence \color{black} such that \begin{align*} &\log(\pwc\vartheta{\tau})\stackrel{\star}{\rightharpoonup} \lambda\text{ weakly-star in }L^2(0,T;H^1(\Omega))\cap L^\infty(0,T;W^{1,d+\epsilon}(\Omega)'),\\ &\log(\pwc\vartheta{\tau}(t))\rightharpoonup \lambda(t)\text{ weakly in }H^1(\Omega) \quad\text{for a.a. \color{black} }t\in(0,T). \end{align*} Here the chosen subsequence for $\tau\downarrow 0$ does not depend on $t$ in the latter convergence. We also infer from above that $$ \log(\pwc\vartheta{\tau}(t))\to \lambda(t)\text{ strongly in }L^s(\Omega) \quad\text{for a.a. $t\in(0,T)$ and all $s$ from \eqref{thetaConv3}.} $$ By also exploiting the boundedness of $\|\log( \pwc\vartheta{\tau})\|_{L^2(0,T;H^1(\Omega))\cap L^\infty(0,T;W^{1,d+\epsilon}(\Omega)')}$ and the interpolation inequality \eqref{interpolationIneq} with $X=H^1(\Omega)$, $Y=L^s(\Omega)$ and $Z=W^{1,d+\epsilon}(\Omega)$ we infer that the sequence $\{\log(\pwc\vartheta{\tau})\}_{\tau}$ is uniformly integrable in $L^2(0,T;L^s(\Omega))$. 
An application of Vitali's \color{black} convergence theorem proves $$ \log(\pwc\vartheta{\tau})\to \lambda\text{ strongly in }L^2(0,T;L^s(\Omega)) \quad\text{for all $s$ from \eqref{thetaConv3}.} $$ Comparison with \eqref{thetaConv1} yields $\lambda=\log(\vartheta)$ and hence \eqref{thetaConv2}, \eqref{thetaConv3} and \eqref{thetaConv4}. The uniform bound \eqref{thetaBound1} shows uniform integrability of $\{\pwc\vartheta{\tau}\}_{\tau}$ in $L^q(Q)$ with $q\in[1,8/3)$ for $d=3$ and $q\in[1,3)$ for $d=2$ (cf. \eqref{estetainterp}). \color{black} Vitali's convergence theorem proves the strong convergence \eqref{thetaConv5}. In particular we find $\pwc\vartheta{\tau}(t)\to \vartheta(t)$ strongly in $L^1(\Omega)$ for a.e. $t\in(0,T)$ (where the subsequence of $\tau\downarrow 0$ is independent of $t$). By the boundedness $\|\pwc\vartheta{\tau}(t)\|_{L^1(\Omega)}\leq C$ uniformly in $t$ and $\tau$ (see \eqref{thetaBound1}) we infer by lower semicontinuity \color{black} that $\vartheta\in L^\infty(0,T;L^1(\Omega))$. Furthermore, by considering a weak cluster point $(\overline\vartheta_\tau)^{\frac{\kappa+\alpha}{2}}\rightharpoonup S$ in $L^2(0,T;H^1(\Omega))$ and identifying $S=\vartheta^{\frac{\kappa+\alpha}{2}}$ via a.e. limits from above we also obtain $\vartheta^{\frac{\kappa+\alpha}{2}}\in L^2(0,T;H^1(\Omega))$. Finally, under the additional assumption \eqref{range-k-admissible} convergences \color{black} \eqref{tetaConvAdd1} and \eqref{tetaConvAdd2} follow from an Aubin-Lions type compactness result for $\mathrm{BV}$-functions (cf.\ e.g.\ \cite[Chap.\ 7, Cor.\ 4.9]{Rou05}), combining estimate \color{black} \eqref{thetaBound1} together with the $\mathrm{BV}$-bound \eqref{thetaBoundAdd}. For further details \color{black} we refer to \cite[Proof of Lemma 5.1]{RocRos14}. 
\end{proof} \subsection{Conclusion of the proof of Theorem \ref{thm:1}} \label{ss:6.2} Here is the outline of the proof: \begin{enumerate} \item First, for fixed $\omega>0$ we will pass to the limit as $\tau\downarrow 0$, (along the same subsequence for which the convergences in Lemma \ref{lemma:discr-conv} hold), in the time-discrete system \eqref{PDE-discrete}. We will thus obtain an \emph{entropic weak solution} (in the sense of Definition \ref{def-entropic}), to the (initial-boundary value problem for the) PDE system \eqref{eqn:PDEsystem}, where the maximal monotone operator $\beta$ and the elastic energy density $W$ are replaced by their regularized versions $\beta_\omega$ and $W^\omega$. \item Secondly, we will tackle the limit passage as $\omega \downarrow 0$. \end{enumerate} Observe that the limit passages $\tau \downarrow 0 $ and $\omega\downarrow 0$ cannot be performed simultaneously, \color{black} because in the time-discrete system from Problem \ref{def:time-discrete} the (partial) derivatives of the convex- and the concave-decompositions \color{black} \eqref{eqn:convConcSplittingWc} may ``explode'' as $\omega\downarrow 0$. However, the convex-concave splitting shall \color{black} disappear in the limit $\tau\downarrow 0$ for fixed $\omega>0$ in the corresponding PDE system. 
\paragraph{\bf Limit passage $\tau\downarrow 0$} First of all, \color{black} we mention that from the time-discrete damage equation \eqref{eqn:discr3} we derive the following inequalities (for details we refer to \cite[Section 5.2]{hk1}; see also \cite[Proof of Theorem 1]{RocRos14} and \cite[Proof of Theorem 4]{RocRos12}): \begin{align} &\textit{-- damage energy-dissipation inequality:}\text{ for all $t \in (0,T]$, for $s=0$, and for almost all $0< s\leq t$:} \notag\\ &\qquad \begin{aligned} \label{discr-energy-diss-ineq} &\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega |\partial_t z_\tau|^2 \, \mathrm{d} x \, \mathrm{d} r +\int_\Omega\left(\frac1p |\nabla\pwc z{\tau}(t)|^p + (\conv{\sigma})'(\pwc z{\tau}(t))+ (\conc{\sigma})'(\upwc z{\tau}(t))\right)\, \mathrm{d} x\\ &\qquad\leq\int_\Omega\left(\frac1p |\nabla\pwc z{\tau}(s)|^p+ (\conv{\sigma})'(\pwc z{\tau}(s))+ (\conc{\sigma})'(\upwc z{\tau}(s))\right)\, \mathrm{d} x\\ &\qquad\quad+\int_{\overline{\mathsf t}_\tau(s)}^{\overline{\mathsf t}_\tau(t)}\int_\Omega \partial_t z_\tau \left(-\breve{W}_{3,z}^\omega(\pwc c{\tau},\varepsilon(\upwc\mathbf{u}{\tau}),\pwc z{\tau}) - \invbreve{W}_{3,z}^\omega(\pwc c{\tau},\varepsilon(\upwc\mathbf{u}{\tau}),\upwc z{\tau})+\pwc \vartheta{\tau}\right)\, \mathrm{d} x \, \mathrm{d} r; \end{aligned}\\ &\textit{-- damage variational inequality:}\text{ for all $\zeta\in L^\infty(0,T;W^{1,p}(\Omega))$ with $0\leq \zeta\leq\upwc z{\tau}$:} \notag\\ &\qquad \begin{aligned} \label{var-ineq-z-bis} &\int_0^T\int_\Omega \Big(|\nabla \pwc z{\tau}|^{p-2} \nabla \pwc z{\tau} \cdot \nabla (\zeta-\pwc z{\tau}) + \big((\partial_t z_\tau) + (\conv{\sigma})'(\pwc z{\tau})+ (\conc{\sigma})'(\upwc z{\tau})\big)(\zeta-\pwc z{\tau})\Big)\,\mathrm dx\,\mathrm dr\\ &+\int_0^T\int_\Omega\Big(\breve{W}_{3,z}^\omega(\pwc c{\tau},\varepsilon(\upwc\mathbf{u}{\tau}),\pwc z{\tau}) + \invbreve{W}_{3,z}^\omega(\pwc c{\tau},\varepsilon(\upwc\mathbf{u}{\tau}),\upwc z{\tau}) - \pwc 
\vartheta{\tau}\Big)(\zeta-\pwc z{\tau})\,\mathrm dx\,\mathrm dr\geq 0. \end{aligned} \end{align} The limit passage $\tau\downarrow 0$ in the damage energy-dissipation inequality \eqref{discr-energy-diss-ineq}, in the damage variational inequality \eqref{var-ineq-z-bis}, in the entropy inequality \eqref{discr-entropy-ineq}, in the total energy inequality \eqref{discr-energy-ineq} and in the equation for the balance of forces \eqref{eqn:discr5} works exactly as outlined in \cite[Proof of Theorem 1]{RocRos14} by taking the growth properties \color{black} \eqref{est-quoted-5.1}--\eqref{est-quoted-5.4} into account (for \textbf{fixed} $\omega>0$\color{black}) and needs no repetition here. We end up with properties (ii), (iii), (iv) and (v) of Definition \ref{def-entropic}, keeping in mind that $W(c,\varepsilon(\mathbf{u}),z)$, $W_{,z}(c,\varepsilon(\mathbf{u}),z)$ and $W_{,\varepsilon}(c,\varepsilon(\mathbf{u}),z)$ are replaced by their \color{black} $\omega$-regularized versions $W^\omega(c,\varepsilon(\mathbf{u}),z)$, $W_{,z}^\omega(c,\varepsilon(\mathbf{u}),z)$ and $W_{,\varepsilon}^\omega(c,\varepsilon(\mathbf{u}),z)$, respectively. Let us comment that in the limit $\tau\downarrow 0$ of \eqref{var-ineq-z-bis} we are only able to obtain a ``one-sided variational inequality'' which still suffices to obtain a weak solution in the sense of Definition \ref{def-entropic} (see \eqref{var-ineq-z}). Furthermore, following \color{black} the approach from \cite[Proof of Theorem 4.4]{hk1}, the subgradient $\xi\in L^2(0,T;L^2(\Omega))$ fulfilling $\xi \in \partial I_{[0,+\infty)}(z)$ a.e.\ in $Q$ \color{black} can be specified precisely as \begin{align*} \xi=-\mathbf{1}_{\{z=0\}}\Big(\sigma'(z) + \pd{z}(c,\varepsilon(\mathbf u), z)-\vartheta\Big)^+ \qquad \text{a.e. in\;}\, Q, \color{black} \end{align*} where $\mathbf{1}_{\{z=0\}}:Q\to\{0,1\}$ denotes the characteristic function of the set $\{z=0\}\subseteq Q$ and $(\cdot)^+:=\max\{0,\cdot\}$. 
It remains to show the limit passage as $\tau\downarrow 0$ in the Cahn-Hilliard system \eqref{eqn:discr1}--\eqref{eqn:discr2}. This can be achieved via standard convergence methods by exploiting the convergences shown in Lemma \ref{lemma:discr-conv} and noticing the growth properties \eqref{est-quoted-5.1}--\eqref{est-quoted-5.4}. This leads to property (i) from Definition \ref{def-entropic} where $W_{,c}(c,\varepsilon(\mathbf{u}),z)$ and $\beta$ should be replaced by $W_{,c}^\omega(c,\varepsilon(\mathbf{u}),z)$ and $\beta_\omega$, respectively. \\ \paragraph{\bf Limit passage $ {\boldsymbol \omega} {\boldsymbol \downarrow} {\bf 0}$ \color{black}} In the subsequent argumentation we let $S_\omega=(c_\omega,\mu_\omega,z_\omega,\vartheta_\omega,\mathbf{u}_\omega)$ be an $\omega$-regularized weak solution, i.e. an entropic \color{black} weak solution in the sense of Definition \ref{def-entropic} where the $W$, $W_{,c}$, $W_{,\varepsilon}$, $W_{,z}$ and $\beta$-terms are replaced by $W^\omega$, $W_{,c}^\omega$, $W_{,\varepsilon}^\omega$, $W_{,z}^\omega$ and $\beta_\omega$, respectively. We observe that the a priori estimates in Proposition \ref{prop:aprio-discr} are inherited by the weak solutions $S_\omega$ via lower semicontinuity \color{black} arguments. Hence we obtain the same convergence properties for $\omega\downarrow 0$ as for $\tau\downarrow 0$ in Lemma \ref{lemma:discr-conv} where \eqref{etaConv} should be replaced by \begin{align} \pwc \eta{\omega} \rightharpoonup \eta \quad\text{ weakly in }L^2(0,T;L^2(\Omega))\text{ as }\omega\downarrow 0\text{ with }\eta\in\beta(c)\text{ a.e. in }Q. \label{etaConv2} \end{align} Indeed, to prove \color{black} \eqref{etaConv2}, let $\eta_{\omega}=\beta_\omega(c_\omega)\rightharpoonup S$ in $L^2(Q)$ for $\omega\downarrow 0$ for some \color{black} element $S\in L^2(Q)$. 
By convexity of the functional $\widehat\beta_\omega:L^2(Q)\to\mathbb{R}$ we find \begin{align} \label{convBeta} \forall w\in L^2(Q):\quad \widehat\beta_\omega(c_\omega)+\langle \beta_\omega(c_\omega), w-c_\omega\rangle_{L^2(Q)}\leq \widehat\beta_\omega(w). \end{align} Since $\{\beta_\omega\}$ is the Yosida-approximation of $\beta$ we conclude that (cf. \cite[Lemma 5.17]{Rou05}) \begin{align} \forall w\in L^2(Q):\quad \widehat\beta_\omega(w)\to \widehat\beta(w)\text{ as }\omega\downarrow0 \qquad\text{ and }\qquad\liminf_{\omega\downarrow0}\widehat\beta_\omega(c_\omega)\geq \widehat\beta(c). \label{auxConv} \end{align} Thus by \eqref{etaConv2} and \eqref{auxConv} we can pass to the limit $\omega\downarrow 0$ for a subsequence in \eqref{convBeta} and obtain $\eta\in\beta(c)$. The main feature for the passage $\omega\downarrow 0$ in the PDE system is the following observation: From \eqref{cBound1} and \eqref{cBound2} we infer via the compact embedding $W^{1,p}(\Omega)\Subset L^\infty(\Omega)$ that for all $\omega>0$ \begin{align*} \|c_\omega\|_{L^\infty(Q)}\leq C. \end{align*} An important consequence is that in combination with the definition of $\mathcal R_\omega$ in \eqref{Rtrunc} we find for all sufficiently small $\omega>0$ that $\mathcal R_\omega(c_\omega)=c_\omega$ a.e. in $Q$ and thus \begin{align*} \left. \begin{aligned} &W(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega)=W^\omega(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega), &W_{,c}(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega)=W_{,c}^\omega(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega),\\ &W_{,\varepsilon}(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega)=W_{,\varepsilon}^\omega(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega), &W_{,z}(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega)=W_{,z}^\omega(c_\omega,\varepsilon(\mathbf{u}_\omega),z_\omega). \end{aligned} \quad\right\}\quad \text{a.e. in }Q. 
\end{align*} Then, the limit \color{black} passage $\omega\downarrow 0$ in the $\omega$-regularized versions of (i)-(v) in Definition \ref{def-entropic} works as for $\tau\downarrow 0$. This concludes the proof of Theorem \ref{thm:1}. \color{black} \hfill $\square$ \noindent {\bf \large Acknowledgments.} Christian Heinemann and Christiane Kraus have been partially supported by ECMath SE 4 and SFB 1114. \color{black} The work of Elisabetta Rocca was supported by the FP7-IDEAS-ERC-StG Grant \#256872 (EntroPhase), by GNAMPA (Gruppo Nazionale per l'Analisi Matematica, la Probabilit\`a e le loro Applicazioni) of INdAM (Istituto Nazionale di Alta Matematica), and by IMATI -- C.N.R. Pavia. Riccarda Rossi was partially supported by a MIUR-PRIN'10-'11 grant for the project ``Calculus of Variations'', and by GNAMPA (Gruppo Nazionale per l'Analisi Matematica, la Probabilit\`a e le loro Applicazioni) of INdAM (Istituto Nazionale di Alta Matematica). \bibliographystyle{alpha}
{ "redpajama_set_name": "RedPajamaArXiv" }
5,406
\section{Introduction} During follow-up inspection of the error box of the LIGO/Virgo alert G299232 on 2017 August 27.017, MASTER Global Robotic Net~\citep{2010AdAst2010E..30L} discovered an optical transient named MASTER OT~J033744.97+723159.0~\citep{GCN21780,roberts2018}. On 2017 August 29, the spectrum of MASTER OT J033744.97+723159.0 was obtained with the Xinglong 2.16-m telescope of National Astronomical Observatory of China~\citep{2017ATel10681....1R}. The object was classified as Type IIb Supernova (SN) by cross-correlating with a library of spectra (SNID;~\citealt{2007ApJ...666.1024B}). On 2017 September 6, M.~Caimmi reported the discovery of a supernova with 0.24-m telescope from Valdicerro Observatory~\citep{2017TNSTR.973....1C}. The supernova received the IAU designation AT 2017gpn and was identified as MASTER OT J033744.97+723159.0. \section{Observations and data reduction} We performed 20 epochs of observations ($B$ and $R$ filters) with CCD-photometer on the Zeiss-1000 telescope of the Special Astrophysical Observatory of the Russian Academy of Sciences. The aperture photometry was performed using standard procedures of ESO-MIDAS software package. It includes standard image processing such as bias subtraction and flat field correction, removing the traces of cosmic particles, and stacking of individual frames into the summary image. SN 2017gpn is located in $\sim$0.039 degrees ($>20$ kpc) from the center of the potential host galaxy NGC1343, so the galaxy's contamination is negligible. The line-of-sight reddening is adopted to be $E(B - V) = 0.017$ mag~\citep{2011ApJ...737..103S}. Since no Landolt or any other standards were available for this region, we use the Pan-STARRS magnitudes for comparison stars. The magnitudes of comparison stars were re-calculated from $g,r,i$ to $B,R$ with use of Lupton transformation equations\footnote{http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php}. 
For better modelling we combined our photometric data with publicly available $B$ and $R$ light curves (LCs) of SN~2017gpn obtained with PIRATE robotic telescope~\citep{roberts2018}. \section{Modelling} The numerical light curve modelling is performed with one-dimensional multifrequency radiation hydrodynamical code \textsc{STELLA}. The full description of the code can be found in~\cite{Blinnikov1998,Blinnikov2006}; a public version of \textsc{STELLA} is also included with the \textsc{MESA} distribution~\citep{Paxton2018}. Our best-fit numerical model is shown by solid line in the Fig.~\ref{fig:model}. The parameters of the model are: pre-SN mass $M = 3.5~\rm M_\odot$, pre-SN radius $R = 50~\rm R_\odot$, mass of hydrogen envelope $M_{env} = 0.06~\rm M_\odot$. The explosion energy is $E = 1.2\times10^{51}$ erg. The 0.11~$\rm M_\odot$ of $^{56}$Ni is totally mixed through the ejecta. The compact remnant is $1.41~\rm M_\odot$ neutron star. The theoretical photospheric velocities are in a good agreement with the observed one ($\sim$14800 km~s$^{-1}$ from SiII~635.5nm absorption line, \citealt{2017ATel10681....1R}). \section{Discussion and conclusions} The parameters we found are consistent with the results of hydrodynamical modelling of other typical Type IIb supernovae. However, in different hydrodynamical models of SN~IIb there is some variance in radius of pre-SN star (from 30-50~R$_\odot$ to 700~$R_\odot$, e.g.,~\citealt{1994ApJ...429..300W,Blinnikov1998,2015ApJ...811..147F}). To check if it is possible to reproduce the observed LCs of SN~2017gpn with a model of higher radius, we changed the radius in our best-fit model to $R=400~\rm R_\odot$ and variated the degree of $^{56}$Ni mixing. Concentrating all the $^{56}$Ni in the central part of ejecta, we were able to nearly reproduce the observed LCs~(Fig.~\ref{fig:model}). This stresses the importance of $^{56}$Ni mixing in such kind of studies. 
By adopting the date of explosion from the models (Aug 20 for our best-fit model and Aug 16 for the model with $R=400~\rm R_\odot$), we can conclude that SN~2017gpn is unlikely to be connected to the LIGO/Virgo G299232 alert. \begin{figure}[h!] \begin{center} \includegraphics[scale=0.75,angle=0]{model_fin.pdf} \caption{The result of modelling the observed LCs of SN~2017gpn (points are our data and crosses are the data taken from~\citealt{roberts2018}). Our best-fit model is shown by solid lines ($M = 3.5~\rm M_\odot$, $R = 50~\rm R_\odot$, $E = 1.2\times10^{51}$ erg, $M_{^{56}Ni} = 0.11~\rm M_\odot$, mixed). For comparison the model with $R = 400~\rm R_\odot$ ($M_{^{56}Ni} = 0.11~\rm M_\odot$, no mixing) is presented.\label{fig:model}} \end{center} \end{figure} \acknowledgments M.V.P. and S.I.B. acknowledge support from RSF grant 18-12-00522 for modelling. A.S.M. is grateful to O.I.~Spiridonova and the Zeiss-1000 staff for the help in observations. \bibliographystyle{aasjournal}
{ "redpajama_set_name": "RedPajamaArXiv" }
8,365
Quzluy-e Olya (, also Romanized as Qūzlūy-e 'Olyā; also known as Kozlu-giaria, Qowzlū-ye Bālā, Qowzlū-ye 'Olyā, Qozlū, and Qūzlū) is a village in Kani Bazar Rural District, Khalifan District, Mahabad County, West Azerbaijan Province, Iran. At the 2006 census, its population was 402, in 68 families. References Populated places in Mahabad County
{ "redpajama_set_name": "RedPajamaWikipedia" }
8,693
Home Motorcycle Racing News MotoGP 2016 German MotoGP Preview | Another Marquez Blitzkrieg? 2016 German MotoGP Preview | Another Marquez Blitzkrieg? Honda's Marc Marquez 2016 German MotoGP Preview Honda's Marc Marquez Since joining the premier MotoGP class in 2013 with Repsol Honda, Marc Marquez has dominated the German Grand Prix at Sachsenring. Ahead of this weekend's 2016 Germany GP, round nine of 18, Marquez has continually provided his own MotoGP Blitzkrieg at the 2.28-mile Sachsenring track that contains 13 corners – some of the tightest on the 2016 MotoGP calendar. Piloting an RC213V at one of the slowest tracks in GP, the 23-year-old Marquez has not only won all 30-lap races there, he has also completed the task from pole and set the fastest lap time each time (currently 1:20.336 in 2005). Visit the Ultimate MotorCycling MotoGP Page Marquez, who has won in Germany at every race since 2010, including victories in the former 125cc and Moto2 classes, also enters this weekend's race with the points lead; Marquez has 145 points, 24 ahead of the reigning MotoGP Champion, Movistar Yamaha MotoGP's Jorge Lorenzo. "We're going to the German GP aiming for a podium position and of course possibly to fight for the win. The Sachsenring is normally a very good track for me that I like very much, maybe because it has so many left-hand corners, a bit like a dirt track," Marquez says. "On the other hand, it's another one on the calendar where the weather has sometimes played a crucial role in the past, so we'll see how it goes there. We've learned from the past and we're approaching this season race by race, trying to be ready and make the most of what we have and what the situation requires. In any case I'm happy we'll be back in action shortly and I'll do my best to get another positive result before the summer break." Yamaha's Jorge Lorenzo Two weeks ago at Assen MotoGP, Marquez finished second in a rain-soaked race where only 13 riders cross the finish line. 
Marquez could have challenged eventual winner Estrella Galicia 0,0 Marc VDS' Jack Miller, but displayed patience usually only found in seasoned riders. Marquez proved that he has grown up, not needing to challenge for every win in a season. As for 21-year-old Miller, he became the first Australian to claim a race win since 2012 (Casey Stoner), and became the 10th youngest member in the class to win. Following his first-ever MotoGP win, he said "this makes it clear that we do know how to ride a motorbike and I'm not an idiot." Lorenzo finished 10th in Assen, but was not upset about the finish due to the conditions at the Dutch track. Lorenzo seeks another win ahead of the summer break, and has some luck in Germany – just not any wins. His has earned five podiums in Germany, including four second-place finishes. "We are facing the last round of the first part of the season and we are eager to have a great result before summer holiday. I didn't expect such a difficult race in Assen but ultimately we were able to get some valuable points for the championship," Lorenzo says. "Now we are visiting Sachsenring, a circuit where I've never won. It's always a challenging track for both me and the YZR-M1, but this year – with different electronics and tires – you never know. Every race is a question mark so let's see if this time we can be more competitive in Germany!" In third, 42 points behind Marquez, is nine-time World Champion Valentino Rossi. As the 37-year-old Italian chases a 10th world title, he enters Sachsenring with nine podiums at the German circuit, including four wins across the premier class. His last victory in Germany was in 2009, though he garnered an additional two wins since. Rossi has three DNFs so far this season – Texas, Italy and the Netherlands – so he has to seriously amp up performance. History proves this is possible; back in 1998 Mick Doohan (Honda) earned the title with three DNFs. "I really want to get back on track. 
Over these last few days I've rested at home, but now I want to leave for the race at Sachsenring," Rossi says. "In Assen we worked well throughout the weekend and we were really fast, both in wet and dry conditions, but the race was not very lucky for us. "At the Sachsenring circuit we will have to work well as we have been doing during all weekends this season. We are fast and I'm comfortable with the YZR-M1 and with the Michelin tires. I like this track and Sunday I will do my best to make it a good race." Ducati's Andrea Iannone In fourth, 59 points behind Marquez, is the other Repsol Honda pilot, Dani Pedorsa. Like Rossi, the Spaniard also crashed out of Assen MotoGP, but enters Germany with some momentum due to past performances at Sachsenring. Pedrosa has won there six times, including four times in the MotoGP class. He has work to do, though, as he continues to chase a first premier-class title. Only seven points behind is Rossi's future teammate on the factory Yamaha, current Team Suzuki Ecstar's Maverick Vinales. Just back from appearances in World Ducati Week at Misano, the Ducati Team of Andrea Iannone and Andrea Dovizioso continues to struggle. The GP16 pilots both crashed out at Assen, and are far back in points – Iannone eighth with 52, and Dovizioso 11th with 43. Thanks to Octo Pramac Yahknich Ducati's Scott Redding's third-place finish in Assen, Ducati seeks its 100th premier-class podium this week in Germany. The top Independent Teams rider is Monster Tech 3 Yamaha's Pol Espargaro, who is sixth in points with 72, 14 ahead of Avintia Racing Ducati's Hector Barbera. The latter is seventh overall, but second in the Independent Team battle. MotoGP reports that the 2016 season has been one of massive improvements for both Aprilia and Stefan Bradl (Aprilia Racing Team Gresini), the combination scoring points in all but one round and ending inside the top 10 on four occasions. 
In 2015 Bradl was forced to miss his home round due to injury, but there are no such issues for the Moto2 Champion in 2015. 2016 Sachsenring MotoGP on TV beIN will provide live coverage of Sachsenring MotoGP at the following times (all ET): Live Qualifying: 8 a.m. Saturday, July 16 Qualifying (replay): 12:30 p.m. Saturday Qualifying (replay): 12:30 a.m. Sunday, July 17 Live MotoGP Race: 8 a.m. Sunday MotoGP (replay), 2 p.m. Sunday MotoGP (replay), 10 p.m. Sunday 2016 MotoGP Point Standings (after eight of 18 rounds): Maverick VIÑALES Pol ESPARGARO Hector BARBERA Eugene LAVERTY Stefan BRADL Jack MILLER Bradley SMITH Cal CRUTCHLOW Michele PIRRO Tito RABAT Yonny HERNANDEZ 2016 German MotoGP Preview – Photo Gallery Yamaha's Valentino Rossi Honda's Dani Pedrosa Suzuki's Maverick Vinales Previous articleHarley's Andrew Hines Takes Record Win at Chicago NHRA Next articleNorth for the Fourth In Michigan on a Yamaha Seca 750
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
3,006
Gay Post November 27, 2013 13 reasons why Cher's new video 'Take it Like a Man' is the gayest thing ever Queer car wash, underwear models, lifeguards, and a twerking dance off. Yup, gayest thing ever. As if Cher could get any gayer, her new video for Take it Like a Man has proven that the Queen of the Queers not only still has it, but has set the standard of gay for another generation. Here's why: 13. The video opens with a group of men wandering around the hood in nothing but underwear. 12. There is a big gay car wash where most of the soap and water ends up on the boys. 11. There is a rival group of guys, also in their underwear, who are scandalized in the gayest way possible: sassy lollipop removal from mouth. 10. One of the guys gets picked up and used to scrub a car. 9. The boys somehow get shipwrecked, also in their underwear. 8. Only to be saved by a group of hunky lifeguards wearing the tightest bathing suits imaginable. 7. There are so many lingering crotch shots. 6. Back at the car wash the boys are actually just washing each other. 5. There is a dance off 4. Between guys in their underwear. 3. They are twerking. 2. One of the teams is called the Hot Bottoms. 1. It's Cher for God's sake. Follow Travis on Twitter at @TravMyers. Follow Women's Post on Twitter at @WomensPost. Ste. Anne's Spa is the perfect escape to relaxation What is the cure to our modern, hectic world? This autumn, take a trip to Grafton and check out Ste. Anne's Spa, Canada's favourite spa. A quick jaunt from Toronto, you can treat yourself to mud wraps, massages and facials, among other relaxing options. The estate features both a eucalyptus steam room and a fieldstone grotto, where you can hang out in the hot tub before cooling off in the plunge pool. For the ladies, the change room offers a unique surprise: colour therapy hot tubs. Yes, you can soak your weary bones in a fuchsia sea. The special treat for the season? Mocha fusion. 
Using coffee as a natural exfoliant, this spa treatment will replenish your skin from toes to scalp and clear your mind. The ultimate full-body experience. Ste. Anne's also offers wellness classes, giving guests the chance to participate in yoga, stretch or meditation classes. Under the tutelage of Jenn Hall, guests are taught the various poses involved in several branches of yoga. These classes are also occasionally offered as a special retreat, the best way to get the full Ste. Anne's wellness experience. If you want more than just a spa visit, there are numerous options. Guests can visit the Spa Bakery and catch one of the culinary demonstrations. Watch as Pastry Chef Khushroo guides you through the process of creating amazing desserts. Or take a lovely guided walk (or snowshoe!) through the Northumberland Hills. Enjoy the peaceful surroundings as you visit nearby forests, creeks and even the local farm. You can even arrange for an equine experience. Ste. Anne's maintains stables and lets guests interact with the horses in various ways. Groups are kept small, to ensure guests feel comfortable and willing to ask questions. Guests can choose several options: from watching the stablehands perform daily horse care, either morning muck or evening turn in; to grooming time, which gives visitors the chance to groom a horse; to the learn to ride program, a new program which offers visitors the chance to learn the eight horsemanship levels, from grooming and harnessing through to full diagonals. "The learn to ride program has just kicked off and two guests that have signed up for the five day program both made it through to the trotting stage," the stable caretaker, Kareylee White, says. And if you have a fear of horses? "All of our horses are very friendly, almost too friendly sometimes," White assures me. Whether you want to run away for a day or a weekend, Ste. Anne's Spa is the perfect escape from busy city life. Want to experience the Ste. 
Anne's experience for yourself? Head over to our St. Anne's Spa contest page to win a day spa for two! Living November 25, 2013 WATCH: Seth Rogen and James Franco made a gay parody of Kanye's 'Bound 2' video Call me crazy but I would take these two guys on a motorcycle over Kim and Kanye any day of the week. Health & Fitness November 20, 2013 Rob Ford's personal trainer is an ex-con who dealt steroids Rob Ford's personal trainer is an ex-con who dealt drugs — specifically, anabolic steroids, adding a new drug to the crack and weed we have become accustomed to hearing about Ford's involvement with. For those of you reading this who reside outside of Toronto: that thing you are doing now, where you are just shaking your head at the computer screen wondering how one man could consistently have such poor judgement, that is what the rest of us here have been doing every day since about mid March. The trainer, Valerio Moscariello, is currently banned from coaching in Canada and was picked up in Nevada and sentenced to five months in a federal prison for dealing steroids. The trainer's identity (he currently uses the alias Valerio Mosca) came to light after he posted an Instagram photo of a training session with the mayor that has since been removed. He also had a Twitter conversation about training Ford with porn star Nikki Benz where she invited the two to come party with her. Check out the text of a press release about Moscariello's conviction from the United States Attorney's Office, District of Nevada below. Man Sentenced To Five Months In Prison For Distributing Steroids Las Vegas, Nev. – A Canadian citizen residing in Henderson, Nevada, has been sentenced to five months in federal prison and three years of supervised release for his guilty plea to possession with the intent to distribute anabolic steroids, announced Daniel G. Bogden, United States Attorney for the District of Nevada. 
VALERIO MOSCARIELLO, age 31, of Toronto, Canada, pleaded guilty in August to the felony offense. He admitted to unlawfully possessing 27 units (270 cc) of anabolic steroids, Schedule III controlled substances, and to operating a website at www.juiceworld.com, that was accessible to the general public for the purpose of distributing these anabolic steroids. "Individuals who unlawfully distribute drugs, including anabolic steroids, over the Internet will be prosecuted and possibly sentenced to lengthy terms of imprisonment," said U.S. Attorney Bogden. "Purchasing controlled substances from unlicensed persons without a valid prescription is unsafe and could threaten your life or the lives of others." The sentence was handed down on Monday, October 31, 2005, by U.S. District Judge Roger L. Hunt, and included an enhancement under the federal sentencing guidelines for distributing the substances through mass-marketing by means of an interactive computer service. In February 2005, U.S. Postal Inspectors tracked a steroid distribution operation to the defendant's residence in Henderson. In June 2005, law enforcement officers executed a search of the residence and seized a quantity of anabolic steroids, including Primobolon Depot, Deca Nadrolone Decanoate, and Trenbolone Acetate. They also seized "buy-owe" sheets, materials such as small bottles and syringes consistent with the repackaging of these substances, and approximately $16,000 in cash. MOSCARIELLO must remove his website www.juiceworld.com from the internet and surrender the unlawful items seized from his home in June. He is presently in immigration custody awaiting a removal hearing. The case was investigated by Inspectors with the U.S. Postal Inspection Service, Special Agents with U.S. Immigration and Customs Enforcement, and Officers with the Henderson Police Department. It is being prosecuted by Assistant United States Attorney Crane M. Pomerantz. 
Politics November 20, 2013 WATCH: Strombo tries to call Rob Ford's office with hilarious results You've got to love the tenacity of Canada's own George Stroumboulopoulos. The talkshow host, candidate in our Toronto Fantasy Election, and my future husband took up Rob Ford's invitation for calls from the one-off Ford Nation spectacle on Sun News Network Monday night. Watch the clip and let us know what you think, would you watch a one-on-one interview with Strombo and Rob Ford? Also, how lucky is that girl answering phones at Ford's office? Toronto Fantasy Election Imagine for a moment that the Fords' request for a snap municipal election came to pass. Imagine still that, instead of just the doldrum politicians in the race that some of Toronto's best and brightest in the fields of entertainment and literature stepped up to the plate and took a swing at the mayoralty. We came up with the Toronto Fantasy Election to satisfy our desire to see somebody, anybody else leading this city. Let's take a look at who is in the running. If you've already got your mind made up scroll to the bottom of this article to cast your vote. A living literary legend, the champion of women, libraries, and everything we love, Atwood took on the Fords not too long ago when it came to saving libraries. An incessant tweeter with an unparalleled imagination she's already got the fan base ('Atwood Nation' anyone?) and the brainpower to lead. Would she make a good mayor for the City of Toronto? Olivia Chow It is no secret that Chow, a City Hall veteran who stepped up federally and currently sits as an NDP MP, has her eyes on Toronto's top spot. Although no official announcement has been made from her camp it is widely believed that after a successful exploratory committee she has already put together a bare bones campaign team. Her politics are a huge departure from the Ford era of tax cuts and bike lane removals. Would she make a good mayor for the City of Toronto? 
Colin Mochrie This comedic genius would fit right into City Hall with some of the slapstick antics that have been going on there lately. Mochrie is an alumnus of Toronto's Second City and worked on TV shows like Whose Line is it Anyway? and This Hour Has 22 Minutes. Maybe it is time Toronto elected mayor who is actively trying to be a laughingstock instead of one that isn't purposely keeping the world in stitches. Would he make a good mayor for the City of Toronto? A centrist-conservative who has spent most of his life in politics, John Tory is the kind of guy we all would have expected to be mayor right now. While he may have never smoked crack he could be a perfect fit for the office he has tried at before. Perhaps we are now paying the price for overlooking the mayor-next-door all these years. Would he make a good mayor for the City of Toronto? George Stroumboulopoulos The former MuchMusic VJ turned CBC host, part-time CNN host and full-time dreamboat is a fixture of Toronto, having been the crush of every teenage girl to pass through the megacity from 2000 to 2005. Passionate about human rights along with arts and culture he would most likely govern on the left end of the spectrum and very likely get a lot more people watching the Rogers Cable feed of City Hall debates. Would he make a good mayor for the City of Toronto? The trainwreck nuclear meltdown supernova currently taking place at City Hall really needs no introduction. Masochists, we've included his name on the list should you care to vote for him. We don't need to ask if he would make a good mayor for the City of Toronto because we already know the answer, and it is no. Cast your vote in Toronto's Fantasy Election below Follow Sarah Thomson @ThomsonTO WATCH: Mayor Ford charges at crowd and knocks over Cllr. Pam McConnell #TOpoli with Sarah Thomson, feat. 
Travis Myers, Andrea Houston, and Josh Rachlis 32 of the best protest signs from the #SaveToronto rally today at City Hall Everyday it is something new with Rob Ford. Like today, when the spectacle at City Hall got to be too much and Rob Ford charged at the crowd knocking over Councillor Pam McConnell. Seriously? Yep. Okay. That is my mayor. It is almost exhausting to watch this go on and on every day. Watch the video and let us know what you think, is Rob Ford utterly and completely out of control, or is there any semblance of a mayor left to salvage from this international trainwreck spectacle? WATCH: Doug Ford blows his stack in council defending brother, Mayor charges at opponent Just when you think things can't get any more insane in the circus that surrounds Rob Ford, like, you know, the massive protest outside City Hall today, new police files that allege prostitutes have visited the Mayor's office, and the now doldrum crack cocaine use that Rob Ford has admitted to, things got a little more crazy. In today's special debate surrounding Ford's recent actions his brother, Councillor Doug Ford, blew his stack by repeatedly demanding to know if Councillor Denzil Minnan-Wong has smoked marijuana — to the point where the speaker had to shut off his microphone and call a recess. After this was all said and done the Mayor stood up and charged at Minnan-Wong in a stance of physical intimidation while the gallery cried out that he is a bully. Minnan-Wong, once a fierce advocate of the Fords on Council, has turned his back on the administration after the most recent slew of revelations surrounding the mayor's crack use. Minnan-Wong has also expressed some interest in running for mayor himself. Watch the video and let us know what you think, is this behaviour (even divorced from the context of crack cocaine) acceptable in an elected leader? 
Living November 7, 2013 5 reasons (of many) to wear a red poppy this Remembrance Day This year a pacifist group in Ottawa rolled out the idea that the red poppy — you know, the one we wear to remember all the sacrifices made by our soldiers to protect our freedom — is a warmongering badge of evil and should be cast aside in favour of a white poppy. It is easy to forget in these relatively peaceful times the reasons why we wear the poppy, and oddly enough for that we should be thankful. Thankful that we have a generation so insulated from the horrors of war that they think we should do away with the poppy pin of remembrance in favour of a white pin of peace. But there are many reasons we wear the red pin, and maybe some folks need a reminder. The red poppy is a symbol of peace just as much as any other, and the reason we wear it is to remember the horrors of war and the selfless sacrifices made by those who have protected our nation, our safety, and our freedom so that no one will ever have to endure them again. Here are five of the many, many reasons to wear a red poppy this Remembrance Day. 1. Wear the poppy for the Battle of Vimy Ridge On April 9, 1917, an Easter Monday, 100,000 Canadian troops fighting within the British forces stormed a ridged area outside of the town of Vimy, France in a horrible snowstorm. Of those 100,000 Canadians 3,598 were killed and 7,004 were wounded. These were soldiers who, for the first time, were fighting for more than the British Crown — they were fighting for Canada. The spirit of our nation was created in the trenches of Vimy Ridge as our soldiers fought and died to protect Canada, and for that we should remember them by wearing a poppy. 2. Wear the poppy for the Second Battle of Ypres This battle, waged in Belgium, was fought by Canadians within British forces alongside the French and Belgians. The battle marked the first time poison gas was used in the large scale on the western front of the war. The results were catastrophic. 
70,000 men were wounded, dead, or missing after the use of chlorine gas, a chemical agent dispersed through the air that suffocated the soldiers (many of whom were conscripts) and ate away at the tissue in the lungs and eyes of soldiers until they either stumbled out into the battlefield to be shot or choked to death on their own blood. All in the name of freedom. Wear a red poppy to remember them. 3. Wear the poppy for Flanders Fields Regardless of how many times you had to read the poem in elementary school take a moment to pause and think about it. At an American military cemetery John McCrae passed through the day after his friend died in the Second Battle of Ypres. McCrae described the battle as a "nightmare" where for two straight weeks on one side was the never ending gunfire and the other side the piles of dead soldiers. McCrae performed the burial of his friend and the next day while sitting in the back of an ambulance he wrote the iconic poem which describes the horrors of war juxtaposed with the gift of peace that the fallen give to the living. By wearing the red poppy you are remembering the sacrifices made by all those who were laid to rest in Flanders Fields and swearing that these deaths were not in vain. Wear the red poppy to remember them and everything they did so that you may live in peace. 4. Wear a red poppy for the Holocaust To argue against the red poppy is not only an insult to all of those who died fighting for the freedom of Canadians and others around the world, it is an insult to those who died and survived the Holocaust. Millions of people were being helplessly exterminated before the Allied forces liberated them. These are people who were murdered while our soldiers fought to free them. Take a look at the numbers. 6 million Jews were murdered. 12.5 million Slavs were murdered. 15,000 gays were murdered. 2 million Poles were murdered. 1.5 million Romani were murdered. 250,000 disabled people were murdered. 
Countless thousands of others were murdered. When you wear the red poppy you are remembering the brave fight our soldiers made to free those they could save and remembering those they could not. 5. Wear a red poppy to help Canadian veterans today The poppies worn on lapels were first crafted by disabled veterans, who gave so much for us and our country, so that they could earn a small amount of money to support themselves and their families. The poppy campaign is now run by the Royal Canadian Legion to benefit veterans, many of whom need the income and support. The least you can do is respect the sacrifices they made for us here today by donating the change in your pocket for a red poppy. Gay Post November 6, 2013 FASHION: Style experts review Ford's crack announcement outfit "If I looked as good as him I wouldn't step down either." With the news of Rob Ford's crack use reverberating around City Hall this week in Toronto only one question remains that even the Toronto Police and the journalistic teams of the city's best papers haven't been asking: was Rob Ford wearing the right outfit to announce he has used crack? We consulted with the style experts to find out! Mayor Ford kept things simple in a black suit when he met reporters for both his proclamation and subsequent press conference. Remember that when the weather begins to get colder more muted tones are always a great way to get your message across. Black and white help RoFo give off an impression of both professionalism and style while apologizing to the people of Toronto for his crack cocaine use. Rob Ford, a portrait of fashion in Canada. Photo by Canadian Press. "Don't be surprised if you see a lot of the girls on Church street wearing XXXL suit jackets with poppies this season," says Toronto drag queen and tireless fashionista Barbie Jo Bontemps. "Style always seems to trickle down from fashion icons like Ford to the drag queens and then hit the runway in Paris." 
Always remember that for a larger man, black is your best friend. No one was thinking about Ford's ongoing weight struggle when he wore this number at the crack press conference yesterday! RoFo also seems to have done away with dress shirts that are his proper neck size and is starting a new trend of leaving the top button undone and covering it with his tie. Keep an eye on this — we may see much of the same at next year's Fashion Week. Ford's simple black suit looks like a rack number from Brooks Brothers or Moore's, a smart move that gives him all the style without the designer price tag, leaving him with a bit more pocket money to spend freely — and still look good doing it! One important fashion accessory in the fall is a poppy pin in support of the Royal Canadian Legion. Ford contrasts his black suit jacket well with the red pin. To get the RoFo look and show you care about veterans, freedom, and style at your next big drug related press conference pick up your own pin at many subway stations, shopping centre entrances, and Legion halls near you for as little as a dollar! For an extra bit of flair try to get your hands on the slightly more rare felt covered poppy. Ford also rocks a City of Toronto pin. While you may have never been mayor of the fourth largest city in North America you can still complete the look. Check out similar brooches on Etsy for around five bucks. Don't forget the flair! There is no need to be a bore when holding a press conference about the string of deceit and lies that he fed to voters in the aftermath of his crack tape becoming public knowledge, so Ford spiced up his outfit with an NFL team tie. Multi-coloured ties work great to break up solid coloured suits on men! "Vintage NFL ties are in this season." "Vintage NFL ties are in this season," says Toronto based artist and style icon Andrea Pelletier, while wearing her Dallas Texans scarf. 
Queen West fashionistas have already been seen sporting other Father's Day gifts as haute fashion lately including re-darned socks and coffee mugs. The tie gives Ford a chance to have a little fun with the outfit while showing off a bit of his personality. Did you know RoFo coached a high school football team for a number of years? You do now! By showing off his love of football on his tie Rob is letting us know he's just a normal guy who loves football and was eventually asked to remove himself from the team he coached due to months of bad press. Try to include a little bit of your own self into the story your clothes tell. Don't let your fun tie get out of control! Ford's multi-coloured neckpiece straddles the limit, but he made sure not to let the patterns get any busier — his look lets him have a little fun but still be taken seriously as he tells Torontonians he has smoked crack and lied about it for half a year. With his brother Doug flanking him wearing a similar outfit we can't help but wonder who wore it better, and the tie instantly pushes it in RoFo's favour. Not since Mary-Kate and Ashley Olsen have we seen such a stylish set of siblings with their own take on what makes them look good. "His graceful elegance and fashion forward thinking were the real takeaway from this press conference," says Bontemps. "When it comes to fashion, it's clear that yesterday was a pure victory for Mayor Ford. The public won't soon forget his debonair portrayal of this season's latest vestments." "If I looked as good as him," says Bontemps, "I wouldn't step down either." Our verdict on RoFo's outfit:
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
5,026
\subsection*{LSZ for Matrix Theory} The $N=2$ Matrix theory Hamiltonian \be H= \ft 1 2 P^0_\mu P^0_\mu + \Bigl ( \ft 12 \vec{P}_\mu \cdot \vec{P}_\mu + \, \ft 14 (\vec{X}_\mu \times \vec{X}_\nu)^2 + \, \ft i 2 \vec{X}_\mu\cdot \vec{\theta}\, \gamma_\mu\times \vec{\theta}\Bigr )\, \label{MTHam} \ee is a sum of an interacting $SU(2)$ part describing relative motions and a free $U(1)$ piece pertaining to the centre of mass. We use a vector notation for the adjoint representation of $SU(2)$, $\vec{X}_\mu=(Y^I_\mu,x_\mu)$ and $\vec{\theta}=(\theta^I,\theta^3)$ (with $I=1,2$ and $\mu=1,\ldots ,9$) and may choose a gauge in which $Y^I_9=0$. The model has a potential with flat directions along a valley floor in the Cartan sector $x_\mu$ and $\theta^3$. The remaining degrees of freedom transverse to the valley are supersymmetric harmonic oscillators in the variables $Y^I_\mu$ ($\mu\neq9$) and $\theta^I$. Upon introducing a large gauge invariant distance $x=(\vec{X}_9\cdot\vec{X}_9)^{1/2}=x_9$ as the separation of a pair of particles, the Hamiltonian \eqn{MTHam} was shown \cite{PW} to possess asymptotic two particle states of the form \be |p^1_\mu,{\cal H}^1;p^2_\mu,{\cal H}^2\rangle=|0_B,0_F\rangle\, \ft{1}{x_9}e^{i(p^1-p^2) \cdot x}e^{i(p_1+p_2)\cdot X^0} |{\cal H}^1\rangle_{\theta^0+\theta^3}\,|{\cal H}^2 \rangle_{\theta^0-\theta^3}\label{state} \ee Here $p^{1,2}_\mu$ and ${\cal H}^{1,2}$ are the momenta and polarizations of the two particles. The state $|0_B,0_F\rangle$ is the ground state of the superharmonic oscillators and the polarization states are the $\underline{44}\oplus\underline{84} \oplus\underline{128}$ representation of the $\theta^0\pm \theta^3$ variables, corresponding to the graviton, three-form tensor and gravitino respectively. 
For the computation of scattering amplitudes one may now form the $S$-matrix in the usual fashion $ S_{fi}\, =\, \langle {\rm out}| \exp \{-iHT\} |{\rm in}\rangle \, $ with the desired in and outgoing quantum numbers according to~(\ref{state}) \footnote{The asymptotic states above are constructed with respect to a large separation in the same direction for both in and outgoing particles, i.e.\ eikonal kinematics. More general kinematical situations are handled by introducing a rotation operator into the $S$-matrix \cite{PW1}.}. The object of interest is then the vacuum to vacuum transition amplitude \be e^{i\Gamma(x'_\mu,x_\mu,\theta^3)}= {}_{x^\prime_\mu}\langle 0_B,0_F| \exp \{-iHT\} | 0_B,0_F\rangle_{x_\mu}. \label{trans} \ee Note that the ground states actually depend on the Cartan variables $x_\mu$ and $x'_\mu$ through the oscillator mass. Also, both the left and right hand sides depend on the operator $\theta^3$. Our key observation is rather simple. In field theory one is accustomed to expand around a vanishing vacuum expectation value when computing the vacuum to vacuum transition amplitude for some field composed of oscillator modes. In quantum mechanics the idea is of course exactly the same, and therefore if one is to represent \eqn{trans} by a path integral one should expand the super oscillators transverse to the valley about a vanishing vev. One may then write the Matrix theory $S$-matrix in terms of a path integral with the stated boundary conditions \be e^{i\Gamma(v_\mu,b_\mu,\theta^3)}= \int_{{\vec{X}}_\mu=(0,0,x_\mu),\, {\vec{\theta}}=(0,0,\theta^3)} ^{{\vec{X}}_\mu=(0,0,x_\mu'),\, {\vec{\theta}}=(0,0,\theta^3)} {\cal D}(\vec{X}_\mu,\vec{A},\vec{b},\vec{c},\vec{\theta})\, \exp(i\,\int_{-T/2}^{T/2}L_{\rm SYM}). \ee The Lagrangian $L_{\rm SYM}$ is that of the supersymmetric Yang--Mills quantum mechanics with appropriate gauge fixing to which end we have introduced ghosts $\vec{b}$, $\vec{c}$ and the Lagrange multiplier gauge field $\vec{A}$. 
The effective action $\Gamma(v_\mu,b_\mu,\theta^3)$ is most easily computed via an expansion about classical trajectories $X^3_\mu(t)\equiv x_\mu^{\rm cl}(t) =b_\mu+v_\mu t$ and constant $\theta^3(t)=\theta^3$ which yields the quoted boundary conditions through the identification $b_\mu=(x'_\mu+x_\mu)/2$ and $v_\mu=(x'_\mu-x_\mu)/T$. Up to an overall normalization ${\cal N}$, our LSZ reduction formula for Matrix theory is simply \bea S_{fi}&=&\delta^9(k'_\mu-k_\mu)e^{-ik_\mu k_\mu T/2}\nonumber\\ &&\hspace{0cm} \int d^9x' d^9x \,{\cal N}\, \exp(-iw_\mu x'_\mu +iu_\mu x_\mu) \langle {\cal H}^3| \langle {\cal H}^4|e^{i\Gamma(v_\mu,b_\mu,\theta^3)} |{\cal H}^1\rangle |{\cal H}^2\rangle \label{superS} \eea The leading factor expresses momentum conservation for the centre of mass where we have denoted $k_\mu=p_\mu^1+p_\mu^2$ and $k'_\mu=p_\mu^3+p_\mu^4$ for the in and outgoing particles, respectively, and similarly for the relative momenta $u_\mu=(p_\mu^1-p_\mu^2)/2$ and $w_\mu=(p_\mu^4-p_\mu^3)/2$. In a loopwise expansion of the Matrix theory path integral one finds $\Gamma(v_\mu,b_\mu,\theta^3)=v_\mu v_\mu T/2+ \Gamma^{(1)} +\Gamma^{(2)}+\ldots$ of which we consider only the first two terms in order to compare our results with tree level supergravity. Inserting this expansion into~(\ref{superS}) and changing variables $d^9x' d^9x \rightarrow d^9 (Tv) d^9 b$, the integral over $Tv_\mu$ may be performed via stationary phase. Dropping the normalization and the overall centre of mass piece the $S$-matrix then reads \be S_{fi}=e^{-i[(u+w)/2]^2 T/2} \int d^9b \, e^{-i q_\mu b_\mu}\, \langle {\cal H}^3| \langle {\cal H}^4| e^{i\Gamma(v_\mu=(u_\mu+w_\mu)/2,b_\mu,\theta^3)} |{\cal H}^1\rangle |{\cal H}^2\rangle \label{sfi} \ee where $q_\mu=w_\mu-u_\mu$. 
It is important to note that in~(\ref{sfi}) the variables $\theta^3$ are operators $\{\theta^3_\a,\theta^3_\b\}=\delta_{\a\b}$ whose expectation between polarization states $|{\cal H}\rangle$ yields the spin dependence of the scattering amplitude. The loopwise expansion of the effective action should be valid for the eikonal regime, i.e. large impact parameter $b_\mu$ or small momentum transfer $q_\mu$. As we shall see below, this limit is dominated by $t$-channel physics on the supergravity side. \subsection*{D0 Brane Computation of the Matrix Theory Effective Potential} We must now determine the one-loop effective Matrix potential $\Gamma (v,b,\theta^3)$, namely the $v^4/r^7$ term and its supersymmetric completion. Fortunately the bulk of this computation has already been performed in string theory by \cite{mss1,mss2} who applied the Green-Schwarz boundary state formalism of \cite{gregut1} to a one-loop annulus computation for a pair of moving D0-branes. They found that the leading spin interactions are dictated by a simple zero modes analysis and their form is, in particular, scale independent. This observation allows to extrapolate the results of \cite{mss1,mss2} to short distances and suggest a Matrix theory description for tree-level supergravity interactions. Following \cite{mss1,mss2}, supersymmetric D0-brane interactions are computed from the correlator \be {\cal V}=\frac{1}{16}\int_0^\infty \!\!dt \, \langle B,\vec{x}=0|e^{-2\pi t\alpha^{\prime} p^+(P^--i\partial/\partial x^+)} e^{(\eta Q^-+\tilde{\eta}\tilde{Q}^-)}e^{V_B}|B,\vec{y}=\vec{b} \rangle \label{cyl} \ee with $Q^-,\tilde{Q}^-$ being the SO($8$) supercharges broken by the presence of the D-brane, $|B\rangle$ the boundary state associated to D0-branes and $V_B=v_i\oint_{\tau=0}\!d\sigma\left(X^{[1}\partial_{\sigma}X^{i]} +\frac{1}{2}S\,\gamma^{1i}S\right)$ is the boost operator where the direction 1 has to be identified with the time (see \cite{mss1,mss2} for details). 
Expanding (\ref{cyl}) and using the results in section four of \cite{mss2}, one finds the following compact form for the leading one-loop Matrix theory potential (normalizing to one the $v^4$ term and setting $\alpha^\prime=1$) \bea {\cal V}_{\rm 1-loop}&=& \Bigl [ v^4 + 2i\, v^2\,v_m\ps{m}{n}\, \partial_n -2\, v_p v_q \ps{p}{m}\ps{q}{n}\,\partial_m \partial_n\nonumber\\ &&\quad -\frac{4i}{9}\, v_q\ps{q}{m}\ps{n}{k}\ps{p}{k}\,\partial_m\partial_n\partial_p \nonumber \\ &&\quad + \frac{2}{63}\, \ps{m}{l}\ps{n}{l}\ps{p}{k}\ps{q}{k}\, \partial_m\partial_n\partial_p \partial_q\Bigr ]\, \frac{1}{r^7} \label{pot} \eea where $\theta=(\eta^a, \tilde \eta^{\dot a})$ should be identified with $\theta^3/2$ of the last section. The general structure of this potential was noted in \cite{harv} and its first, second and last terms were calculated in \cite{dkps},\cite{Kraus97} and \cite{static} respectively. Naturally it would be interesting to establish the supersymmetry transformations of this potential; for a related discussion see \cite{pss1}. \subsection*{Results} Our Matrix computation is completed by taking the quantum mechanical expectation of the effective potential \eqn{pot} between the polarization states of \eqn{sfi}. Clearly one can now study any amplitude involving gravitons, three--form tensors and gravitini. We choose to compute a $h_1 + h_2 \rightarrow h_4 + h_3$ graviton-graviton process, and thus prepare states \bea |{\rm in}\rangle&=& \ft{1}{256}\, h^1_{mn}\, (\ld{1}\gamma_{m}\ld{1})(\ld{1}\gamma_n\ld{1}) \, h^2_{pq}\, (\ld{2}\gamma_{p}\ld{2})(\ld{2}\gamma_q\ld{2})\, |-\rangle \, . 
\label{in} \nn\\ \langle{\rm out}|&=& \ft{1}{256}\, \langle -|\, h^4_{mn}\, (\la{1}\gamma_{m}\la{1})(\la{1}\gamma_n\la{1}) \, h^3_{pq}\, (\la{2}\gamma_{p}\la{2})(\la{2}\gamma_q\la{2}) \label{out} \eea Note that (following \cite{PW}) we have complexified the Majorana centre of mass and Cartan spinors $\theta^0$ and $\theta^3$ in terms of $SO(7)$ spinors $\lambda^{1,2}=(\theta^0_+\pm\theta^3_+ +i\theta^0_-\pm i\theta^3_-)/2$ where $\pm$ denotes projection with respect to $\gamma_9$. Actually the polarizations in \eqn{out} are seven dimensional but may be generalized to the nine dimensional case at the end of the calculation. We stress that these manoeuvres are purely technical and our final results are $SO(9)$ covariant. The creation and destruction operators $\lambda^\dagger_{1,2}$ and $\lambda_{1,2}$ annihilate the states $\langle-|$ and $|-\rangle$, respectively. The resulting one loop eikonal Matrix theory graviton-graviton scattering amplitude is comprised of 68 terms and (denoting e.g.\ $(q h_1h_4 v)=q_\mu h^1_{\mu\nu} h^4_{\nu\rho}v_\rho$ and $(h_1 h_4)=h^1_{\mu\nu}h^4_{\nu\mu}$) is given by \begin{eqnarray} {\cal A}&\,\,= \,\,\frac{\textstyle 1}{\textstyle q^2}\,\,\Biggl\{\,\,& \ft12(h_1 h_4)(h_2 h_3) v^4 + 2\Bigr[(q h_3 h_2 v) (h_1 h_4) - (q h_2 h_3 v) (h_1 h_4)\Bigr] v^2 \nn\\&&\hspace{-.23cm} + (vh_2v) (qh_3q)(h_1 h_4) + (vh_3v) (qh_2q)(h_1 h_4) - 2(qh_2v) (qh_3v)(h_1 h_4) \nn\\&&\hspace{-.23cm} - 2 (qh_1h_4v) (qh_3h_2v) + (qh_1h_4v) (qh_2h_3v) + (qh_4h_1v) (qh_3h_2v) \nn\\&&\hspace{-.23cm} + \ft{1}{2}\Bigl [(qh_1h_4h_3h_2q) - 2(qh_1h_4h_2h_3q) + (qh_4h_1h_2h_3q) - 2(qh_2h_3q)(h_1 h_4) \Bigr ] v^2 \nn\\&&\hspace{-.23cm} - (qh_2v) (qh_3q) (h_1h_4) + (qh_2q) (qh_3v) (h_1h_4) - (qh_1q) (qh_2h_3h_4v) + (qh_1q) (qh_3h_2h_4v) \nn\\&&\hspace{-.23cm} - (qh_4q) (qh_2h_3h_1v) + (qh_4q) (qh_3h_2h_1v) - (qh_1v) (qh_4h_2h_3q) + (qh_1v) (qh_4h_3h_2q) \nn\\&&\hspace{-.23cm} - (qh_4v) (qh_1h_2h_3q) + (qh_4v) (qh_1h_3h_2q) + (qh_1h_4q) (qh_2h_3v) - (qh_1h_4q) (qh_3h_2v) 
\nn\\&&\hspace{-.23cm} +\ft18 \Bigl[ (qh_1q) (qh_2q) (h_3h_4) +2 (qh_1q) (qh_4q) (h_2h_3) +2 (qh_1q) (qh_3q) (h_2h_4) \nn\\&&\hspace{-.23cm} + (qh_3q) (qh_4q) (h_1h_2) \Bigr] + \ft12\Bigl[ (qh_1q) (qh_4h_2h_3q) - (qh_1q) (qh_2h_4h_3q) \nn\\&&\hspace{-.23cm} - (qh_1q) (qh_4h_3h_2q) - (qh_4q) (qh_1h_2h_3q) + (qh_4q) (qh_1h_3h_2q) - (qh_4q) (qh_2h_1h_3q) \Bigr] \nn\\&&\hspace{-.23cm} + \ft14\Bigl[ (qh_1h_3q) (qh_4h_2q) + (qh_1h_2q) (qh_4h_3q) + (qh_1h_4q) (qh_2h_3q) \Bigr] \, \Biggr\} \nn\\&&\hspace{-.73cm} +\,\, \Bigl[h_1 \longleftrightarrow h_2\, , \, h_3 \longleftrightarrow h_4 \Bigr] \label{Ulle} \end{eqnarray} We have neglected all terms within the curly brackets proportional to $q^2\equiv q_\mu q_\mu$, i.e. those that cancel the $1/q^2$ pole. These correspond to contact interactions in the D0 brane computation, whereas this calculation is valid only for non-coincident branes. \subsection*{$D=11$ Supergravity} The above leading order result for eikonal scattering in Matrix theory is easily shown to agree with the corresponding eleven dimensional field theoretical amplitude. Tree level graviton--graviton scattering is dimension independent and has been computed in \cite{San}. We have double checked that work by a type IIA string theory computation and will not display the explicit result here which depends on eleven momenta $p^i_M$ (with $i=1,\ldots,4$) and polarizations $h^i_{MN}$ subject to the de Donder gauge condition $p^i_N h^i{}_M{}^N-(1/2)p^i_M h^i{}_N{}^N=0$ (no sum on $i$). Matrix theory, on the other hand, is formulated in terms of on shell degrees of freedom only, namely transverse physical polarizations and euclidean nine-momenta. Going to light-cone variables for the eleven momenta $p^i_M$ we take the case of vanishing $p^-$ momentum exchange \footnote{We denote $p_\pm=p^\mp =(p^{10}\pm p^0)/\sqrt{2}$ and our metric convention is $\eta_{MN}={\rm diag} (-,+\ldots,+)$.}, i.e. 
the scenario of our Matrix computation, \bea p_M^1=(-\ft12\,(v_\mu-q_\mu/2)^2 ,\, 1\, , v_\mu-q_\mu/2 ) &\quad& p_M^2=(-\ft12\, (v_\mu-q_\mu/2)^2 ,\, 1\, , -v_\mu+q_\mu/2) \nn\\ p_M^4=(-\ft12\, (v_\mu+q_\mu/2)^2 ,\, 1\, , v_\mu+q_\mu/2) &\quad & p_M^3=(-\ft12\, (v_\mu+q_\mu/2)^2 ,\, 1\, , -v_\mu-q_\mu/2) \, .\label{kinematics} \eea By transverse Galilean invariance we have set to zero the nine dimensional centre of mass momentum. We measure momenta in units of $p_-$ which we set to one. For this kinematical situation conservation of $p_+$ momentum clearly implies $v_\mu q_\mu=0$. Note that the vectors $u_\mu$ and $w_\mu$ of~(\ref{superS}) are simply $u_\mu=v_\mu-q_\mu/2$ and $w_\mu=v_\mu+q_\mu/2$. We reduce to physical polarizations by using the residual gauge freedom to set $h^i_{+M}=0$ and solve the de Donder gauge condition in terms of the transverse traceless polarizations $h^i_{\mu\nu}$ for which one finds $h^i_{-M}=-p^i_\nu h^i_{\nu M}$. Agreement with the Matrix result \eqn{Ulle} is then achieved by taking the eikonal limit $v_\mu \gg q_\mu$ of the gravity amplitude in which the $t$-pole contributions dominate\footnote{In the above parametrization, the Mandelstam variables are $t=q_\mu^2=-2p^1_M p_4^M$, $s=4v_\mu^2+q^2_\mu=2p^1_M p_2^M$ and $u=4v_\mu^2=-2p^1_M p_3^M=s-t$.}. One then reproduces exactly \eqn{Ulle} as long as any pieces cancelling the $t$-pole (i.e. the aforementioned $q^2$ terms) are neglected. Although we have only presented here a Matrix scattering amplitude restricted to the eikonal regime, we nevertheless believe the agreement found is rather impressive. \subsection*{Acknowledgements} We thank B. de Wit, S. Moch, K. Peeters and J. Vermaseren for discussions. Our computation made extensive use of the computer algebra system FORM \cite{Jos}.
{ "redpajama_set_name": "RedPajamaArXiv" }
1,785
Saccharum officinarum is a large, strong-growing species of grass in the genus Saccharum. Its stout stalks are rich in sucrose, a simple sugar which accumulates in the stalk internodes. It originated in New Guinea, and is now cultivated in tropical and subtropical countries worldwide for the production of sugar, ethanol and other products. Saccharum officinarum is one of the most productive and most intensively cultivated kinds of sugarcane. It can interbreed with other sugarcane species, such as Saccharum sinense and Saccharum barberi. The major commercial cultivars are complex hybrids. About 70% of the sugar produced worldwide comes from S. officinarum and hybrids using this species. Description Saccharum officinarum, a perennial plant, grows in clumps consisting of a number of strong unbranched stems. A network of rhizomes forms under the soil which sends up secondary shoots near the parent plant. The stems vary in colour, being green, pinkish, or purple and can reach in height. They are jointed, nodes being present at the bases of the alternate leaves. The internodes contain a fibrous white pith immersed in sugary sap. The elongated, linear, green leaves have thick midribs and saw-toothed edges and grow to a length of about and width of . The terminal inflorescence is a panicle up to long, a pinkish plume that is broadest at the base and tapering towards the top. The spikelets are borne on side branches and are about long and are concealed in tufts of long, silky hair. The fruits are dry and each one contains a single seed. Sugarcane harvest typically occurs before the plants flower, as the flowering process causes a reduction in sugar content. Taxonomy Saccharum officinarum was first domesticated in New Guinea and the islands east of the Wallace Line by Papuans, where it is the modern center of diversity. Beginning at around 6,000 BP it was selectively bred from the native Saccharum robustum. 
From New Guinea it spread westwards to Island Southeast Asia after contact with Austronesians, where it hybridized with S. spontaneum. The Hawaiian word for this species is kō. Uses Portions of the stem of this and several other species of sugarcane have been used from ancient times for chewing to extract the sweet juice. It was cultivated in New Guinea about 8,000 years ago for this purpose. Extraction of the juice and boiling to concentrate it was probably first done in India more than 2,000 years ago. Saccharum officinarum and its hybrids are grown for the production of sugar, ethanol, and other industrial uses in tropical and subtropical regions around the world. The stems and the byproducts of the sugar industry are used for feeding to livestock. Pigs fed on sugarcane juice and a soy-based protein supplement produced stronger piglets that grew faster than those on a more conventional diet. As its specific name (officinarum, "of dispensaries") implies, it is also used in traditional medicine both internally and externally. See also Domesticated plants and animals of Austronesia References External links officinarum Flora of New Guinea Crops originating from Asia Flora of the Dominican Republic Plants described in 1753 Taxa named by Carl Linnaeus Flora without expected TNC conservation status
{ "redpajama_set_name": "RedPajamaWikipedia" }
7,249
Q: Rearrange CSV, delete the old one and re-write Let's say I have a CSV file Name,Age john,20 ana,30 steven,25 frank,27 What about if I want to order it in descending order depending on "age" value, to have something like this Name,Age ana,30 frank,27 steven,25 john,20 and then delete all the CSV rows and rewrite them depending on the new order? I know how to create hashes from the CSV and order it, but not how to achieve the described result. A: Ruby has a CSV library. You can use it to read, sort, and write your CSV files. The CSV class is a wrapper around CSV::Table. CSV::Table deals with the data itself, while CSV mostly provides the ability to read from files. require 'csv' orig_table = CSV.table("test.csv"); orig_table is a CSV::Table object. Unfortunately you can't directly sort its rows in place, but you can sort them into an Array of CSV::Row objects and put that into a new CSV::Table. sorted_rows = orig_table.sort { |a,b| b[:age] <=> a[:age] } sorted_table = CSV::Table.new(sorted_rows) Then you can get a CSV string out of that with to_csv and write it back to the file. File.open("test.csv", "w").write(sorted_table.to_csv)
{ "redpajama_set_name": "RedPajamaStackExchange" }
7,733
{"url":"http:\/\/gauravtiwari.org\/category\/math\/problems\/page\/2\/","text":"# Category Archives: Problems\n\n## Do you multiply this way!\n\nBefore my college days I used to multiply\u00a0this way. But as time passed, I learned new things. In a Hindi magazine named \u201cBhaskar Lakshya\u201d, I read an article in which a columnist ( I can\u2019t remember his name) suggested how [\u2026]\n\n## Just another way to Multiply\n\nMultiplication is probably the most important elementary operation in mathematics; even more important than usual addition. Every math-guy has its own style of multiplying numbers. But have you ever tried multiplicating by this way? Exercise: $88 \\times 45$ =? [\u2026]\n\n## How Genius You Are?\n\nLet have a Test: You need to make a calculation. Please do neither use a calculator nor a paper. Calculate everything \u201cin your brain\u201d. Take 1000 and add 40. Now, add another 1000. Now add 30. Now, add 1000 again. [\u2026]\n\n## A Problem On Several Triangles\n\nA triangle $T$ is divided into smaller triangles such that any two of the smaller triangles either have no point in common, or have a vertex in common, or actually have an edge in common. Thus no two [\u2026]\n\n## Two Interesting Math Problems\n\nProblem1: Smallest Autobiographical Number: A number with ten digits or less is called autobiographical if its first digit (from the left) indicates the number of zeros it contains,the second digit the number of ones, third digit number of twos and [\u2026]\n\n## Chess Problems\n\nIn how many ways can two queens, two rooks, one white bishop, one black bishop, and a knight be placed on a standard $8 \\times 8$ chessboard so that every position on the board is under attack by at [\u2026]\n\n## How many apples did each automattician eat?\n\nFour friends Matt, James, Ian and Barry, who all knew each other from being members of the Automattic, called Automatticians, sat around a table that had a dish with 11 apples in it. 
The chat was intense, and they ended [\u2026]","date":"2014-10-21 19:55:06","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.6091668009757996, \"perplexity\": 1816.887795322096}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 5, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2014-42\/segments\/1413507444774.49\/warc\/CC-MAIN-20141017005724-00349-ip-10-16-133-185.ec2.internal.warc.gz\"}"}
null
null
Meet With A Trusted Homebuyer Today, locally in the Bradenton, Florida area. David buys houses in any area, no matter the condition or situation. Remember, We Buy Houses In Bradenton! No Real Estate Fees Or Commissions. Why pay someone 6-7% commission, to access the money that you earned? That is like going to the bank to withdraw some money and the teller taking a 6-7% finders fee. Something seems off, right? When we make you an offer we will pay all closing costs and charge no commissions or fees. Do not pay another mortgage payment or electric bill. Every month your home is on the market that is another payment or bill to be paid. No Strangers In Your Home. Placing your house on the market means new homebuyers and headaches. Do you really need the added stress of showing strangers your home on the weekends? More so, strange visitors in your household is disruptive and uncomfortable for your family as well. Overcoming the complaints, queries, and opinions of first-time homebuyers can be frustrating to deal with. Fortunately, David Buys Houses is a real cash buyer willing to take on the added stress of first-time homebuyers for you. No Repairs Necessary. A realtor will demand repairs to be made before showing the house. What happens if you do not have the time or money for the repairs? Sell your house "As Is" today, do not fix a single thing, we will make all the necessary repairs. Skip The Loan Approval. Circumstances show there are a number of people who want to buy your house but do not have the money. A homeowner must qualify for a loan and have anywhere from 10-30% down! How many newlyweds do you know that have that kind of cash laying around? Why wait for the cash? Reap the rewards of convenience when working with me. Selling your house fast with me means flexible closing dates and a speedy sale. Do you live in Florida, specifically Bradenton, and want to learn more about how to sell your house fast? 
Click the button below to fill out a form, once I receive your form I will call you to make an offer. As soon as we okay the paperwork, consider the house sold. It is really that simple. Who knew selling a house without a realtor is so easy?!?
{ "redpajama_set_name": "RedPajamaC4" }
3,355
With a stay at FabHotel Lotus Koregaon Park in Pune (Koregaon Park), you'll be minutes from Osho Teerth Gardens and Rotary Riverside Joggers Park. Featured amenities include dry cleaning/laundry services, a 24-hour front desk, and luggage storage. Free self parking is available onsite.
{ "redpajama_set_name": "RedPajamaC4" }
2,392
{"url":"https:\/\/codereview.stackexchange.com\/questions\/37966\/handling-templates-for-a-web-application","text":"Handling templates for a web application\n\nI am writing up a class to handle various templates to be used in a web application. Each template contains various placeholders which will need to be replaced at the time of the build. I am wondering if the method I am using is the best solution, or if I should consider something else. At the moment I have two separate functions addPlaceholder() and addMultiplePlaceholders(). Each performs as their names suggest.\n\nThe function addPlaceholder() has two parameters one, the key $k, must be a string while the other, the value $v, can be anything. These get stored in the classes placeholder array.\n\nThe function addMultiplePlaceholders() has only one parameter, $array, and it should be an associative array. This function calls addPlaceholder() for each index of $array to store the corresponding values. The reason for the call to an existing function is to help alleviate duplicitous code, and when only a single placeholder is in use to bypass the foreach loop.\n\nIs this the best method to accomplish this? Would it be better to forgo addPlaceholder() and just use addMultiplePlaceholders() for everything? Perhaps, overloading addPlaceholder() would be a better solution having one version which accepts the two parameters, and another which accepted the array. 
However, I would be in the same situation as now having to decide whether to duplicate the code for adding the placeholder to the class property, or calling the other function.\n\npublic function addPlaceholder($k,$v){\nif(empty($k)){ throw new Exception('Class Template->addPlaceholder() '.'The placeholder string cannot be empty.'); } if(isset($this->placeholders[$k])){ throw new Exception('Class Template->addPlaceholder() '.'['.$k.'] is a duplicate entry');\n}\n\n$this->placeholders[$k] = $v; } public function addMultiplePlaceholders($array){\nif(!is_associative($array)){ \/\/ if $array is not an associative array throw exception\nthrow new Exception(\"The supplied parameter value is not an associative array\");\n}\n\nforeach($array as$key => $value){ try{ addPlaceholder($key,$value); }catch(Exception$e){\n$msg =$e->getMessage();\npreg_replace(preg_quote('Class Template->'), '', $msg); throw new Exception(\"Class Template->addMultiplePlacerholders()->\".$msg);\n}\n}\n}\n\n\u2022 Your solution sounds fine. By the way, \"duplicitous\" means \"treacherous\" and has nothing to do with duplicates :) Dec 23 '13 at 18:51\n\nTo answer just the question in the title; Your solution seems fine to me; addMultiplePlaceholders() is a utility method that acts as a wrapper for addPlaceholder()\n\nNote there's a bug in your code; addMultiplePlaceholders() calls addPlaceholder() without $this-> 'prefix', so your code won't work However, there is room for improvement. I'll try to summarize what I'd do differently. Remove redundant information from your Exception messages Exceptions are 'smart' messages in that they already contain all the meta-information you need to find where they occurred (e.g. line number, file etc). Don't clutter your messages with that information, It'll only complicate your code (for example the preg_replace). Just include the bare error-message. See the documentation on Exceptions. If your going to output a message, make sure they cannot be mis-interpreted. 
Being consistent in your messages will prevent confusion and make it easier to debug your code later on, for example: throw new Exception('The placeholder string cannot be empty'); Is confusing; What is the placeholder string? The name ($k)? Value ($v)? Outputting the meta-information can be done when printing the exception, for example; Just use this to throw your exception; throw new Exception('Placeholder name cannot be empty'); And output it in your Exception-handler with this; printf( \"An Exception has occurred: '%s' in file '%s' on line '%s',$e->getMessage(),\n$e->getFile(),$e->getLine()\n);\n\n\/\/ --> An Exception has occurred: 'The placeholder string cannot be empty' in file 'Template.php' on line '52'\n\n\nSince you're no longer including the name of the Class\/Method in your messages, you can completely remove the 'try\/catch' from your 'addMultiplePlaceholders'.\n\nUse the right 'type' of Exception\n\nTo be more expressive\/specific when throwing Exceptions, consider using special Exceptions for a situation, for example, the SPL Exceptions. In this case an InvalidArgumentException will be appropriate\n\nConsider what really is an exception\n\nInside addPlaceholder, you're throwing an exception if a key is already set. Personally, I wouldn't do this (it does feel like 'business logic'). Technically, overwriting the value of a placeholder that was already set won't cause any problem. Disallowing overwriting existing placeholders may limit you in situations that you want to overwrite a placeholder.\n\nHowever, renaming addPlaceholder to setPlaceholder may be more appropriate, to indicate that you can both 'add' or 'update\/replace' a placeholder.\n\nUse descriptive variable\/argument-names\n\nThe arguments for addPlaceholder are not very descriptive. Additionally, the variable names you're using seem to be picked based on the the 'inner workings' of the method (key\/value pairs), which should be of no interest when using that method. 
I'd suggest something more describing the purpose of the arguments, for example:\n\npublic function addPlaceholder($name,$value)\n{\n\/\/...\n}\n\n\nLikewise, replace '$array' to something more describing its purpose, for example public function addMultiplePlaceholders($nameValuePairs)\n{\n\/\/...\n}\n\n\/\/ or..\n\npublic function addMultiplePlaceholders($placeholders) { \/\/... } If you want to explicitly indicate you're expecting an array (and have PHP check this for you), you can add a type-hint, like this: public function addMultiplePlaceholders(array$placeholders)\n{\n\/\/...\n}\n\n\nFinally, naming this method addMultiplePlaceholders may be a bit verbose. Since the method is plural, it's probably already clear that it is used to add multiple placeholders, so simply calling it addPlaceholders may be clear enough.\n\nDo not describe code that is self-explanatory. In general, try to omit inline comments in all cases, unless a piece of code really is confusing.\n\nIn most cases, don't describe the code, but describe the purpose \/ intent of a function\/method inside a PhpDoc comment.\n\nFor example:\n\nif(!is_associative($array)){ \/\/ if $array is not an associative array throw exception\nthrow new Exception(\"The supplied parameter value is not an associative array\");\n}\n\n\nThis comment is just repeating the code! Also, inline code is not used by IDE's and will not be included in automatically generated documentation (e.g. phpdocumentor). 
It's best to move relevant information to a PhpDoc block\n\nFor example:\n\n\/**\n* Adds placeholder(s) passed as an associative array ('name' => value)\n*\n* Name should be a string, value can be any type of value\n*\n* This method is a wrapper for @see addPlaceholder()\n*\n* @param array $nameValuePairs * * @throws InvalidArgumentException if$nameValuePairs is not an associative array\n* or an empty name is used for a placeholder\n* @return void\n*\/\npublic function addMultiplePlaceholders(array $nameValuePairs) { \/\/... } Modified code This is the code after applying my suggestions \/** * Adds or updates a placeholder * * @param string$name Name of the placeholder. Cannot be empty\n* @param mixed $value Value of the placeholder * * @throws InvalidArgumentException if$name is invalid (empty)\n*\n* @return void\n*\/\npublic function setPlaceholder($name,$value)\n{\nif (empty($name)) { throw new InvalidArgumentException('Placeholder$name cannot be empty.');\n}\n\n$this->placeholders[$name] = $value; } \/** * Adds or updates placeholder(s), passed as an associative array ('name' => value) * * Name should be a string, value can be any type of value * * This method is a wrapper for @see addPlaceholder() * * @param array$placeholders placeholder(s), passed as an associative array ('name' => value)\n*\n* @throws InvalidArgumentException if $placeholders is not an associative array * or an empty name is used for a placeholder * @return void *\/ public function setPlaceholders(array$placeholders)\n{\nif (!is_associative($nameValuePairs)) { throw new InvalidArgumentException('Parameter$placeholders is not an associative array');\n}\n\nforeach (placeholders as $name =>$value) {\n$this->addPlaceholder($name, $value); } } \u2022 Wow, and thank you!! This response is far more than I had expected. I have been doing some research on the how to properly use exceptions this is my first attempt. Again, thank you. Dec 28 '13 at 5:25 \u2022 @BrookJulias you are welcome. 
My description of Exception handling is nowhere near complete, but it might give you some ideas. Be sure to check out the source code of some popular frameworks (e.g. Symphony, CakePHP). It's a good source to obtain ideas and learn. Dec 28 '13 at 16:33 Two remarks: I share your \"feeling\" that this is somehow a \"code duplication\": Writing addPlaceHolder(array(\"key\" => someValue)) is equally understandable and not that much more effort to write than addPlaceHolder(\"key\", someValue\") Also providing only the version with multiple key\/value pairs will encourage \"clients\" to pass values \"in a single step\" which I consinder to be a good thing (otherwise, filling the template might be unnecessarily cluttered and thus seeing which values go into the template might be more difficult). Therefore I'd probably go with only one function. Also the addMultiplePlaceholders function can be easily implemented using PHP's array_merge builtin. Detecting duplicate keys can be done using array_intersect_keys, and you can also simply check for the \"empty\" key in the argument array. So the function could look like public function addPlaceholders($placeholders){\nif(!is_associative($array)){ throw new Exception(\"The supplied parameter value is not an associative array\"); } if (array_key_exists(\"\",$placeholders)) {\nthrow new Exception('The placeholder string cannot be empty.');\n}\n$intersect = array_intersect_key($placeholders, $this->placeholders); if (count($intersect) > 0) {\nthrow new Exception('Duplicate entries: ' . 
implode(', ', array_keys($intersect))); }$this->placeholders = array_merge($this->placeholders,$placeholders);\n}","date":"2021-09-18 17:49:05","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.22173045575618744, \"perplexity\": 3792.4384184220703}, \"config\": {\"markdown_headings\": false, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2021-39\/segments\/1631780056548.77\/warc\/CC-MAIN-20210918154248-20210918184248-00417.warc.gz\"}"}
null
null
Q: Capturing ARSCNView with virtual objects - iOS I have an ARSCNView with virtual objects drawn. The virtual objects are drawn on the user's face. The session has the following configuration: let configuration = ARFaceTrackingConfiguration() configuration.worldAlignment = .gravityAndHeading sceneView.session.run(configuration) This ARSCNView is part of a video call. If we send back the pixel buffer like below, public func session(_ session: ARSession, didUpdate frame: ARFrame) { videoSource.sendBuffer(frame.capturedImage, timestamp: frame.timestamp) } The virtual objects are not shown to my caller. One of the things I tried is, to not rely on ARSessionDelegate's callback but use DispatchSourceTimer to send events. func startCaptureView() { // Timer with 0.1 second interval timer.schedule(deadline: .now(), repeating: .milliseconds(100)) timer.setEventHandler { [weak self] in // Turn sceneView data into UIImage guard let sceneImage: CGImage = self?.sceneView.snapshot().cgImage else { return } self?.videoSourceQueue.async { [weak self] in if let buffer: CVPixelBuffer = ImageProcessor.pixelBuffer(forImage: sceneImage) { self?.videoSource.sendBuffer(buffer, timestamp: Double(mach_absolute_time())) } } } timer.resume() } The caller receives the data slowly with a choppy video experience and the images are not of the right size. Any suggestions on how to send data about the virtual object along with the captured frame? Reference: https://medium.com/agora-io/augmented-reality-video-conference-6845c001aec0 A: The reason the Virtual objects are not appearing is because ARKit provides only the raw image, so frame.capturedImage is the image captured by the camera, without any of the SceneKit rendering. To pass the rendered video you will need to implement an offscreen SCNRenderer and pass the pixel buffer to Agora's SDK. I would recommend you check out the Open Source framework AgoraARKit. 
I wrote the framework and it implements Agora.io Video SDK and ARVideoKit as dependancies. ARVideoKit is a popular library that implements an off-screen renderer and provides the rendered pixel buffer. The library implements WorldTracking by default. If you want to extend the ARBroadcaster class to implement faceTracking you could use this code: import ARKit class FaceBroadcaster : ARBroadcaster { // placements dictionary var faceNodes: [UUID:SCNNode] = [:] // Dictionary of faces override func viewDidLoad() { super.viewDidLoad() } override func setARConfiguration() { print("setARConfiguration") // Configure ARKit Session let configuration = ARFaceTrackingConfiguration() configuration.isLightEstimationEnabled = true // run the config to start the ARSession self.sceneView.session.run(configuration) self.arvkRenderer?.prepare(configuration) } // anchor detection override func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) { super.renderer(renderer, didAdd: node, for: anchor) guard let sceneView = renderer as? ARSCNView, anchor is ARFaceAnchor else { return } /* Write depth but not color and render before other objects. This causes the geometry to occlude other SceneKit content while showing the camera view beneath, creating the illusion that real-world faces are obscuring virtual 3D objects. */ let faceGeometry = ARSCNFaceGeometry(device: sceneView.device!)! faceGeometry.firstMaterial!.colorBufferWriteMask = [] let occlusionNode = SCNNode(geometry: faceGeometry) occlusionNode.renderingOrder = -1 let contentNode = SCNNode() contentNode.addChildNode(occlusionNode) node.addChildNode(contentNode) faceNodes[anchor.identifier] = node } }
{ "redpajama_set_name": "RedPajamaStackExchange" }
6,779
HALF PRICE Shakes, Floats & Ice Cream Slushes at Sonic Drive-In! Head over to Sonic Drive-In for HALF PRICE Shakes, Floats & Ice Cream Slushes EVERY DAY after 8pm! Click Here to find a location near you. *See store for complete details.
{ "redpajama_set_name": "RedPajamaC4" }
388
\section{Introduction} \label{intro} It has long been a scientific goal to study the poles of the Sun, illustrated by the NASA/ESA International Solar Polar Mission that was proposed over four decades ago, which led to ESA's Ulysses spacecraft \cite{Ulysses} (1990 to 2009). Indeed, with regard to the Earth, we took the first tentative steps to explore the Earth's polar regions only in the 1800s. Today, with the aid of space missions, key measurements relating to the nature and evolution of Earth's polar regions are being made, providing vital input to climate-change models. The poles of other planets have also been explored using spacecraft, revealing the ice caps of Mars and the unexpected and intriguing polar flow patterns of the gas giants Jupiter and Saturn. However, the polar regions that remain largely uncharted are the poles of our Sun, yet it is those polar regions that hold the key for our understanding of the activity cycle of our star and, thus, of its impact on the environment of all planets in our Solar System, including our own. Ulysses used a Jupiter fly-by to achieve an orbit with an inclination 80$^\circ$ out of the ecliptic plane, and, from distances as close as 1.34 AU was able to measure the in-situ particle and field environments above the Sun's polar regions; Ulysses did not carry instrumentation to image the Sun. However, the mission did provide seminal observations of different particle and field environments of the solar wind emerging from polar and equatorial regions. That said, to date we are blind as to what the solar poles actually look like and how they behave in enough detail to understand the role that the polar regions play in, for example, the solar cycle. In 2020, ESA's Solar Orbiter mission \cite{orbiter2020} was launched, with an array of 10 instruments to probe directly the solar wind, as previous missions have done, but also directly image the sources of the solar wind. 
Solar Orbiter is a solar encounter mission, with prime orbits allowing solar approaches to within Mercury's orbit every 150 days. The later phases of the mission use Venus gravity assists (VGA) to slowly raise the spacecraft orbit out of the ecliptic plane to reach 30$^\circ$ helio-latitude. The mission's focus is the study of links between solar wind measurements from the in-situ instrumentation and sources of the solar wind from the remote sensing instrumentation. This is the first time that remote sensing of the sources of the solar wind has been done hand in hand with in-situ measurements this close to the Sun. The out-of-ecliptic phase will provide the first ever view of the poles, towards the end of the mission lifetime. This paper builds on the knowledge that will be gained with Solar Orbiter science, and it describes a mission with the principal focus being the exploration of the poles -- for long durations. We describe the scientific drivers behind this mission and the technical challenges that lie ahead. \section{Science goals} \label{sec:science} There are four key science goals that this mission should address. These are: \begin{itemize} \item To study the interior of the solar polar regions to uncover the key role of magnetic flux transport in the solar cycle \item To study the global mass-loss of a star through discrete mass ejection processes \item To determine solar irradiance at all latitudes \item To explore solar activity at the poles and the impact on the solar wind \end{itemize} We will describe these goals in the following sub-sections. For each science goal we will describe the requirements necessary to fulfil the science. \subsection{Science goal 1: To study the interior of the solar polar regions to uncover the key role of magnetic flux transport in the solar cycle. 
} We are woefully ignorant of the large-scale circulation of plasma in the Sun and stars and how it interacts with stellar rotation and convection to generate i) the large-scale long-timescale (22 years) solar magnetic (sunspot number) cycle and ii) small-scale and short-timescale (months) appearance and evolution of magnetic active regions. For the Sun, where we can observe these flows at low helio-latitudes ($<$ 60$^\circ$), we have the beginnings of an observationally motivated model of how the magnetic field is generated by the dynamo process. On the theory side, we have various increasingly sophisticated dynamo models, and we have an improving suite of numerical simulations that are becoming progressively more realistic. However, what we are missing is an understanding of how magnetic flux interacts with rotating turbulent convection to give rise to the solar cycle below the solar surface and to form flux concentrations that then lead to the observed active regions on the surface of the Sun. By observing the Sun at high latitudes where the poleward flows converge, we will learn about the effects of rotation on convection and their combined effects on the magnetic field. The evolution of the magnetic flux is closely tied to the latitudinal differential rotation of the Sun and displays a complex dynamic behaviour. In addition to the systematic decrease in the rotation rate towards the pole, rotation varies through the solar magnetic activity cycle (the ``sunspot cycle") at most latitudes (the ``torsional oscillation''). The magnetic flux is dragged by the poleward meridional flow, which sets the duration of the cycle in some dynamo models. The observed poleward flows simply cannot continue to transport mass to the poles; mass cannot accumulate there without limit: there must be a (to date unobserved) return flow toward the equator within the Sun to conserve mass \cite{howelrsp,choudhuri2021}. 
Large-scale internal solar flows and differential rotation can be measured at low and mid helio-latitudes by the helioseismology of globally resonant sound waves \cite{jcd2001} while the local helioseismology of propagating waves \cite{gizon} can measure the variations of the larger scale, complex, temporally varying flows at low helio-latitudes. These studies have demonstrated the enormous power of these techniques at low latitudes where we currently have access. Helioseismology has revolutionized our views of the structure and dynamics of the convective region and the solar dynamo \cite{cameron,charbonneau}. In particular, helioseismology has revealed the differential rotation in the solar interior (e.g. \cite{thompson}), a key ingredient in dynamo models. Helioseismology also revealed the depth-dependence of the time-varying zonal flows, as well as the near-surface meridional circulation, constraints on the amplitudes of convective velocities, and inflows around active regions -- all of which have improved our understanding of the advection and stretching of the magnetic field through the solar cycle. These flows vary over the longest time scales and are different from cycle to cycle (Figure~\ref{fig:1}); they are important probes of the global dynamics, including the dynamics of the magnetic field. Torsional oscillations are variations of the solar differential rotation that present two distinct branches: a low-latitude branch that progresses over time from about $50^\circ$ toward the equator as sunspots do and a polar branch that progresses from $50^\circ$ toward the poles as diffuse magnetic fields do (Figure~\ref{fig:6}). Both branches are thought to be strongly linked to the magnetic cycle (Figure~\ref{fig:1}) but possibly through different physical processes \cite{rempel2007}. The high-latitude branch has been found notably less strong in Cycle 24 than Cycle 23. 
Assuming that this poleward branch is driven through Lorentz force feedback, \cite{rempel2012} found that the transition of the dynamo toward a lower state of magnetic energy is linked to a drop in the high latitudes' rotation rate leading to the apparent fading of the polar branch. In order to investigate this link between zonal flows and the magnetic field near the pole, it is thus important to get a better resolved view of both quantities at high latitudes. It is essential that these techniques be applied near the poles, where circulation patterns must return to the equator within the interior, where the solar cycle begins, and where the high-speed solar wind escapes into the heliosphere. An emerging field of study is energy transport by global-scale waves (e.g. Rossby waves) and convective modes (e.g. giant cells); this is made possible by very long duration observations (many years) and should be extended over multiple cycles and particularly at high latitudes \cite{loeptien2018,liang2019,Hathaway2020}. \begin{figure} \includegraphics[scale=.56]{tors_osc_mag.eps} \caption{Zonal flow residuals, or torsional oscillations, 1\,\% of the solar radius below the visible surface from MDI and HMI f-mode splittings (adapted from \cite{corbard}). The white contours show the 2 Gauss limit of the unsigned radial magnetic field averaged over longitudes for each Carrington rotation covered by MDI and HMI magnetograms (see also Fig.~\ref{fig:6}).} \label{fig:1} \end{figure} The first indications of a new solar magnetic cycle occur at high latitudes where they are difficult to observe from the ecliptic. Figure~\ref{fig:1} shows the current observations of the magnetic field and the flows, which reach only to 70$^\circ$. These high latitude emergences are more direct probes of the newly wound-up magnetic field than the low latitude active regions that are built up over many years.
The out-of-ecliptic views will also enable us to map flux emergences on the farside and study non-axisymmetric dynamo modes. The same views will enable the study of the polar magnetic field reversals in detail. \begin{figure} \includegraphics[width=\textwidth]{polar-rays-cy24.eps} \caption{Example p-mode ray paths (red curves) accessible to a solar polar mission. These ray paths could be used to measure the subsurface meridional flow at high latitudes. The meridional flow stream lines shown here in the northern hemisphere (black curves with arrows) were inferred using time--distance helioseismology and GONG observations over Solar Cycle 24 \cite{Gizon2020}. The region in grey highlights the base of the convection zone. [Image credit Zhi-Chao Liang.]} \label{fig:2} \end{figure} \subsubsection{Science goal 1: requirements} Technically, the helioseismic science objectives that drive a solar polar mission's requirements are orbit, data rate, and the total duration of the observations. Helioseismology requires nearly continuous, full-disc observations at a temporal cadence of roughly one minute in order to resolve the entire acoustic spectrum. The fundamental scales of solar dynamics and activity define two basic observing modes that are needed to achieve the helioseismology objectives: (1) relatively short (7\,--\,14 days) and high-resolution (500 $\times$ 500 km) Dopplergrams of sound waves and intensity images of individual convective granules, and (2) multiple long time intervals (36\,--\,72 days) at lower spatial resolution (6000~km $\times$ 6000~km) to investigate global dynamics with helioseismic techniques. The longest runs are needed to achieve the frequency precision to resolve the tachocline structure and dynamics.
High-latitude viewing of solar p-modes is not only necessary to study the dynamics in the polar regions, it is also essential for studying the deep convection zone, including longitudinal structures in the tachocline (see Figure~\ref{fig:2}). An orbit with 60$^\circ$ inclination would allow determination of the flows in the polar regions of the upper convection zone. In addition, the high-latitude orbit will allow us to perform stereoscopic helioseismology, to extend the regions throughout the solar interior that are accessible. To achieve these objectives, the total mission duration should be several years. \subsection{Science goal 2: To study the global mass-loss of a star through discrete mass ejection processes} This science goal recognises the unique advantages of studying the processes giving rise to the mass loss from the Sun, in particular coronal mass ejections (CMEs), from a high-latitude perspective. Such an observational vantage point would enable an unprecedented opportunity for the study of the global mass loss from a star. CMEs, large-scale eruptions of hot plasma that may accelerate charged particles and can travel well beyond Earth's orbit, were discovered in the early 1970s using the space-borne coronagraphs aboard OSO-7 \cite{tousey1973} and Skylab \cite{koomen1974}. Since this time, a number of space missions have provided a wealth of coronagraph observations of CMEs (e.g. Figure~\ref{fig:3}), all from the ecliptic plane, and all but one, the Solar Terrestrial Relations Observatory (STEREO), from a near-Earth vantage point; the twin STEREO spacecraft orbit the Sun and, although observing from near the ecliptic plane, make observations from off the Sun-Earth line. \begin{figure} \includegraphics[scale=1.35]{CME_2011.eps} \caption{A CME detected using the STEREO/COR2 coronagraph in July 2011. Historically, CME observations have been made from the ecliptic plane. 
Geometrical considerations mean that CME observations from high helio-latitude would provide unique opportunities to study solar global mass loss with a 360$^\circ$ view with respect to the ecliptic plane. [Image credit NASA/COR2 team, see STEREO Gallery at \url{https://stereo.gsfc.nasa.gov/gallery/gallery.shtml}]} \label{fig:3} \end{figure} Many studies have shown that CMEs originate from the solar activity belts, defined by the location of active regions. The activity belts generally occupy restricted regions below 30$^\circ$ latitude but migrate equatorward with decreasing activity. Statistical analysis of 7000 CMEs imaged by the coronagraphs on the near-Earth SOHO spacecraft \cite{yashiro} clearly demonstrated the latitudinal variation of CMEs with the solar cycle. In addition, CMEs often show an equatorward deflection after eruption, resulting in a greater concentration near the equatorial streamer belt. Coronagraphs detect CMEs through Thomson scattering of photospheric white-light off free electrons in the CME structure. Coronagraphs are more sensitive to CMEs that are not directed towards the spacecraft, although the densest spacecraft-bound CMEs can be detected as faint ``halo'' events emerging from behind the coronagraph occulting discs. Thus, from a polar platform, taking advantage of the observational geometry and the confinement of CMEs to the activity belts, for the first time, we have a vantage point from which we can potentially detect all CMEs effectively, and especially those Earth directed, and provide a global overview of CME activity of the Sun. This would not only provide oversight of mass ejection phenomena in terms of global distribution and frequency, it would also permit the investigation of the longitudinal structure and density distribution of the activity belts in the corona, as well as providing new insights into the kinematic and topological parameters of CMEs, including their complex magnetic structure.
Such unique and powerful studies of CME processes from a polar perspective are therefore crucial for a deeper understanding of impacts on the Earth and other planets' space environments. Recently, \cite{xiong2018} synthesized the white-light emission of an Earth-directed CME using a 3D MHD code and derived its structure as observed from out of the ecliptic plane, demonstrating the feasibility of the investigations quoted above. In addition to CME studies, the polar vantage point provides an opportunity to observe, for the first time, the background corona/inner heliosphere from high-latitudes, enabling complete mapping of the longitudinal distribution over 360 deg and temporal evolution of the equatorial streamer belt over prolonged periods. These observations would also facilitate the derivation of the mass and energy flux carried away by the solar wind in the solar equatorial plane. Observations from over the pole would not only permit us to investigate outward-propagating CMEs, we will also be able to image the development of co-rotating interaction regions (CIRs) in the background solar wind. These interaction regions are curved because the magnetic field lines that define their topology are curved due to solar rotation. The occurrence and structure of CIRs have been identified from images from the Heliospheric Imagers aboard STEREO \cite{alexis}. Whilst this is an important step, such images are still restricted to equatorial regions; a polar vantage point would enable a far superior view of CIR global structure and evolution, and interaction with the Earth and other planets. \cite{xiong2017} synthesized the out-of-ecliptic view (Figure~\ref{fig:4}), demonstrating the possibilities of imaging the 3D CIR structure from above. \begin{figure} \includegraphics[scale=.8]{CIR.eps} \caption{Simulated images of the CIR structure observed from out of the ecliptic plane \cite{xiong2017}.
The images show normalised white-light brightness (I/I*) for elongations ($\epsilon$) out to 45 degrees. The concentric black and white circles correspond to the inner elongation and the solar-disc size respectively.} \label{fig:4} \end{figure} The occurrence rate of CMEs strongly varies over the solar cycle \cite{webb2017}. During solar maximum, the average CME rate is about 5 per day, and multiple CMEs can occur in close succession associated with the same active regions (homologous and sympathetic eruptions). This inevitably leads to the interaction of successive CMEs either close to the Sun or in interplanetary space. Such interaction is associated with many complex processes -- momentum exchange, magnetic reconnection, and propagation of magnetosonic shock waves through the ejecta, e.g. \cite{lugaz,manchester}. These phenomena also cause changes of the CME structure, such as its radial extent, expansion speed and field strength, which in turn affect its capacity for causing a geomagnetic disturbance. The most intense geomagnetic storms are associated with interacting CMEs propagating in the ecliptic \cite{vennerstrom,liu}. CME geo-effectiveness can also be enhanced when the CME is compressed from behind by a CIR or high-speed solar wind stream, resulting in enhanced magnetic fields (e.g. \cite{kilpua}). A polar perspective will provide a much better view of CME--CME and CME--CIR interaction processes in interplanetary space, where they occur and how they propagate. Viewpoints above the poles can allow a more complete determination of the fractional contribution of CMEs to the mass loss of the Sun, particularly in synergy with complementary in-situ measurements. These are very useful for the study of solar mass loss, including steady solar wind and transient events.
The cumulative effect of this transient mass loss in the form of CMEs could be larger than current estimates, typically $<$ 10\% of the overall solar wind mass flux (about 2$\times$10$^{-14}$ M$_{\odot}$/yr). Mass loss via the solar wind and CMEs can be investigated as a function of solar magnetic activity, with the purpose of predicting the CME occurrence rate and associated mass loss rate of solar-type stars on the basis of their magnetic activity level \cite{mishra}. The relative contribution of stellar CMEs into the stellar wind could be high for active stars \cite{aarnio2011}; quantification of this is important in evaluating the global mass-loss from the stars. Stellar mass loss is also found to have a significant influence on stellar evolution since this may determine stellar spin down, with the consequential impact that rotation plays on stellar properties. In fact, it is widely accepted that the Sun, like other late-type stars that have overcome the disc-stellar interaction phase, undergoes loss of angular momentum during its main sequence lifetime mostly due to wind magnetic braking \cite{kraft,Mestel}. This is a mechanism generated by the stellar wind torque \cite{lanzafame,matt,finley}, due to the fact that ejected plasma remains magnetically connected to the stellar surface for several stellar radii, namely up to the Alfv\'enic radius estimated to be around 15\,--\,25 R$_\odot$ for the Sun \cite{kwon}. The same mechanism of wind magnetic braking has been proposed for CMEs \cite{aarnio2011}; the mass loss due to the cumulative effect of all CMEs should have a contribution to the total torque that spins down the star. This highlights the importance of determining the total mass-loss of the Sun, considering also the cumulative effect of all CMEs (including those observationally not yet resolved), also from the perspective of making predictions about the future evolution of our star. 
Difficulty in evaluating solar and stellar wind torques arises mostly due to the lack of observations (past, current, or planned) of mass-loss and stellar wind on a global scale and of the magnetic field geometry of these stars and their topological magnetic interconnection with their wind. The only hope for gaining some indication is by observing our closest star, and extending this to other late-type stars in the framework of the solar-stellar connection \cite{brun2015,brun2017}. To assess angular momentum loss rate, the magnetic topology of the star and, more specifically, the complexity of the surface magnetic field at all latitudes has been shown to be crucial in the coupling between a star and its wind \cite{garraffo}. Moreover, the global coverage of magnetic field measurements, including also the poles, is expected to substantially improve current MHD models of the solar wind and CME propagation through interplanetary space. A viewpoint from above the solar poles will provide huge benefits, for example avoiding (1) the projection effects on the line of sight magnetic field measurements that produce a large amount of noise at the poles when polar regions are observed from low latitudes near the ecliptic, and (2) the Sun's tilt angle periodically rendering those areas invisible to current and planned solar probes (i.e. Parker Solar Probe and Solar Orbiter). The need for a mission like the one we are proposing is also critical, considering that these kinds of measurements are not possible, not even in the near future, for late-type stars other than the Sun. Overall, it would be extremely valuable to compare CME events as seen from a polar view with similar instruments deployed in the ecliptic plane from other space science missions that are operational (e.g. space weather monitoring spacecraft at L1 and L5, carrying coronagraphs, EUV imagers, and heliospheric imagers).
The successful scientific outcome of a dedicated solar polar mission would not be dependent on the coincidence of solar missions in the ecliptic plane, but, using such observations in concert would lead to significant additional advances. Such combined observations would enable analyses of CME phenomena in both longitude and latitude for the first time; 3D views of CMEs (and indeed CIRs) would be extremely important for studies of CME onset, propagation, and impact studies. Exploiting available spacecraft combinations would also give the opportunity for investigating the 3D global structure of the solar corona and heliosphere, and how this is influenced by solar activity and, in particular, CMEs. This new, combined observational approach would contribute significantly to the validation of global heliospheric models. \subsubsection{Requirements for Science Goal 2} The concept of a polar mission providing extensive coverage of the inner helio\-sphere, focused on the ecliptic plane, for extended periods of time, would be unique, and the scientific advantages of such a mission have been spelt out above. Our goal is to achieve: \begin{itemize} \item helio-latitudes of greater than or equal to 60$^\circ$; \item for many periods of tens of days each at the poles, across a long portion of a solar cycle; \item mission orbital parameters in terms of aphelion and perihelion values well within 1 AU, dictated by the required resolution of the remote sensing instrumentation. \end{itemize} The high-latitude view would allow crucial measurements of the directionality and changes of speed of the plasma erupted from the Sun, and unique tracking capabilities of events with respect to all Solar System bodies. If the spacecraft scientific payload included a coronagraph and a heliospheric imager, working in concert, the observation and tracking of CMEs and CIRs and their influence throughout the corona and inner heliosphere, over all longitudes, would be available for the first time. 
This places some requirements on the angular extent of the combined field of view (coronagraph plus heliospheric imager, without any gap): from 1\,--\,2 deg (inner edge of the coronagraph) to about 45 deg (outer edge of the heliospheric imager) from the centre of the Sun. The 45 deg limit assumes a distance of the spacecraft from the Sun of 1\,AU, enabling the Earth to appear at the edge of the field of view. This can be considered to be a minimum requirement. With an additional EUV imager and magnetograph the mission would include the magnetic and coronal imaging capabilities required to investigate the solar sources of the solar wind phenomena. Finally, the inclusion of particle and field in-situ instrumentation would allow measurements of the local plasmas to provide the in-situ `ground truth' measurements relevant to several of the lines of research mentioned above. \subsection{Science goal 3: To determine the solar irradiance at all latitudes} The total solar irradiance (TSI) has been measured since 1978 by a number of space instruments. All TSI space missions so far have been bound to the ecliptic, which coincides closely with the solar equatorial plane. While the ecliptic plane is the perfect vantage point for monitoring the essential climate variable TSI, in other words the energy input on planet Earth, a polar mission will be able to explore how the Sun's radiative output varies at all latitudes which ultimately allows the determination of the solar luminosity. We know that the TSI varies as a function of the solar activity cycle. The key driver of the solar activity cycle is understood to be the solar surface magnetic field which manifests itself as the dark sunspots, and bright faculae and network. The latter compensate for the dark sunspots. The polar regions remain a mystery, and the dominant source of radiative output at the poles is not well understood. 
Indeed, the interaction between the poles and the activity belt and how that changes with the cycle is not clear. Another interesting question is the long-term changes in the solar cycle -- the past two cycles of the Sun have been weaker than the previous ones. Measuring the solar irradiance during weak cycles at the minimum period may provide insight into the potential long-term minima such as the Maunder minimum -- what actually causes a reduction in the radiative output? This polar mission concept will measure the TSI and alongside that telescopes will observe the bright features on the Sun. Measurements from above the solar equator only provide a first-order estimate of the total energy output of the Sun, the solar luminosity. A TSI radiometer on board a solar polar orbiting mission would be able to measure the latitudinal distribution of the solar irradiance and how it varies with time. Accurate measurements of the solar luminosity will feed into the understanding of solar-type stars whose orientation of the rotation axis is unknown, i.e. it is not known from which vantage point the stars are observed. In recent years, however, asteroseismology, for example, has revealed that differential rotation also occurs on Sun-like stars (e.g. \cite{Benomar}). This study showed that this differential rotation can be larger in other stars than it is in our Sun. Understanding how the differential rotation on our Sun impacts the irradiance will be key in understanding the luminosities of other stars with different differential rotation, and different orientations.
\begin{figure} \includegraphics{Knaack.eps} \caption{Visibility of two zonal latitude bands between $\pm$5$^\circ$ and $\pm$30$^\circ$ latitude for different inclinations $i$ (defined here as the angle between the solar rotation axis and the line-of-sight); $i$ decreases in steps of $\Delta i \approx$13$^\circ$ from the upper left ($i$ = 90$^\circ$) to the lower right ($i$ = 0$^\circ$). Adapted from \cite{Knaack}.} \label{fig:5} \end{figure} A key question is whether the brightness of the poles is different than the brightness of the quiet areas elsewhere on the Sun. \cite{Knaack} studied the influence of the solar inclination, $i$, on the outgoing total and spectral solar flux. With their model they found that the total flux increases by about 0.15\,\% when measured from the poles ($i$ = 0$^\circ$, see Figure~\ref{fig:5} bottom right) with respect to $i$=90$^\circ$ (upper left). The authors also find that, while UV variability decreases slightly when observed from the poles, variability in the visible is expected to increase by up to 150\,\%. If this model is confirmed, this means that the shape of the spectrum depends on the inclination angle of the solar (or stellar) observation. Ultimately, this would then also have implications on the stellar magnetic activity index (S-index) of both the Sun and Sun-like stars. Currently, the S-index of the quiet Sun, S$_\textrm{QS}$, is understood to be independent of the inclination. However, if the shape of the spectrum is indeed a function of $i$, so would be the S-index. For comparing the Sun with solar-type stars the S-index plays a crucial role and, therefore knowing its exact value is essential. In summary, a directional dependence of the energy output of the Sun cannot be ruled out and its exact amplitude needs to be determined. This result would be crucial to constrain irradiance models for solar and stellar applications.
\begin{figure} \includegraphics[scale=.3]{magbfly.eps} \caption{ Solar magnetic cycle from 1974 to the present. Opposite polarity is indicated by the blue and yellow. [Image credit D. Hathaway]} \label{fig:6} \end{figure} The luminosity of the Sun is $L = 4\,\pi\,(1\,\textrm{AU})^{2}\,S$, with the assumption that the TSI is a suitable measure of the flux $S$. The flux measurements from a polar mission will directly give us the information as to how much $S$ varies as a function of latitude and ultimately whether $L$ varies over the solar cycle. Figure~\ref{fig:6} illustrates the solar magnetic cycle, which covers on average 22 years. During this time the poles undergo a magnetic field reversal. We know that TSI varies with the solar cycle and these measurements will address whether the solar luminosity also varies over the solar cycle. We will be able to measure if the brightness of the poles changes over the solar cycle and what role the polar magnetic field strength plays. With the search for habitable planets there is increasing need to understand the place of the Sun amongst other solar-type stars. However, differences exist between the Sun and Sun-like stars that are not yet fully understood. A striking difference is the smaller variability of the Sun on solar cycle time scales, e.g. \cite{Lockwood,Radick}, compared to Sun-like stars. This difference might be partly due to the inherently different solar and stellar observations. Until now, all TSI and solar spectral irradiance (SSI) observations have been obtained from the vantage point of Earth. Stellar observations, however, are typically obtained as chromospheric or photometric variability and from random stellar inclinations. The fact that stars are observed from different viewpoints might explain part of the difference in solar cycle variability. Indeed, \cite{Knaack} showed that in the visible wavelengths, bright faculae are less effectively compensated by the sunspots if the Sun was observed at higher latitudes.
This indicates that the comparison between the solar and stellar variability is rather complex. Therefore, to better link solar versus stellar variability it is essential to measure the solar flux from all viewpoints, most importantly from high latitudes. Ultimately, by better understanding solar and stellar variability we will be able to better constrain future solar variability and at the same time improve the characterization of solar-like host stars of Earth-like exoplanets. Our understanding of the changes of the radiative energy output of the Sun is based on the modeling of the radiation emerging from the surface components of the Sun. These include -- besides the quiet, mostly non-magnetic Sun -- the solar activity features such as sunspots, faculae, and the network. The changing area contribution of these features leads to an overall variation of the outgoing radiative flux. Our knowledge of the emergent radiation of the solar surface components stems from radiative transfer modeling of these components. A key basis for the radiative transfer modeling is semi-empirical atmosphere structures that are developed to reproduce spectral observations at intermediate spatial resolution \cite{fontenla1999,fontenla2009}. Depending on the spectral wavelength of interest, they are then used with different radiative transfer codes \cite{Ermolli,margit} to model the emergent spectrum. Recently, the radiative transfer modeling has been further extended to employing 3D MHD models \cite{Yeo}. There is however an important missing link. Due to the lack of observations, the detailed characterization of the solar atmosphere at the poles has so far not been possible. In particular, it is not known whether the photosphere at the poles is darker (cooler) or brighter (hotter) than the average quiet Sun. Moreover, we do not know the detailed properties of the polar plasma, i.e. its temperature structure, density, flow patterns. 
In order to infer these properties, it would be essential to observe the poles with intermediate to high spectral resolution. Based on the observed spectra it would then be possible to derive the atmospheric structure of the poles. Detailed observations of the poles will allow us then to improve irradiance reconstruction models -- further constrained by global flux measurements -- and ultimately to better link solar with stellar observations. \subsubsection{Requirements for Science Goal 3} The science goals for the latitudinal variation of total solar irradiance require as high an inclination as is possible (in an ideal world 90$^\circ$, but anything $>$60$^\circ$ would be a significant advance on existing/planned observations). Due to the importance of long-term measurements, it is necessary to collect observations over several years -- ideally over a solar cycle. \subsection{Science goal 4: To explore solar activity at the poles and its impact on the solar wind} Ulysses (1990\,--\,2009) undertook the only previous exploration of the heliosphere outside the ecliptic plane, making in-situ solar wind plasma, electromagnetic fields, and composition measurements. These measurements demonstrated the prevalence, particularly near-solar minimum, of fast, relatively uniform, solar wind at high northern and southern latitudes \cite{McComas1998,mccomas2003}, while more variable ``slow'' solar wind originates from the coronal streamer belt at low-latitudes. During solar minimum, the fast solar wind most likely originates from large polar coronal holes and subsequently expands with distance from the Sun to fill much of the hemispheric cavity, while the slow solar wind is more confined around the equatorial plane. Ulysses also demonstrated that the fast solar wind contains `open' magnetic flux, where the spacecraft is connected directly along magnetic field lines and thus provides the most direct physical connection to the solar wind origin in the solar atmosphere (e.g. 
\cite{Cranmer}). Although Ulysses polar observations were made at very high solar latitudes ($\approx$\,80$^\circ$), they were confined to a relatively large distance range (between 1.3 and 5.4 AU) from the Sun, due to the reliance on a Jovian gravity assist maneuver (GAM) to reach high latitudes. The out-of-the-ecliptic solar wind will also be sampled in situ by the Solar Orbiter mission. Like Ulysses, Solar Orbiter carries instruments to measure the solar wind plasma, electromagnetic fields, energetic particles, and composition, although significantly more modern variants. These measurements will be augmented by remote-sensing instruments providing imagery and spectroscopic information of the solar atmosphere. Solar Orbiter will make its measurements between 0.28 and $\approx$\,1\,AU. Thus, although the mission will not reach the high latitudes sampled by Ulysses, it will sample the solar wind much closer to its sources. There are strong scientific reasons to make solar wind measurements from a platform that combines both the high-latitude vantage point of Ulysses with the near-Sun, multi-instrument capabilities of Solar Orbiter. In the 1970s the Helios mission \cite{Rosenbauer} demonstrated that the sampling of pristine solar wind, free from the effects of in-transit processing, requires in-situ measurements within a few tenths of an AU, a view confirmed through more recent imaging of the corona and inner heliosphere by, e.g., instruments on STEREO \cite{stereo}, and also by the early results from the Parker Solar Probe mission \cite{Fox}. A further advantage of sampling solar wind from high-latitudes would be extended periods of measurement of the expanding fast solar wind in the absence of any stream interactions with the slower wind from the streamer belt. Open magnetic flux in this region provides a persistent and direct magnetic connection between the surface magnetic field and ejected plasma elements.
This can be modeled using magnetograph data, which can shed light on the acceleration of the solar wind and energetic-particle events. Such measurements are crucial to providing constraints on solar wind formation processes on both a local and global scale and on the influence of the magnetic field close to the Sun. In particular, these measurements could be used to better determine how the spatial distribution of the fast solar wind properties varies with respect to the location/size/edge of the polar coronal holes. They would also help to determine the overall magnetic flux budget within the heliosphere, potentially resolving the ``open flux'' conundrum within existing measurements \cite{Linker}. These topics are expanded on further below. {\bf (i) Identification of fast solar wind sources and acceleration.} The Solar Orbiter mission philosophy recognises that direct, simultaneous measurements of both on-disc activity and in-situ features observed in the solar wind are needed to properly explore solar wind sources and related structures, such as polar plumes associated with polar coronal holes and the ambient solar wind from coronal holes. Launching such a combined package on a solar polar orbiting platform reaching very high latitudes would complete these studies at all latitudes. As noted, measurements from such high polar latitudes would enable study of the evolution of the solar wind from its source region to the observing platform with little or no effects of, e.g., the stream interactions that significantly influence dynamics of the solar wind at lower latitudes. Such pristine fast solar wind observations will enable us to unambiguously establish their connection to features observed in the polar corona, such as polar plumes and coronal hole jets. In turn, this allows us to investigate driving mechanisms. 
Moreover, such measurements of the high-latitude solar wind with modern instrumentation, would also fill in parameter space and augment studies of processes occurring within the solar wind itself that have been made nearer the ecliptic (Solar Orbiter) or at greater distances (Ulysses). For example, the level of plasma wave activity, turbulence, internal heating, and other kinematics within pristine fast solar wind from the polar coronal holes would provide a significant context in which to place similar measurements of more disturbed near-ecliptic regions. This is key knowledge needed to complete our understanding of how energy flows from a star to its surrounding environment. Observations from a polar vantage point have significant scientific advantages when combined with observations made from or near the ecliptic. Multiple vantage points significantly enhance our ability to undertake studies of the coronal structures that are the source of the solar wind. {\bf (ii) Global knowledge of the solar wind, its sources, and in-transit dynamics.} In addition to analyses of specific high-latitude solar wind sources and the processes involved in the release of the solar wind, synoptic polar observations from near-Sun high-latitude vantage points will complete an exploration of measurement space that will underpin a step-change in understanding the large-scale structure of the heliosphere and the physical processes therein. Data from a solar polar orbiting platform will reveal the full radial and longitudinal evolution of the solar wind and transient structures propagating through it, providing critical information on the fundamental nature of our star and its environment. For example, this would reveal the magnetic connectivity map across large volumes of space, which can be used to constrain theories of the role of the magnetic field in solar and heliospheric dynamics. 
Full measurements of the amount of open magnetic flux in the heliosphere can only be made by including measurements outside of the ecliptic. Combined with direct measurements of polar magnetic fields, their boundaries, and associated solar-wind source regions at high latitudes at the Sun, these out-of-ecliptic measurements will resolve inconsistencies in our understanding of the open flux. Such data can also be used to probe the global nature of key boundaries between the Sun and the heliosphere, such as the Alfv\'enic surface, and determine their roles in conditioning the outflowing solar wind plasma. Combined with ecliptic vantage points, the global nature of the interactions between solar wind streams with different sources, with different speeds, densities, etc., can be comprehensively studied. In particular, the way these interactions evolve with both time and distance can be disentangled, which is very challenging based on measurements from one vantage point only. The formation and evolution of CIRs and associated shocks, as well as CMEs, could in principle be tracked continuously over long periods of time from a polar vantage point (as discussed under Science Goal 2). These measurements will provide a unique picture of the global variations in the fine structure within the fast solar wind, the latitudinal dependence of the occurrence of turbulence and wave--particle interactions, the influence of the three-dimensional structure of the magnetic field on these variations, the plasma convection and circulation flows at and below the surface, and how these might vary as a function of solar activity. These are all important inputs for operational space weather forecasters. Finally, we consider the propagation of energetic particles within the heliosphere. 
There is now a general consensus that solar energetic particles are generated by disparate processes, including direct release from solar flares and more gradual generation at interplanetary shocks propagating through the heliosphere in association with CMEs and/or CIRs. However, our understanding of the transport of these particles through the heliosphere remains rudimentary. STEREO observations show that energetic particle events apparently spread rapidly beyond regions that are magnetically connected to their source \cite{SEP}. This surprising result suggests that there must be processes operating near the Sun that act to not only accelerate the particles to their high energies, but also transport them in radius, longitude and, presumably, latitude. Observations from a polar vantage point would be able to confirm the latter, and the relatively simple magnetic structure expected at higher latitudes will provide testable constraints on theories of these transport processes. In addition, the propagation of energetic electron bursts through the inner heliosphere can be remotely sensed through observation of Type II and Type III radio emissions. Their observation from various radial and latitudinal locations, or indeed from multiple vantage points in and out of the ecliptic, will enable better understanding of the propagation of energetic electrons through space. Conversely, direct observation of these electrons can provide information on the magnetic connectivity of the spacecraft to solar sources and reveal the timings for the actions of the release processes in the corona. This allows a more direct link with remote observations of potential source regions to be confirmed. In addition to the specific advantages to making measurements of the Sun and the heliosphere from a solar polar vantage point, we note a more general benefit from such data in both priming and validating magnetic and plasma simulations. 
Magnetograph measurements of the solar poles would significantly augment the accuracy of the lower boundary conditions generally used in global simulations of the solar atmosphere and modeling of the heliosphere. Moreover, measurements of the solar wind distributed in radial distance, latitude, and longitude provide a network of ground truth points to which to compare and refine the simulation results (e.g. predicted vs observed flow speeds, densities, timings of magnetic reversals, etc.). These interdisciplinary activities to improve the accuracy of the model output would significantly benefit operational space weather services. \subsubsection{Requirements for Science Goal 4} The primary science of this goal will be achieved by an inclination of $>$\,60$^\circ$. This goal is the one mostly driven by the distance to the Sun -- it requires the location to be $<$1AU. A combination of remote sensing and in-situ instruments are required. \section{Space mission concepts} There has been a drive for many years to explore the poles of the Sun. Table 1 summarises the range of ideas to date, and whether they fulfil the science goals stated above or not. The list may not be complete but is representative of the efforts to explore the solar poles and shows quite clearly that most (6 out of 8 of them) have not progressed beyond the concept stage -- mainly for technology readiness reasons. Only one has flown, namely Ulysses, which did not include the remote sensing payload required to address the goals described above. The remaining mission, Solar Orbiter, will achieve 32$^\circ$ helio-latitude, allowing some polar-oriented scientific studies towards the end of the mission but it was not designed to simultaneously and/or fully address all the goals presented here. To deliver the science that we present requires a new concept combining high latitude observations (above 60$^\circ$) with an appropriate orbit and remote sensing instrumentation. 
\begin{table} \resizebox{\textwidth}{!}{ \begin{tabular}{ l l l l l l l } Mission & Inclination & Flight Status & SG 1 & SG 2 & SG 3 & SG 4 \\ \hline Ulysses \cite{Wenzel} & 80$^{\circ}$ & Flown & no & no & no & In-situ \\ & & & & & &only \\ Solar Orbiter \cite{orbiter2020} & 32$^{\circ}$ & Launched & Short & Short & no & Short \\ & & & duration & duration & & duration \\ SPDEx \cite{vourlidas2018} & 75$^{\circ} $ & Concept & yes & yes & yes & yes \\ SPORT \cite{sport} & $\geq$ 60$^{\circ}$ & Concept & Only fields, & yes & no & yes \\ & & & no helioseismology & & & \\ Polaris \cite{polaris} & 75$^{\circ}$ & Concept & yes & yes & yes & yes \\ SPI \cite{liewer2008} & 75$^{\circ} $ &Concept & yes & yes & yes & yes \\ RAMSES \cite{ramses1998} & Above ecliptic & Concept & no & no & no & yes \\ European solar & 90$^{\circ}$ & Concept & yes & yes & yes & yes \\ STEREO mission \cite{bothmer1998} \\ \end{tabular} } \caption{A summary of missions that have been flown or planned to explore the poles. Only one polar mission has flown and it had no ability to remotely sense the poles. The ability of these missions to address each of the science goals (SG) presented in Section 2 is indicated. } \label{tab:1} \end{table} Each science goal described in this paper gives a summary of the requirements for the mission. These are based around the inclination from the ecliptic plane, and the duration of observing at the poles. These two main requirements will drive the technology of the mission concept. Table 2 summarises these requirements. \begin{table} \resizebox{\textwidth}{!}{ \begin{tabular}{l l l l l l } Science & Inclination & Distance & Duration & Pole to & Other \\ goal & & to Sun & & pole view? 
& \\ \hline SG1& $>$60\,$^{\circ}$ & $<$1AU & $>$36 days at the poles & yes & Ecliptic measurements beneficial \\ SG2 & $>$60\,$^{\circ}$ & $<$1 AU& Extended periods over solar cycle & yes & Ecliptic imaging and in-situ beneficial \\ SG3 & $>$60\,$^{\circ}$ & 1AU & Years (preferably solar cycle) & yes & Ecliptic TSI measurements beneficial \\ SG4 & $>$60\,$^{\circ}$ & $<$1 AU & 10s days at the pole in each orbit & no & Requires remote sensing and in-situ \end{tabular} } \caption{Key mission requirements for a polar mission} \label{tab:2} \end{table} \subsection{Technology Challenges} This section summarises the technical challenges of a mission to observe the poles, and how they might be achieved. A solar polar mission to exploit the scientific gains of observing the Sun and its near-space environment from high helio-latitudes must observe from above the poles for long durations. The goals, above, require a three-axis stabilised, solar-pointed platform carrying a package of remote sensing and in-situ instruments. The likely instruments would be an evolution of current instrument strengths on missions such as Solar Orbiter and STEREO. Here, we focus on the mission concept options with a view to the technology requirements. Several options would allow us to reach high helio-latitudes for long durations with increasing levels of technical difficulty. \subsubsection{Ulysses type orbit with remote sensing instruments} There has been one solar polar mission, the Ulysses mission. One option for a polar mission is to adopt an updated Ulysses-style orbital strategy, using a Jupiter gravity assist (JGA). Ulysses did not include solar remote sensing instrumentation. Observations were made over the polar regions only every 6 years in an orbit with aphelion 5.4 AU and perihelion 1.34 AU. 
A similar orbit, including remote sensing instruments, could achieve to some extent goals 2 and 3, but it is too restricted time-wise to achieve all of the scientific goals, and it is farther from the Sun than would be ideal for good spatial resolution imaging. Unlike Ulysses, we would require a three-axis stabilised spacecraft for the remote sensing instruments. In addition, a radioisotope thermoelectric generator (RTG) was used with Ulysses, given its distance from the Sun, and this may not be acceptable in the current climate. \subsubsection{Solar sail option} An option for a polar mission is to exploit future development of solar-sail technology to achieve a far more favourable orbital scenario much closer to the Sun and orbiting at high helio-latitudes for long periods. The concept proposed for the POLARIS mission \cite{polaris} uses a combination of a Venus Gravity Assist (VGA) and solar sail propulsion to place a spacecraft in a 0.48 AU circular orbit around the Sun with an inclination of 75$^\circ$ from the solar equator (see Figure~\ref{fig:7}). In this orbit, at least 59\,\% of the time would be spent at latitudes higher than the maximum latitude reached by Solar Orbiter. However, the sail size is significantly larger than has been achieved to date. So, whilst the solar sail option is considered the baseline for this concept, based on the POLARIS studies, the technological advances required are acknowledged. \begin{figure} \includegraphics[scale=.28]{trajMacdonald.eps} \caption{Post-Venus Gravity Assist solar sail trajectory to a polar orbit. The orbits of Earth, Venus, and Mercury are shown.} \label{fig:7} \end{figure} Figure~\ref{fig:8} shows the most recent solar sail mission, LightSail2 \cite{lightsail}, a crowdfunded project through the Planetary Society. The 32\,m$^2$ sail was deployed successfully in the Earth-orbiting mission to demonstrate orbital maneuvers using sail technology. 
The first successful solar sail technology demonstrator, IKAROS, was launched in 2010 with a 196\,m$^2$ sail. It completed its 5-year mission after an interplanetary journey that included a Venus encounter. Its principal aims were to demonstrate deployment and control of a large, thin solar sail, including attitude control through reflectance variation. Whereas the solar sail option does not require carrying a large mass of propellant, it does require storage and deployment of a solar sail over two orders of magnitude larger than any previously flown. \begin{figure} \includegraphics[scale=1.55]{lightsail.eps} \caption{An image taken on 25th July 2019 showing the successful deployment of the LightSail2 solar sail. (Image credit Planetary Society) } \label{fig:8} \end{figure} \subsubsection{Ion drive option} Ion engines have been developed for many years and are increasingly used for science and exploration. A recent example is the use of four 5\,kW T6 gridded ion engines developed by QinetiQ for ESA's BepiColombo mission. ESA is continuing the study of future technologies that would be necessary for exploration missions. These include studies on using Hall effect thrusters to keep a station around the Moon (CISLUNAR) \cite{cislunar}. Technologies like this could be explored to provide the long duration needs over the poles to obtain the science goals. \subsection{The way forward for technology} In the case that the solar sail option is selected, a potential small solar sail mission technology demonstrator could be flown as a precursor to the main mission. This would be based upon a light platform and a solar sail system of reasonable size ($>$10$\times$10\,m$^2$) for a small launcher (Vega like). This mission could carry a minimal payload that would bring significant scientific return given the technology development. The precursor mission would qualify in-flight all the required technology for the solar sail system. 
Possibilities for instruments for this option that have $\approx$\,2\,kg mass are a small EUV imager, a magnetometer, or a total solar irradiance monitor. Although we are talking about instrument payload options that look very much like heritage instruments from e.g. SOHO, STEREO, and Solar Orbiter, there would be a need for miniaturization of instruments given the difficulties of this orbit whilst maintaining scientific capability, and such miniaturisation would be required for all of the orbital scenario options. There would be trade-offs, e.g. between payload mass and the sail size required, for the solar sail option. Figure~\ref{fig:9} summarises the options and challenges listed above. Since this article considers missions for the period 2035\,--\,2050 we expect that technology for sails, ion engines, and miniaturization will have progressed enough for serious consideration. The abscissa of Figure~\ref{fig:9} relates to increasing TRL to the left and increasing scientific return to the right. The Ulysses-style option is based on a proven approach, i.e. the highest TRL, and, whilst its scientific return would be extremely significant, if not ground-breaking (because we are taking remote sensing observations from high helio-latitudes for the first time), the long-duration polar observation periods and solar vicinity afforded by the sail and ion drive options would likely provide much greater scientific return. Thus, those options are shown to the right, with arrows indicating the technology development strands required. The lighter orange oval shows an opportunity for a precursor mission as the technologies are developed. For all options we are looking for instrument miniaturisation to varying degrees. \begin{figure} \includegraphics[scale=.35]{figure9_new.eps} \caption{A schematic view of the mission scenarios described and strategy. 
} \label{fig:9} \end{figure} Thus, solar sails, ion engines and miniaturization of instrumentation are key to reaching the challenges of long-duration, $<$ 1 AU, high helio-latitude solar orbits. For some years there has been a drive to smaller, lower mass instruments; consider, for example, the payloads of ESA's SOHO and Solar Orbiter, which contain some similar instrument-types but with very different instrument parameters due to the use of different materials and modified optical designs. We would anticipate that this effort would continue within the instrument community. The solar sail option would be a major challenge for the platform. Key developments are required in the following areas of solar sail technology: material, deployment (booms), attitude and orbit control system, and jettison mechanism. Some of these are discussed in \cite{Macdonald}. Different orbital concepts are presented by \cite{polaris,liewer2008}. Both employ a solar sail to reach a near-polar heliocentric orbit. One study includes a VGA to enhance transfer performance and allow for increased mass budget. A mission with a full payload using only a solar sail, requires a sail area of up to 200\,m$^2$. This would need significant technology development but, over the timescale extending to the end of the Voyage 2050 programme, we feel this should be an area of significant development in any case, as it would open up opportunities beyond solar and heliospheric physics. With regard to ion engines, such an option was explored in the early phase of the Solar Orbiter mission, but in the end was not used, and it was considered for the POLARIS study but rejected in favour of sails. Since this time, significant developments have taken place; for example, the BepiColombo spacecraft launched in 2018 with a state-of-the-art ion engine. However this option was not deemed viable for Solar Orbiter due to the mass impact. 
Thus, in the context of this White Paper, an ion engine should be explored early on as an option for the mission. A ``Tundra'' type orbit could increase the fraction of the orbit at high helio-latitude. \section{Summary and worldwide context} There is a clear scientific drive for the international community to study the solar poles in a consistent way, over an entire activity cycle. This is a critical next step in the global solar physics space mission program. Prior to 2006, all solar remote sensing observations were made from the ecliptic plane, on or near Earth (Earth orbit or L1). STEREO (2006 to date) provided a step change by pioneering observations out of the Sun--Earth line, and Parker Solar Probe (2018 to date) and Solar Orbiter (2020 to date) have taken the next key step by targeting the first solar encounter observations. The remaining unexplored regions are the polar regions of the Sun, targeted by this paper. Indeed, the fact that the polar regions are not well understood is particularly unfortunate as the Earth spends a significant time magnetically connected to the polar coronal holes \cite{Luhmann}, which are difficult to observe from Earth \cite{gordon}. In a response to a recent international review of Next Generation Solar Physics Missions carried out by JAXA, NASA, and ESA, a range of white papers were submitted, two of which proposed polar missions \cite{NGSPM}. \cite{gibson} also recently highlighted the importance of a polar view to answer fundamental questions of the kind addressed here. The next step for a polar mission will be building on the foundation laid by Solar Orbiter, which will get the first view of the solar poles by reaching over 30$^\circ$ out of the ecliptic towards the end of its mission. Whilst, over the next decade, significant progress will be made, there is no doubt that these sneak previews of the polar regions will lay the foundation that leads to a fully fledged polar mission. \bibliographystyle{spmpsci}
{ "redpajama_set_name": "RedPajamaArXiv" }
1,606
\section{Introduction} The study, analysis, and generation of astrophysical opacities \citep[e.g.,][Turck-Chi\`eze, this volume and references therein]{edd32, eps51, sea59, cox65a, cox65b, 1987JPhB...20.6363S, 1987JPhB...20.6379B, 1990SoPh..128...49C, rog92a, rog92b, 1993ApJ...408..347T, rog94, 1994MNRAS.266..805S, igl95, 1996ApJ...464..943I, 1998SSRv...85..125T, 2005MNRAS.360..458B, 2007ApJS..168..140S, 2012MNRAS.425...21T, mon15, 2015Natur.517...56B, 2016MNRAS.458.1427H, 2016ApJ...817..116C, 2016ApJ...821...45K, 2016ApJ...824...98K, pain:DetailOpacCalcs} is a complex and demanding pursuit. So much so that only a handful of groups around the world have the stamina, incentive, expertise, and human and computational resources to actively work in this field. Computing opacities \citep{2017arXiv170403528M} involves knowledge of atomic and molecular physics in order to calculate energy levels, line positions, $f$-values, and photoabsorption cross sections for large numbers of ions, atoms, and molecules of astrophysical interest; computational expertise to perform massive computations and to process large data volumes; and proficiency in plasma physics for the development of equations of state to determine ionization fractions and energy level occupation numbers, as well as line-broadening processes, all under extreme temperature and density regimes. Validating the quality and accuracy of opacity tables is complicated by the fact that the opacity itself is not directly observable, and can only be inferred from radiative transfer calculations (although simplified limiting cases often apply). Moreover, stellar interior conditions are difficult to achieve in a laboratory and for a long time were limited to external layers of massive stars (see a recent comparison of calculations for this range of conditions by \citealt{2016ApJ...823...78T}). 
Despite a long history of opacity experiments \citep[see, for instance,][for a review of experiments at conditions relevant for the solar convective envelope]{pain:DetailOpacCalcs}, plasma conditions approaching those of the solar radiative zone have only been recently reached \citep{bailey:Fe150eVplus,2015Natur.517...56B}. Apart from the very limited number of experimental opacities available at present, it is only through applications in the study of the solar interior, stellar pulsations, and the atmospheric spectra of stars and planets that theoretical opacities can be more or less directly confronted with reality. Nonetheless, present needs for improvements in current theoretical opacity data can hardly be overstated. First, we know that the combination of current opacities with the revised solar photospheric abundances of \citet[][AGSS09]{2009ARA&A..47..481A} fail to give standard solar models within the constraints of helioseismic observations \citep[][and Section~4 below]{2004PhRvL..93u1102T, serenelli:SunSeismAGSS09,2010ApJ...715.1539T}. Current studies of mode instabilities in pulsating stars seem to require opacities significantly larger than those in current opacity tables \citep{das10}. Modern studies of cool planetary atmospheres, on the one hand, and hot accretion disks around compact objects and protostars, on the other, require opacities well outside the range of most current tables (e.g., of dust and aerosols). Furthermore, the recent discovery of a neutron-star merger \citep{2017ApJ...850L..40A}, which brings about rapid neutron-capture nucleosynthesis, has shown the need for heavy-element opacities that are simply unavailable. The Workshop on Astrophysical Opacities (WAO) provides a forum for producers and consumers of opacity data from both the atomic and molecular sectors to contribute to solving outstanding problems and to develop more effective and integrated interfaces. 
The previous WAO took place at the IBM Venezuela Scientific Center, Caracas, Venezuela, in July 1991 \citep{1992RMxAA..23.....L}. This was an iconic event in the field as it brought together the two main groups recomputing the atomic astrophysical opacities at the time, the OPAL group \citep{igl92a, igl92b, rog92} and the Opacity Project \citep{sea92}, and it was at or around that meeting that complete opacity tables were first released by both teams. There have been major advances in astrophysical opacities since that first WAO, to which the subsequent WorkOp workshop series \citep{2000JQSRT..65..527S} has contributed. The strict constraints now imposed on atomic opacities by the helioseismic and solar neutrino benchmarks \citep[and references therein]{tur16} have led to a proliferation of opacity codes, new opacity tables \citep[e.g., OPLIB,][]{2016ApJ...817..116C}, and extensive comparisons \citep[for the iron-group bump, see][]{gil12, gil13, tur13} to examine key issues (completeness, configuration interaction, line broadening, plasma effects) and to look for possible sources of ``missing'' opacity. On the other hand, direct and transit spectroscopic observations of hundreds of exoplanets detected by space probes such as {\it Kepler}\footnote{\url{http://kepler.nasa.gov/}} and {\it Corot}\footnote{\url{http://bit.ly/2cYJ09w}} have uncovered a peremptory demand for molecular opacities \citep{ber14}. Along with these developments we have also seen the rise of asteroseismology as a window on stellar interiors \citep{jcd:AsteroseismReview}, large-scale surveys like the Two Micron All Sky Survey \citep[2MASS,][]{skrutskie:2MASS} and the Sloan Digital Sky Survey \citep[SDSS,][]{blanton:SDSS-IV}, and the astrometry mission Hipparcos \citep{vanleeuwen:HipparcosNewReduc} currently being extended to a billion stars in the Galaxy and beyond by the GAIA mission \citep{lindegren:GAIA-DR1}, all providing much tighter constraints on our modeling efforts. 
Thus, we were encouraged to organize a second WAO that took place at Western Michigan University, Kalamazoo, Michigan, USA, during the week 1--4 August 2017. In practice the opacities needed in stellar astrophysics must span a wide range of conditions from a star's surface to its center. In terms of temperature and density, conditions range from $10^3$~K and $10^{-12}$~g\,cm$^{-3}$ to $10^9$~K and $10^9$~g\,cm$^{-3}$. The more recent needs of planetary atmosphere research extend the temperature range down to a few hundred Kelvin, where molecules dominate over atomic species. In modeling stellar interiors it is safe to assume local thermodynamic equilibrium (LTE), which allows us to compute opacities from collisional and radiative data for all processes involved in energy transport, while ionization equilibria and level populations are set by an assumed equation of state (EOS). However, two big complications arise. (1) The enormous amount of atomic (and ionic) data needed to account for all bound levels from the ground to highly excited ones, and to include all possible transitions between them and the continuum, which brings about possible issues of completeness and propagation of uncertainties in the radiative data. Towards the lower temperatures of planetary atmospheres, the emphasis shifts from atoms to molecules expanding the size of cross-section data volumes by orders of magnitude. (2) The adoption of an EOS valid for the same large range of plasma conditions that includes all relevant processes, in particular high-density plasma interactions (see Section 3), and propagating the results to the opacity calculation for a consistent treatment. In the lower density regime, as in accretion disks and some stellar atmospheres, the LTE approximation breaks down and collisional cross sections are needed, as well as the photoabsorption data for individual transitions. 
The present report attempts to summarize the discussions during the second WAO and suggest future priorities for opacity research. Some bias in the views expressed is inevitable given work already undertaken by the authors to compute atomic and molecular data, as well as opacity tables. Readers need to be aware of the Lawrence Livermore National Laboratory (LLNL) work not fully represented in the present paper, and the perspective on future directions for opacity research that follows. But all who are engaged in calculating opacity tables share a common objective: to provide opacity tables of the highest achievable accuracy for present and future astrophysics research. \section{Workshop on Astrophysical Opacities} One of the main highlights of the second WAO was the discussion on the competing needs for accuracy and completeness. Completeness gives priority to identifying and taking account of all relevant physical processes that contribute to the opacity in the astrophysical context. Accuracy insists on minimizing assumptions and approximations to model already known physical processes as reliably as possible. Of course it was recognized that both are needed, but resource limitations demand priorities be assigned. The debate was motivated in great part by the realization that astrophysical applications, such as modeling the solar structure and stellar pulsation, and recent laboratory opacity determinations all seem to suggest that current theoretical opacities may be significantly underestimated. Hence, the debate centered around whether focusing on completeness would provide improved opacities more rapidly (and cheaply) than expending effort on accuracy. The many excellent papers reporting on the WAO in the present proceedings are a testimony to the current scope, sophistication, and detail of opacity calculations, as well as their applications and verifications through astrophysical modeling and measurements under a range of relevant plasma conditions. 
We refer the reader to these papers for full details. Instead of summarizing each contribution, we review opacities currently in use and give reasons for an essential validation. \citeauthor{1982ApJ...260L..87S}'s \citeyearpar{1982ApJ...260L..87S} plea for a reexamination of metal opacities in stars led to the Opacity Project \citep[OP,][]{1994MNRAS.266..805S,2005MNRAS.360..458B} and OPAL \citep{1996ApJ...464..943I} revisions to the astrophysical opacities then in use. A high priority for both projects was physically consistent and much improved EOS foundations for the opacity calculations, developed independently and in two conceptually different formulations: the OPAL EOS by \citet{2002ApJ...576.1064R} in the \emph{physical picture} and the MHD EOS by \citet{1988ApJ...331..794H}, \citet{1988ApJ...331..815M}, \citet{1988ApJ...332..261D}, and \citet{1990ApJ...350..300M} in the \emph{chemical picture}. Efforts were made to identify and take account of all atomic and plasma processes of importance to the calculation of stellar envelope opacities. They were nonetheless limited by the computing power at the time and approximations were made, many of which can now be relaxed. Astronomical evidence for the need to further revise stellar opacities comes from several lines of inquiry: the \emph{missing opacity in the Sun problem} labels the issue that solar models based on current atomic opacities combined with the low-metal abundances of AGSS09 cannot be made to agree with the helioseismic constraints \citep{serenelli:SunSeismAGSS09}. Agreement with helioseismology could be restored by either increasing the atomic opacity by some 20\% at the bottom of the convection zone and decreasing it towards the solar center \citep{serenelli:SunSeismAGSS09, jcd:AsteroSeism} or by returning to the older, higher metal abundances. The AGSS09 abundances cannot, however, be easily dismissed. 
They constitute the first homogeneous abundance analysis of the Sun (as opposed to a compilation of a plethora of other groups' works); they are based on a 3D solar atmosphere simulation that greatly outperforms the semi-empirical Holweger--M{\"u}ller model in reproducing solar observations \citep{tiago:suneHlinesLimbdark}; and the 3D atmosphere gives asymmetric line profiles that match observations and reveal even weak blends that go unnoticed in a 1D analysis \citep{grevesse:StopUsingGN93}. Moreover, the accuracy of seismic results has highlighted the need to verify opacity calculations in an attempt to reconcile the Standard Solar Model, helioseismology, and solar neutrino fluxes (see the review by \citealt{2011RPPh...74h6901T}). On the other hand, the AGSS09 abundances are disputed by \citet{caffau:CLSFB11-SunAbunds}, who find abundances much closer to the classic values, based on an independent 3D solar atmosphere simulation but analyzing only five of the 15 metals included in the \citet[][OP05]{2005MNRAS.360..458B} opacities. Higher metal abundances have also been recently derived from {\it in situ} measurements of the solar wind by \citet{ste16}, but in spite of an improved accord with the helioseismic sound speed around the base of the convection zone, they have been questioned for leading to neutrino overproduction due to an excess of refractory elements, namely, Mg, Si, S, and Fe \citep{ser16, vag17a, vag17b}. The connection between photospheric and interior compositions is also debatable. \citet{2012MNRAS.422.3460S} have tested the dependence of the Magellanic Cloud B~star pulsation excitation on opacity and chemical mixture. With known Magellanic Cloud abundances, they find these pulsations can be driven by an increased iron-group bump opacity as long as this occurs at the temperature corresponding to the maximum nickel contribution. 
An additional astronomical hint comes from a study of $\beta$\,Cephei pulsation by \citet{2017EPJWC.15206005W} and \citet{2017MNRAS.466.2284D}, who show for $\nu$\,Eri, $\gamma$\,Peg, and 12\,Lac how a modified opacity would improve the agreement between observed and calculated pulsation frequencies. This includes a ${\sim}50$\% opacity increase around $T=290$\,kK, which is a larger change at a lower temperature than that needed to resolve the missing opacity in the Sun. A further indication that current atomic opacities may need an increase comes from laboratory measurements by \citet{2015Natur.517...56B}, who used the Sandia Z-Facility to create an iron plasma at conditions close to those at the base of the solar convection zone and measured its opacity in transmission. The iron absorption measured by \citeauthor{2015Natur.517...56B}\ is demonstrably higher and broadened when compared with current theoretical predictions. For a solar composition at a temperature of $T = 2.11 \times 10^6$~K and electron density of $n_e = 3.1 \times 10^{22}$~cm$^{-3}$, \citeauthor{2015Natur.517...56B} replaced the iron contribution to the OP opacity with their experimental determination (in the wavelength range $7 < \lambda < 12$~{\AA}), and showed that this alone increased the Rosseland mean opacity by $7\pm 3$\%. If correct, the higher opacities implied by the \citeauthor{2015Natur.517...56B}\ experiment strongly indicate that we are missing physics in current opacity models. From the theoretical view point, though, it seems unlikely that more accurate models or larger calculations of the same type will lead to an agreement with this measurement. The experimental data appear to contravene the oscillator strength sum rule \citep{2015HEDP...15....4I}, which implies that an overlooked mechanism might be responsible for the result. 
For example, \cite{mor17} have proposed two-photon contributions as a source of the missing opacity; however, this possibility is very uncertain \citep{2018HEDP...26...23P}, and it is also unclear why previous comparisons of opacity calculations and measurements did not require additions to existing models to concur. The \citeauthor{2015Natur.517...56B}\ experiment needs to be replicated using a different setup; \citet{2017JPlPh..83a5903H} discuss the prospects for doing so using the National Ignition Facility (NIF). \citet{2016PhRvL.116w5003N} carried out an extensive close-coupling calculation of \ion{Fe}{xvii}, a major contributor to the opacity at the base of the solar convection zone and the dominant ion present in \citeauthor{2015Natur.517...56B}'s experiment. \citeauthor{2016PhRvL.116w5003N} demonstrate, as had already been done previously \citep{1995ApJ...443..460I}, that the much simpler calculations performed under the original OP \citep{1994MNRAS.266..805S} severely underestimated this absorption. However, whether such close-coupling calculations can help resolve current discrepancies between the solar model and the experiment by \citeauthor{2015Natur.517...56B}\ remains an intensely debated topic \citep{2016PhRvL.117x9501B}. As atomic core excitations and ionizations are accounted for using Voigt profiles---or in some cases a far-wing cut-off following \citet{1994MNRAS.266..805S} in other current opacity calculations \citep{2017ApJ...835..284I} including the more recent OP opacities by \citet{2005MNRAS.360..458B}---it is not clear to what extent the more elaborate, but so far less complete cross sections by \citeauthor{2016PhRvL.116w5003N}, will increase the Rosseland mean of a solar mixture. The more physical close-coupling formulation results in Fano-shaped resonances effectively moving absorption from the line cores to the wide wings, increasing the harmonic Rosseland mean despite identical total oscillator strengths. 
Collisional line broadening effects can often hide subtle Fano line-profile effects at moderate and high plasma densities. But whether absorption is moved to, or away from, windows in the absorption by other elements of a solar mix determines whether the Rosseland mean will increase and possibly resolve the disagreement between calculations and experiment \citep{2015Natur.517...56B}. Moreover, it remains to be demonstrated if close-coupling calculations alone, however accurate, can achieve the required completeness for the more complicated ions. Conversely, perturbative approaches can account for many more transitions, but some caution is advised in that they may miss important coupling effects and introduce systematic errors in the mean opacities. Adopting a hybrid model may provide a solution as configuration interaction only affects some transitions. \section{Plasma Effects and Line Broadening} Plasma effects refer to phenomena arising from collisions of finite duration between particles in a plasma rendering the gas non-ideal. (Thermodynamics rely on collisions for thermalization but are assumed infinitely short in ideal gases, and all plasma effects mentioned below are therefore incompatible with the ideal gas assumption.) The most well-known effect of non-ideal collisions is line broadening, either by Stark broadening of resonance lines or by perturbing bound states on time scales shorter than the radiative lifetime that increase their uncertainty in energy and, hence, their line widths. A bound state may be so perturbed by collisions that the electron can no longer be assumed bound, and has effectively been removed from the parent ion causing a second plasma effect known as {\it pressure ionization}. \citet{1988ApJ...331..794H} introduced the occupation probability formalism to describe this process in an EOS based on the chemical picture by minimizing the Helmholtz free energy, which also makes partition functions converge. 
In the physical picture the activities of clusters of electrons and nuclei consist of bound and scattering state parts that together are convergent. No assertions about occupation probabilities need to be made, and non-Boltzmann level populations arise directly from the formulation. Regardless of the EOS framework employed, this effectively moves absorption from lines to the continuum (bound--bound to bound--free) as states pressure ionize \citep{1987ApJ...319..195D}. Such heavily perturbed states are often referred to as \emph{dissolved states}. A third plasma effect arises when electronic wave functions are perturbed resulting in shifts in energy levels and line positions, energy-level shifts being larger than line shifts since the former largely cancel in differencing. There have been persistent measurements of such shifts, most recently by \citet{hansen:DenseFeEshifts} for iron. Energy-level shifts (continuum lowering) are, however, much less than expected \citep{1988ApJ...331..794H} if that alone were responsible for bound states dissolving into the continuum \citep{rogers:BndStatesDebyePot}. A fourth plasma effect is that on the free energy, to first order described by \citet{Deb:Huck} theory, due to the screened Coulomb interactions. In the physical picture the Debye--H{\"u}ckel term is the sum of all ring diagrams in the activity expansion. This was the single most important stellar EOS improvement at the time, which brought solar models into agreement with a helioseismic analysis of the convection zone \citep[where opacities have no effect,][]{jcd-wd:osc-eos}. All four plasma effects must be operating simultaneously and connected by the probability distribution of the electric field strength from passing ions, the so-called {\it microfield distribution function} (the screened Coulomb coupling affects the microfield distribution). 
There are several choices of microfield distributions in the literature \citep[e.g.,][]{1999ApJ...526..451N, iglesias:MultiCompMicrofield, potekhin:microF, laulan:FastQbeta}, but little observational or experimental guidance as to which is preferred. Occupation probabilities also depend on the adopted critical field strength for ionization, which depends on the exact mechanism considered \citep{luc-koenig:StarkContinuum, 1988ApJ...331..794H}, adding an uncertainty that is convolved with the microfield distribution. The level shifts and occupation probabilities are usually considered mutually exclusive, and a consistent picture of the combined effects is needed. Considerable progress has been made towards this goal through the ChemEOS model \citep{chemeos1,chemeos2}, which is implemented in the ATOMIC code and used to make the recent OPLIB tables \citep{2016ApJ...817..116C}. ChemEOS minimizes the free energy in a chemical picture, and makes use of an occupation probability formalism that is generated from a microfield distribution \citep{potekhin:microF}. ChemEOS also goes beyond the Debye--H{\"u}ckel approximation in the Coulomb contribution to the free energy. Hydrogen lines in white-dwarf atmospheres provide strong observational tests of how lines dissolve into the continuum. For that purpose \citet{2009ApJ...696.1755T} analyzed Balmer spectra of 250 DA white dwarfs with non-convective (and therefore simpler) atmospheres. By employing occupation probabilities, they found greatly improved consistency among Balmer lines and masses in better agreement with stellar evolution. They also found that a more realistic implementation of the microfield distribution function \citep{1999ApJ...526..451N} is needed, rather than the approximation to the Holtsmark distribution originally used by \citet{1988ApJ...331..794H}. However, there is still some disagreement between white-dwarf masses determined from such spectra {\it vs.}~from photometry or gravitational redshifts. 
The asymmetric structure near the core of Stark-broadened lines is affected by the order of the multipole expansion used for describing the perturbing charges. \citet{2016PhRvA..94b2501G} investigated its convergence, and found profiles diverge from the dipole approximation with increasing perturber density (both cores and wings are affected). Line cores are more important for interpreting experiments or stellar spectra, but the wings will also affect opacity calculations in general. With a conserved total oscillator strength (from sum rules), redistributing absorption from line cores to the wings is an efficient way of increasing the harmonic mean of the Rosseland opacity. Experiments by \citet{2016PhRvA..94b2501G} and \citet{2017ASPC..509..149F} provide strong experimental constraints on our models, and efforts are underway to derive experimental occupation probabilities. \citet{2016JPhCS.717a2069M} notes that spectral line shapes in large-scale opacity calculations involving L- and M-shell transitions are modeled as Voigt profiles, whereby detailed ion broadening effects are ignored. But an ion's microfield distribution can lead to forbidden (field-dependent) line transitions and significant changes to the broadening and shape of line profiles. \citeauthor{2016JPhCS.717a2069M} presents Stark broadening calculations for line profiles of L-shell transitions, linking the ground state and singly excited states in Ne-like iron ions, at the plasma conditions \citet{2015Natur.517...56B} adopted for their Z-Facility experiment. It is clear that forbidden components and altered line profile shapes will affect the opacity through an absorption frequency redistribution. If not modified, the far red line wings of Lorentz-shaped resonance lines will dominate the opacity at low temperatures. 
\citet{1994MNRAS.266..805S} employed an ad-hoc $\lambda^{-4}$ suppression, but the line-wing satellites from H--H$^+$ collisions \citep{allard:quasi-H2-Lya} provide a more physical and exponential suppression. In doubly excited atoms/ions, excited spectator electrons can cause interference with an electron transition forming a line \citep{2010HEDP....6..318I} thus altering its line shape. This process is crucially affected by the microfield distribution suppressing the Rydberg series of spectator electrons. Therefore, the Fano profiles of autoionization lines must also be subject to broadening, but the theoretical basis for this is only now being established (Pradhan \& Nahar, this volume). The many kinds of transitions giving rise to spectral lines, combined with the many broadening agents, lead to a large variety of line shapes. For this reason simplified profiles will be inadequate in many cases, although we also need tractable calculations in light of the (at least) tens of millions of lines involved in opacity calculations. A key ingredient is therefore fast and efficient yet accurate algorithms \citep{sonnad:FastLineCalc}. It is clear that there are already many efforts to address these issues from stellar observations, ambitious laboratory experiments, and sophisticated quantum mechanical calculations. There is still much to be learned, and these efforts must therefore be encouraged. \section{Missing Opacity in the Sun} The issue of whether our current opacity calculations are accurate is critical for solving a major problem in our solar models: Why is the structure of solar models constructed with the latest solar abundances of AGSS09 so different from that of the Sun? The structure mismatch is most notable at the base of the convection zone, and results imply that the opacity at such conditions must be higher than that included in the models. 
Opacity has of course two components: first the intrinsic opacity at the density and temperature of a given chemical composition, and second, the heavy element composition. For a given intrinsic opacity, the total opacity increases with metallicity. Helioseismic data, which probe the structure of the Sun, are only sensitive to the sound speed and density in a given region and, hence, cannot distinguish between total and intrinsic opacities (see Basu, this volume). Models constructed with older, higher metallicities \citep{1998SSRv...85..161G} agree very well with the Sun. This has led to what is often described as the ``missing opacity in the Sun problem,'' but it is most likely to be a problem with total opacities. Sorting out whether the issue is with intrinsic opacities or metallicity is critical because solar metallicities are used as the standard for describing the metal content of other stars. Metallicity affects both the structure and evolution of a star: low-metallicity models evolve faster. Thus errors in solar metallicities will result in errors in the ages of astrophysical objects such as exoplanet hosts or stellar clusters. Increasing intrinsic opacities as tabulated by OPAL, OP, etc. could solve the problem with the structure mismatch of the solar models and the Sun. However, the increment must be for elements that contribute the most at the solar convection-zone base (O, Fe, Ne, and Si in that order), and it should taper off towards the solar core; an overall increase would merely move the problem to the solar core. Thus clearly there is more work to be done. Experiments by \citet{2015Natur.517...56B} are important pieces of the puzzle, and interpreting those results in terms of opacity calculations is central to resolving this and the other issues noted above. We currently know of two ways to test interior abundances independently of our knowledge of opacities. 
Firstly N, O, and F in the core produce neutrinos (as part of the CNO cycles) that have energy thresholds between those of the pp and B neutrinos \citep[see, for instance,][]{bahcall:newOP+AGS05}, which means their fluxes can be measured separately. However, current neutrino measurements have so far only produced upper limits to their fluxes \citep{bergstroem:SolarNeutrFlux2016}. Secondly the ionization effect on the adiabatic exponent, $\gamma_1$, leaves little seismic glitches in the convection zone where stratification is determined by $\gamma_1$. \citet{lin:seismEOS} found that groups of elements with similar ionization energies make bumps in $\gamma_1$ that can be marginally constrained with helioseismology, but individual metals are not accessible. \citet{vorontsov:SeismicEOScalibr} found support for generally lower metallicity from a similar helioseismic analysis. Despite much progress, state-of-the-art solar models \citep{2017ApJ...835..202V} still leave out processes that might be significant regarding the missing opacity in the Sun problem, most notably mixing mechanisms that can operate in the (nominally) convectively stable regions. This includes overshooting from convective regions \citep{rempel:OvershootModel} into Schwarzschild-stable regions; semi- or double-diffusive convection \citep{wood:SemiConvMixing}; internal gravity waves \citep{talon:Int-g-waves1}; the combined effects of gravitational settling; thermal diffusion and radiative levitation \citep{turcotte:Diff+RadLev}; and rotational mixing \citep{eggenberger:RotMixSunType}. Furthermore, stellar models do not in general include a non-grey stellar atmosphere \citep{trampedach:T-tau}. Many improvements can therefore still be applied to our solar modeling, but none of these is likely to solve the missing opacity in the Sun problem---not even in aggregate. Thus improving, or at least validating, opacity calculations and measurements will remain the key to solving the problem. 
Comparisons between recent and earlier opacity calculations show that element completeness leads to an increase of about 20\% for iron and nickel but a decrease for other elements. OPAS opacity tables \citep{2015ApJ...813L..42L}, for example, give an increased mean opacity at the base of the convection zone of 6\%, about a third of what is required to remove the disagreement with helioseismology. \section{Molecular Opacities} Molecules provide the dominant source of opacity in cool stars and brown dwarfs. The complexity of molecular spectra often leads to whole-scale blanketing of large spectral regions, which can have a profound effect on the properties of the atmosphere \citep{84ErGuJo.HCN}. Molecular opacities are dominated by bound--bound transitions, so are normally based on extensive line lists of molecular transitions. The construction of such line lists was in its infancy at the time of the Caracas WAO \citep{92Jorg}, but this situation has now changed radically (see papers by Bernath, Calvet, Huang et al., Morley, Sousa-Silva et al., Tennyson, and Zammit et al. in this volume). There are coordinated projects such as ExoMol \citep{2012MNRAS.425...21T} and TheoReTS \citep{TheoReTS} leading to systematic compilations of molecular line lists for modeling hot atmospheres \citep{HITEMP, TheoReTS, 2016JMoSp.327...73T}. Much of this work has been stimulated by the desire to model and characterize exoplanets. As for atomic opacities, issues of completeness versus accuracy are important, with models showing that for key species (e.g., methane) correct results can only be obtained by considering many billions of transitions \citep{jt572}. Although there has been a lot of work constructing extensive and reliable molecular line lists and hence opacities, there remains much to be done. BT-Settl model atmospheres \citep{BT-Settl} are among the most used for cool stars and brown dwarfs. 
In their study of BT-Settl model atmospheres for M-dwarfs, \citet{13RaReAl.NaHAlH} identified only AlH, NaH, and CaOH as key species for which data were missing. The ExoMol project has since provided line lists for AlH \citep{jtAlH} and NaH \citep{jt605}, but CaOH remains outstanding. The chemistry of carbon stars is more complicated than that of oxygen-rich ones, and data for a number of species are required to complete good models; foremost amongst these are acetylene (HCCH) and C$_3$. Transition metal containing diatomics such as TiO provide strong sources of opacities since these open-shell species often display strong electronic transitions in the near infrared and visible, i.e., near the stellar flux peak for cool stars. There are some line lists, of variable accuracy, available for key species such as TiO, VO, FeH, ScH, and TiH, but data are missing for many others such as CrH, NiH, ZrO, YO, and FeO, to name a few. Constructing accurate theoretical line lists for transition metal containing molecules remains very challenging \citep{jt623,jt632}. As mentioned, the field of exoplanets has been a major driver for extending studies of molecular opacities. \citet{1602.06305} recently published a review of laboratory data needs for work on exoplanet atmospheres that covers opacities and much else. In particular, the recent discovery of hot rocky exoplanets, also known as super-Earths and lava planets, has raised the need to study a whole range of new molecular species. The possible molecules important in the atmospheres of these bodies, which can reach temperatures over $3000\,{\rm K}$, have been reviewed by \citet{jt693}. 
\section{Opacity Due to Trans-Iron Elements} Scarcely a fortnight after the Kalamazoo WAO, the Laser Interferometer Gravitational-Wave Observatory (LIGO) detected gravitational waves from the binary neutron-star (NS) merger GW170817 \citep[LIGO Scientific Collaboration\footnote{\url{https://www.ligo.org/partners/}} \& Virgo Collaboration\footnote{\url{http://public.virgo-gw.eu/the-virgo-collaboration/}}] {2017ApJ...850L..39A,2017ApJ...850L..40A}. This discovery was jointly observed in the UV, optical, and IR with various ground- and space-based instruments \citep{2017ApJ...848L..19C, 2017ApJ...848L..17C, 2017ApJ...848L..27T} opening a new era in astronomy in which atomic opacities are again at the forefront of cutting-edge science. Electromagnetic signatures accompanying NS mergers, referred to as macronovae \citep{2005astro.ph.10256K} or kilonovae \citep{2010MNRAS.406.2650M}, are roughly isotropic thermal transients powered by radioactive decay of r-process elements synthesized in the ejecta \citep{1998ApJ...507L..59L, 2011ApJ...736L..21R, 2012ApJ...746...48M, 2013ApJ...775..113T}. Furthermore, NS mergers are now believed to be the main site for rapid neutron-capture nucleosynthesis of heavy elements in the universe. In such NS merger events the time scale and shape of the transient light curve is determined by the fireball opacity. It is believed that lanthanides and, additionally, actinides can enhance the opacity by nearly two orders of magnitude over that of typical iron-rich supernova ejecta \citep{2013ApJ...774...25K,2013ApJ...775..113T}, resulting in a hotter, denser, and longer lived fireball. While a start has been made \citep{2015HEDP...16...53F, fon17} in providing opacity tables to characterize this type of heavy-element rich expanding ejecta, most kilonova models assume grey (wavelength independent) opacities. 
In modeling the light curve of GW170817, \citet{2017ApJ...848L..17C} tested opacity values between $\kappa = 0.1$~cm$^2$\,g$^{-1}$ (appropriate for Fe-peak ejecta) and $\kappa = 10$~cm$^{2}$\,g$^{-1}$ \citep[estimated for lanthanide-rich ejecta by][]{2013ApJ...774...25K}. They found that at least two independent components had to be used to roughly reproduce the UV/optical/IR light curves, and these would need an opacity of $\kappa\simeq 3$~cm$^{2}$\,g$^{-1}$. Many more events like GW170817 will be observed in the near future. These observations may allow us to answer a number of fundamental questions in the physics and astronomy pertaining to cosmology, chemical evolution, nuclear physics, and fundamental atomic physics, to name just a few. Study of GW170817 and other anticipated NS merger observations points out the compelling need to compute reliable opacities for heavy-element rich plasmas. Many of the methods and codes developed for stellar opacities over the last few decades (for example, TOPAZ, \citealp{2003JQSRT..81..227I}) can become the basis for such work, but additional theoretical and numerical methods are needed in some cases to deal with the fully relativistic and very complex atomic structure of such heavy elements. Heavy elements can have several bound electrons in excited states, all with non-negligible abundances due to a large configuration degeneracy not significantly reduced by Boltzmann factors. The {\it super-transition array} (STA) formalism \citep[and references therein]{1995PhRvE..52.6686B,1997PhRvE..56...70B} was developed to address the challenge presented by several bound electrons in excited states. While the STA method is used by several research groups, hybrid calculations combining detailed line accounting with STA are also being performed \citep[and references therein]{2015HEDP...15...30P, 2007HEDP....3..109H}. 
\citet{igl95} and \citet{2016ApJ...821...45K} have studied the impact of heavy elements on stellar opacities using the STA formalism, and find that, while these elements are effective photon absorbers, their low abundances lead to an insignificant contribution to the solar interior Rosseland mean opacities. Finally, methods developed for astrophysical opacities can also be used when atomic data are required for other applications. For example, the atomic data requirements for accurate modeling of tungsten plasmas that may contaminate current and planned magnetic fusion devices are formidable \citep{tungsten1}, and the problem is also greatly complicated by the non-LTE nature of fusion plasmas \citep{tungsten2}. Also accurate atomic data are required to model tin plasmas generated for EUV lithography applications \citep{2017HEDP...23..133C}. Depending on how these laser-produced plasmas are generated, resulting plasmas may or may not be in LTE. \section{General Findings} With regard to the future improvement of astrophysical opacities, in our opinion efforts need to take into account the following considerations: \begin{itemize} \item There are still significant differences between OP, OPAL, and more recent opacity tables \citep{2012ApJ...745...10B, 2016ApJ...817..116C, 2016ApJ...821...45K}. Much theoretical work needs to be carried out to diagnose such discrepancies, find any sources of missing opacity, and understand differences with the \citet{2015Natur.517...56B} experiment. \citet{mor17} calculated two-photon opacity including interference between classes of excited states, angular factors, atomic matrix elements, and density effects. While further investigation is needed before agreement with experiment can be claimed, they obtain substantial cross sections corresponding to an extra opacity comparable with the \citeauthor{2015Natur.517...56B} measurement. 
However, \citet{2018HEDP...26...23P} suggests that, due to the intensity of photon sources in the \citeauthor{2015Natur.517...56B} experiment, it is unlikely that two-photon processes would play an important role. \item Current computing power and the scale of the physics problem mandate trade-offs between completeness, detail, and accuracy in opacity calculations. We need to include a sufficient number of relevant processes and transitions in sufficient detail to adequately render radiative transfer in stars. The hope has been that, by summing up approximate cross sections over billions of bound--bound and bound--free transitions, we would end up with opacities somewhat more accurate than individual rates. This argument is only valid if all uncertainties are random and uncorrelated, but fails in the case of systematic errors. It is consequently important to identify and assess the importance of any systematic errors in atomic data whether measured or calculated. \item Early comparisons of calculated and experimental opacities under astrophysical conditions were made by \citet{1988ApPhL..52..847D} and \citet{1991PhRvL..67.3784P}; these and subsequent experiments were complemented by measurements of individual cross sections \citep{kjeldsen:Lab-bf-Review}. The experimental opacity determination at the Z-Facility by \citet{2015Natur.517...56B} marked an important advance in so far as conditions close to those at the base of the solar convection zone were attained; the finding of an unexpectedly large iron opacity when compared with all existing theoretical tables has reignited the study of atomic opacities. Experimental reproducibility is of course also a key issue, and there is a great deal of expectation in independent approaches based on the NIF (Perry et al., this volume) and on a double ablation front scheme (Cola\"itis et al., this volume). \item Further work is needed in the study of improved EOS for opacities, particularly in the high-density regime. 
At present the OP, OPAL, and OPLIB opacities use significantly different EOS formulations. This complicates detailed comparisons and the possibility of finding an exact source for the differences among opacity data sets. (See the paper by Kilcrease et al. in this volume.) \item In current research on extrasolar planetary atmospheres and planetary formation, forthcoming astronomical observatories, namely, JWST \citep{2006SSRv..123..485G} and ARIEL \citep{2016SPIE.9904E..1XT}, create an enormous need for molecular opacities. Molecules of interest range from simple diatomic systems to organic complexes that can trace the presence of life. These systems yield billions of bound--bound transitions taking significant resources to compute in full detail. Results of these detailed calculations and extensive measurements are available in ever growing databases such as ExoMol \citep{2016JMoSp.327...73T} and HITRAN \citep{jt691s}. (See also papers by Bernath, Calvet, Huang et al., Morley, Tennyson, and Zammit et al. in this volume.) \item While a lot of such detailed molecular data are currently available, the complex chemistry of rocky planets with temperatures ${\sim}300\,{\rm K}$ and a potential for life adds thousands of possible molecules for which this brute-force approach is rendered all but impossible. This has led to the development of alternative methods that are more efficient at producing approximate data (Sousa-Silva et al., this volume). \item One of the topics that we had in mind but were not able to cover in the WAO was the computation and measurement of dust opacities. Morley (this volume) shows that the modeling of brown-dwarf and planetary atmospheres to interpret observations must consider the opacity from aerosols (clouds and hazes), for which the optical constants of many compounds (ices, salts, and rocks) are not generally available. 
The increased wavelength range of the JWST will certainly inflate in the near future the demand for molecular line lists and aerosol opacities. Furthermore, Calvet (this volume) discusses the role dust plays in the structure of protoplanetary disks, as the variations of elemental composition are determined by dust structures (e.g., condensation fronts), and dust opacity depends on grain size distributions that are subject to settling and trapping effects. We would therefore encourage the organizers of the next WAO to promote this important opacity field. \end{itemize} \section{How to Move Forward} There are a number of steps in computing opacities for astrophysical entities, and each of these steps needs to be reassessed as part of the refinement of opacity calculations: \begin{itemize} \item[a)] We need detailed, accurate, and complete energy levels, line positions, $f$-values, and cross sections for each process and by each absorber of significance: atoms/ ions/anions/molecules and temporary dipoles formed in collisions. This is the opacity issue that has received the most attention, and great strides have been made as summarized above. In this respect quality assessments and benchmarks of atomic and molecular line data (e.g., the BRASS database project by Lobel et al. and Laverick et al., this volume) by detailed and extensive comparisons of theoretical and observed stellar spectra are essential. Plasma environment effects on close-coupling methods need investigation as a minimum requirement for accuracy to be assured. See also the {\it Opacity Wish List} (Trampedach, this volume) for new entries needed by the modeling community. \item[b)] We expect that the steady increase in computing power will ease the current compromise between accuracy and completeness, allowing both to be exhaustively addressed in future opacity calculations. 
\item[c)] A reliable EOS is needed that includes quantum and relativistic effects, higher order Coulomb interactions (beyond Debye--H{\"u}ckel), realistic microfield distributions and partition functions, and preferably, also molecules beyond H$_2$. Internal consistency results from the physical picture, but this has to be carefully crafted in the chemical picture. The fundamental unsolved problem is the description of atomic states in a plasma environment, which is expected to become increasingly critical at the higher densities. \item[d)] The EOS should also provide parameters for line broadening to establish a robust and accurate formulation of line profiles (consistent with the EOS), including Stark and pressure broadening, broadened Fano profiles, and interference by spectator electrons, etc. The importance of line broadening through the Stark effect, as mentioned in Section~3, becomes increasingly important as density increases; line absorption is then redistributed in frequency changing any resulting Rosseland mean opacity depending on them. It is therefore essential to take into account Stark broadening for all relevant L- and M-shell lines. The wavelength range selected must ensure a complete line profile is included in every case. \item[e)] We encourage quantum mechanical calculations of the continuum absorption by normal (in the zero-density limit) bound states that are wholly or partially ``dissolved'' by the plasma environment (via the occupation probabilities of the EOS in the chemical picture). Conservation of oscillator strength needs to be ensured. \item[f)] Proper accounting of plasma oscillations (through their effect on the dielectric constant) is desirable in the presence of electron degeneracy, screening, and Coulomb interactions. 
\citet{2018PhPl...25c2106S}, for example, derive the linear dispersion relation for electromagnetic waves in ultra-relativistic plasmas with arbitrary electron degeneracy that is applicable to white dwarfs and neutron stars. \item[g)] Robust and accurate integration of the Rosseland mean including the choice of wavelength or energy scale has often been overlooked. This needs to be done in a way that results in a Rosseland mean that is continuously differentiable in temperature, density, and composition and accompanied by a fast and robust scheme for interpolation in those variables. \item[h)] We firmly call for continued helioseismological work to help us constrain the solar chemical composition, the EOS, and atomic radiative data at solar interior conditions. Also further studies of pulsating stars, chemically peculiar stars, and asteroseismology, the latter particularly in the context of hybrid pulsators that have proven to be helpful in elucidating opacity shortcomings. \item[i)] As the discrepancies between the measurements by \citet{2015Natur.517...56B} and all current theoretical opacities remain unexplained, experimentalists are encouraged to continue the analysis of their setups and possible sources of uncertainty \citep{2016PhRvE..93b3202N, 2016HEDP...20...17N, 2017PhRvE..95f3206N}. This compels our community to make a plea for prioritizing experimental work such as that planned at the NIF \citep[][Perry et al., this volume]{2017JPlPh..83a5903H,2017HEDP...23..223P} and LMJ-PETAL \citep{2015HEDP...17..162L} to determine opacities under a wide range of conditions and independently verify these first experiments, as well as expand their scope. 
\end{itemize} The various indications of problems with current opacity tables as a result of experiments, helioseismology, and asteroseismology surely mean the issue impacts a much larger region of stellar structure and evolution, but has hitherto been masked by other uncertainties and not been recognized for what it is. Solving these known opacity problems therefore has the potential to have wide-felt repercussions for astrophysics in general, and of a similar magnitude to the revolution in atomic physics applied to stars as we witnessed around the first WAO. \acknowledgements The authors are indebted to Dr. C. A. Iglesias (LLNL) for comments on two drafts of this paper that led to substantial improvements and to Dr. P. Hakel (LANL) for useful suggestions. One of us (AEL-G) is grateful to the University of Oxford for a travel and subsistence allowance that made attendance at the second WAO possible.
{ "redpajama_set_name": "RedPajamaArXiv" }
7,388
Q: Overriding Equals For Self Referencing Parent Child Objects I have a Class which holds Parent-Child relationship among its objects using self referencing public class MyObject { private String name; private List<MyObject> childrens; private MyObject parent; @Override public int hashCode() { int hash = 5; hash = 13 * hash + Objects.hashCode(this.name); hash = 13 * hash + Objects.hashCode(this.childrens); hash = 13 * hash + Objects.hashCode(this.parent); return hash; } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } final MyObject other = (MyObject) obj; if(!this.name.equalsIgnoreCase(other.name)) return false; if(this.parent==null) { if(other.parent!=null) { return false; } }else if(!this.parent.equals(other.parent)) { return false; } if(!allChildrenEqual(this,other)) { return false; } return true; } private boolean allChildrenEqual(MyObject aThis, MyObject other) { List<MyObject> children1=aThis.getChildrens(); List<MyObject> children2=other.getChildrens(); //Avoid infinite recursion can't use anything simple as // list1 equals other list return true; } I want to know how can I override equals properly which compares hierarchy of these objects without going into infinite recursion and causing StackOverflowError. I have a feeling I am missing something really obvious. Thanks in advance. A: Create an equalsNoParentCheck() method, then use that in the allChildrenEqual() method, and change the equals() method to use equalsNoParentCheck() and to perform the parent check. Also, you have the same recursion issue in the hashCode() method. Just remove parent from the hashcode algorithm. Hashcodes don't have to be unique, just use best effort. It's unlikely that including parent would make much difference, overall, anyway. 
Since you're using Objects.hashCode(), you might as well use Objects.hash(): @Override public int hashCode() { return Objects.hash(this.name, this.childrens); // Don't hash this.parent } A: I think that just putting the "base case of the recursive equivalence test", that is, if (object == this) return true; at the first line of the equals() definition will prevent infinite recursion.
{ "redpajama_set_name": "RedPajamaStackExchange" }
8,033
7th Annual Pop Fiction Writing Competition Winner: Romance Here's the winning entry for the Romance category for the 7th Annual Writer's Digest Popular Fiction Writing competition by lifestyle columnist and grant processor Karin Fuller. Marielle Murphy FEAR OF not FLYING I held the square of coated paper between my fingers, image side down. Was tempted to look again. Knew it wouldn't be wise. A picture of Rick and another woman wouldn't have surprised me. I'd seen it before. Even Rick and another man wouldn't have been such a stretch. But this? This caught me completely off guard. I hadn't expected to cry. Thought I was years past that kind of reaction. I'd become a master pretender, skilled at deluding myself and diverting the kids and staying so busy there wasn't energy left for thought. Rick had apparently ceased feeling a need for discretion a mistress or two back. Once he realized I knew and did nothing, he'd almost seemed challenged to do more—see how much I'd take. I took quite a bit, and didn't even know why. But a baby? That changed the game. Why it hurt so much, I'm not sure. But it did. Still carrying the ultrasound in one hand, I clipped Penny to her leash and stumbled down the lake path to a private bench not far from the flight park. The tears had waited until I'd sat, then they rolled up through me and out in tight choked sobs and hiccup-sounding barks. Ugly noises. Embarrassing. And then suddenly, there was something in my lap. Something heavier than Penny, my arthritic dachshund. Hard to see through the blur of tears, but it was large and white and—for shit's sake! A goose. It had apparently heard my distress and come waddling, then launched onto my lap. Her head was pressed to my chest, almost as though she was listening to my heart. I realized the goose had only one wing. The other appeared to have been severed close to her body, or perhaps never there at all. The place it would've been was neatly covered with feathers. 
Too stunned to keep crying, I mopped away my tears and was marveling at the goose—a goose wearing not one, but three hot pink bangles around her neck—when I heard a man's voice. "Well, I'll be damned. You've gone and stolen my bird." The goose, my dog, and I all looked up at the same time. I swear that bird looked guilty. She hopped down from my lap and rushed to his side, wrap-rubbed herself around his legs like a cat, honking what I guessed to be an apology. The man was thick-chested, white-haired, and bearded. Like a lumberjack Santa, complete with twinkling eyes and grey flannel shirt. But more handsome than jolly. He reached down and stroked the side of the goose's neck. She honked softly again, then made clicking sounds with her beak. "She doesn't usually leave me," he said. "Especially here. Can't risk missing her time in the sky." I wanted to ask what he meant by time in the sky, but was afraid my voice hadn't recovered enough yet to speak. My throat closes when I get upset, tightening until it becomes hard to speak or if words do come out, the tears in them are obvious. I didn't want this stranger to hear me that way. I took several deep breaths while he tended his goose, giving her Cheerios from his pocket. My dog apparently decided the excitement was over and let out an exhausted-sounding groan, then collapsed on my feet. "You okay?" he asked. I nodded, tried to smile. A sniffle escaped. "What's her name?" I managed to ask. "Fancy Nancy." She honked at the sound of her name. "Nance to her friends. She doesn't often like women. Surprised me to see her take to you that way. She's a jealous sort, this girl of mine." He gestured toward my bench. "Mind if I sit?" I dropped my purse to the ground next to Penny. She made it her pillow instead of my feet. He sat at the far end and Nancy hopped in between us, using her one wing for balance. "Was she born that way?" I asked. He shrugged. "Been that way as long as I've known her." 
I ran my hand across the smooth feathers on her back several times. She bent her graceful neck and gently nibbled the skin of my forearm in return. "What do you know?" He laughed. "She never kisses anyone but me." "It's a first for me, too," I said. "I've had my share of dog kisses, but never a goose." Thing was—I realized I wasn't looking at Nancy when I spoke, but at the man's mouth. This strangely random thought—Wonder if his whiskers would tickle?—flashed through my head. Surprised, I blinked hard. Looked away. I'm losing my flipping mind. Feeling rattled, I sat quietly with the lumberjack Santa and his goose, she alternating her attentions from him to me and then back. It was a relief to have my thoughts distracted away from Rick and his pregnant mistress and our— also newly discovered—mountain of debt. "I manage the grounds at Berry Hills," the man said. "South Hills golf course. You know the place?" "Even the geese there are snobs," he said. "They were picking on Nancy something awful, so I brought her home." I smiled. He had such a nice face. It was well-lined, but the lines appeared caused by a lifetime of smiling. He had kind, happy eyes. A nice mouth. "She'd go to work with me every day, ride in my cart," he said. "Followed me around like a pup. Seemed content enough until fall rolled around and the other geese headed south. I watched her watching them fly. . . ." He was quiet for a moment. "She'd never seemed to mind not being able to fly until then," he said. "It hurt to see her that way." He gestured toward the sign for the flight park, where adventurous types went to hang glide and parasail. "That's why we come here." I laughed. "You take her flying?" "Every Sunday," he said. Nancy was looking at him adoringly, as if she understood every word. "We do the kind where the glider is pulled behind a truck and takes off from there," he said. "I made Nance her own harness. We get about ten minutes of airtime. 
Not much, but it makes my girl happy, doesn't it?" Nancy flapped her one wing. "Once she got a taste of flying, there was no turning back. You should try it sometime." There was a gentleness to his manner of speaking that touched me, as if he sensed my sadness and was making an effort to distract me away. But there was something else, too. I liked how he looked at me. It made me feel strangely dizzy. "I'm afraid of heights," I said. "Wasn't fond of them myself," he said. "If it wasn't for her, I never would've tried. Sure, it's scary. But you just push yourself through." He stroked Nancy's head, "When you're in love, you do crazy things." "Some of us do crazy things when we aren't," I said, and then instantly wished I could take the words back. I didn't want that personal of a conversation. I saw him glance at my wedding ring. Was grateful he chose not to speak. Nancy stood then and hopped off the bench, waggled her back side dramatically and honked. "Figured that was coming," he said. "Such a nag, that one. If I don't jump when she commands, it'll get ugly." He extended his hand and I took it. It was strong, I noticed. Rough and calloused. A man's hand. Nothing like Rick's. "Name's Layne, by the way," he said. "First or last?" "Both," he said. "My parents were strange." I decided to stay and watch them fly, partly because I was so thoroughly charmed, but also because I wasn't yet ready to face Rick at home. This had been pleasant. That, likely not. When they finished and Nancy was unstrapped, she raced toward me, honking so constantly it was as if she was telling every detail. I sat cross-legged on the grass with a happily chattering goose in my lap. Layne dropped down beside me. Just when we'd think she was finished, she'd remember something else and blurt it all out. After she settled, Layne shared a few stories, telling about his years in the military, his time overseas, his summers as a whitewater guide. "Tell me about you," he said. 
"What kind of nonsense have you gotten into over the years?" I thought of stories I could tell him. Streaking in college. Skinny dipping with friends. Blowing the whistle on an embezzling boss. The time I busted the window of a new Lexus with a brick to rescue a dog. Except . . . my stories weren't mine. For years, I'd told them as if they were, but they weren't. It had been my friends who'd gone streaking and skinny dipping, who'd left parties with boys they barely knew and kissed strangers in elevators and did the kind of things that make people smile to remember. But me? I hadn't even ratted out my swindling boss. He'd left on his own, undetected. And I'd only held the brick in my hand while I stood by that Lexus; imagining so vividly how it would feel to break through the glass that it was almost as if I'd actually done it. Instead, I'd just stood there, feeding squares of ice from my cup to the dog inside that hot car until its owner returned. I'd spent much of my life on the high dive, but never once jumped. With Nancy napping on the grass in between us, I shared this with Layne, and then told him about Rick's infidelities, his pregnant mistress, the debts he'd amassed under our name. "There are worse things than being alone," Layne said. I knew he was right, but change that drastic terrifies me. What I had with Rick wasn't good, but it still seemed preferable to the unknown. "Come flying with us next Sunday," Layne said. As if that was the answer. I shook my head, suddenly unable to speak. My throat had tightened. Clamped in my words. Layne took my hand. Squeezed it. His touch set a thousand bees to buzzing inside me, and he seemed completely aware (and thoroughly pleased) with the reaction he'd caused. "When you're ready," he said. After they left, I stayed for a while, enjoying a strange sort of peace—the absolute last feeling I'd expected to experience. There are worse things than being alone, Layne had said. You just push yourself through. 
I felt ready to push. I didn't crumple the picture. Just put it back where I'd found it—tossed casually on Rick's dresser, with his keys and spare change. I looked around our bedroom. Oversized furniture. Dull colors. All had been Rick's choices. He wanted a manly bedroom, no flowers or frills. I'd acquiesced because I didn't like conflict, but in spite of the many years I'd slept here, I never felt it was mine. I walked through the rooms of our house, seeing it with fresh eyes. I had to work to find traces of me. The kids were there, as was Rick. Even Penny. But somewhere along the way, I'd stopped leaving my mark, so caught up in them that I forgot about me. It must've happened gradually, much the same way Rick's arrogance had increased and his character diminished over the years. He'd become someone I no longer respected, didn't even much like. And now that the last of our children had gone off to college, it wasn't them that tethered us together. Our nest had emptied. Maybe that was all the inspiration Rick needed to fertilize a new egg somewhere else. The image of Layne and his goose came to mind then and I smiled. I knew a bit about geese. Knew they mated for life. They were fiercely loyal creatures, protective of those they loved. That my husband couldn't compete with the moral fiber of a goose did not escape me. So why did I stay? Was I that severe a creature of habit? I thought again about Layne. The way he'd looked at me. How those looks made me feel. I'd been around him such a short time, but felt almost infected with his brand of alive. I picked up the square of coated paper, stared hard at the image. That perfect little profile, so much more detailed than the ultrasounds I'd had so many years back. It's humiliating that it took something so drastic for my backbone to finally be pushed into place, but once it was set, there was no bending it back. The days that followed were frenzied. So many appointments. 
It was hard to believe an area as small as ours would have so many divorce attorneys, but I had a consultation with darn near every one before I officially hired The Shark. There'd never been any doubt that I'd hire The Shark, but I met with all those other lawyers so Rick wouldn't be able to hire them. It would be a conflict of interest since they'd already consulted with me. I considered some sort of creative revenge—something of legendary proportions, that people would hear about and forward to each other by email, that Snopes would be checked to debunk. Then I decided the best revenge would be to simply step aside and let them be together. They deserved one another. Funny how a goose led me to a shark to get rid of an ass. On Sunday, I returned to my bench just outside the flight park, hoping to again see Nancy and Layne. I was not disappointed. Nancy recognized me from a distance; came honking toward me with her single wing flapping in a wobbly circle. Around her neck, she wore pearls and a wrist watch. Layne smiled. Looked at me in that way. I opened my cooler and removed some torn up spinach and fresh peas that I'd brought for Nancy. She gobbled them greedily, pausing only long enough to snatch the crust from Layne's sandwich. "I've tried teaching her manners," said Layne. "Can't get her to use a napkin either." We talked easily as we ate our sandwiches and drank our sweet tea. I'd spent years with an unsweetened tea drinker, but somehow intuited that Layne would like sweet. Maybe it was the slight Santa-ness about his appearance that attached him and sugar, but I hadn't been wrong. He emptied his cup with pleasure apparent, and then asked for more. His hand lingered on mine as I refilled his cup. "I've been pushing myself through," I told him. "It's been uncomfortable, but not as hard as I'd thought." I explained how every new step started with me scared beyond reason, but somewhere along the way, I realized it wasn't just fear. I felt something else, too. 
I felt alive. Layne smiled. And then, that very same afternoon, he took me flying. Without ever once leaving the ground. popular fiction competition
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
7,888
Il Chonburi Football Club è una società calcistica thailandese con sede nella città di Chonburi fondata nel 1997. Palmarès Calcio Competizioni nazionali 2007 2010, 2016 2007, 2009, 2011, 2012 Altri piazzamenti Secondo posto: 2008, 2009, 2011, 2012, 2014 Terzo posto: 2010, 2013 Finalista: 2014, 2020-2021 Semifinalista: 2008, 2009, 2010 Semifinalista: 2012 Organico Rosa 2021 Rosa 2020 Aggiornata al 4 febbraio 2020. Calcio a 5 Palmarès Competizioni nazionali Campionato thailandese: 8 2006, 2009, 2010, 2011-12, 2012-13, 2014, 2015, 2016, 2017 Coppa della Thailandia: 4 2010, 2011-12, 2014, 2015 Competizioni internazionali AFC Futsal Club Championship: 2 2013, 2017 Collegamenti esterni Chonburi F.C.
{ "redpajama_set_name": "RedPajamaWikipedia" }
6,978
{"url":"https:\/\/mathoverflow.net\/questions\/200210\/injective-inclusion-map-from-rkhs-function-space-to-l-p-mu","text":"# Injective inclusion map from RKHS function space to $L_p(\\mu)$\n\nLet $X$ be a measurable space, $\\mu$ be a $\\sigma$-finite measure on $X$, and $H$ be a separable reproducing kernel Hilbert space over $X$ with a measurable kernel $k$.\n\nAt a certain part in a proof I am reading there is the condition\n\n$S_k : L_q (\\mu) \\to H$ has a dense image if and only if $id: H \\to L_p(\\mu)$ is injective.\n\nWe know (it is shown in the proof) $id$ is the inclusion operator and is continuous. $S_k$ is the adjoint of the inclusion operator. And $H$ consists if $p$-integrable functions.\n\nMy question is how can the inclusion mapping $id$ always be injective? $H$ is a Hilbert space of function and $L_p(\\mu)$ is a space of equivalence classes of functions. I am really confused on the intuition here.\n\nConsider the following:\n\n\u2022 I take the function $f \\in H$. I know $f$ belongs to some element of $L_p$ and is measurable. Thus the inclusion makes sense.\n\u2022 The inclusion maps $f$ to its equivalence class $[f]$\n\u2022 Say I take a measurable $p$-integrable function $g$ which equals $f$ almost everywhere. (I am obviously assuming such a function exists in $H$).\n\u2022 We see the inclusion maps $g$ to $[f]$.\n\u2022 Therefore the inclusion map can't be injective\n\nI could see that this can fail if no such function $g$ exists in $H$. (For example if the RKHS $H$ has a continuous kernel then every $f \\in H$ is continuous and the $g$ in question could not be found.) But I have no reason to believe that such a $g$ can't exist. I am obviously suffering from a knowledge gap. Where am I going wrong in how I am thinking about this.\n\nPS: The proof can be found on page 126 here: here\n\n\u2022 Haven't you answered your own question? If, for instance, all the functions in $H$ are continuous, the inclusion map will be injective. 
For a concrete example, take $X = [0,1]$ and let $H = H^1([0,1])$ be the Hilbert space of all absolutely continuous $f : [0,1] \\to \\mathbb{R}$ having $f' \\in L^2([0,1],m)$, equipped with the inner product $\\langle f,g\\rangle_H = \\int_0^1 (fg + f'g')\\,dm$. Mar 17 '15 at 5:35\n\u2022 So I guess I am confused as to what you are really asking. Certainly the inclusion map can be injective. There doesn't seem to be any assertion that it is always injective. Mar 17 '15 at 5:37\n\u2022 @NateEldredge The assertion is the the inclusion map is always injective. If the kernel is continuous (which means all functions in $H$ are continuous, then yes we have injectivity.) If the kernel is * measurable* then we can only say that each $f \\in H$ is measurable.\n\u2013\u00a0Eric\nMar 17 '15 at 5:41\n\u2022 I don't see that assertion in what you've written; is it elsewhere in the text? The line you quote after \"At a certain point\" is an if and only if statement. It doesn't assert that the map is injective. If there is an assertion, somewhere, that the inclusion map is always injective, then it must follow from other conditions you have not mentioned. Mar 17 '15 at 5:44\n\u2022 @NateEldredge, thanks for clarifying! I wasn't familiar with this class of spaces. Mar 17 '15 at 5:44\n\nAs an incredibly trivial example, let $H = \\mathbb{R}$ with its usual inner product, considered as the RKHS of real-valued functions on a one-point set $X = \\{x\\}$. (The reproducing kernel is $k(x,x) = 1$.) If $\\mu$ is a measure on $X$ assigning positive mass to $x$, then the inclusion $H \\to L^2(X,\\mu)$ is injective. 
If $\\nu$ is the zero measure, then the inclusion $H \\to L^2(X,\\nu)$ is not injective.","date":"2022-01-24 14:23:20","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.9464665651321411, \"perplexity\": 119.25068917655743}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.3, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2022-05\/segments\/1642320304570.90\/warc\/CC-MAIN-20220124124654-20220124154654-00530.warc.gz\"}"}
null
null
Q: php function and mysql insert I'm a real beginner with php and I really need help. Here is my code for my first file where I need to call an insertnewmarker function from my second file. I really dont know how and where to call the function. (the code of the second file is below) I need to insert $name in database 1st file code: require_once('includes/php.config.db.php'); function curl($url){ $ch = curl_init(); curl_setopt($ch, CURLOPT_URL, $url); curl_setopt($ch, CURLOPT_ENCODING, "gzip"); curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1); $data = curl_exec($ch); curl_close($ch); return $data; } libxml_use_internal_errors(true); $dom = new DOMDocument(); @$dom->loadHTMLFile('http://www.kupime.com/aktivne-ponude/'); $xpath = new DOMXPath($dom); $entries = $xpath->query("//ul[@id='list_offers']//table//a/@href"); $output = array(); $i = 1; foreach($entries as $e) { $dom2 = new DOMDocument(); @$dom2->loadHTMLFile('http://www.kupime.com' . $e->textContent); $xpath2 = new DOMXPath($dom2); $data = array(); $data['link']= ('http://www.kupime.com' . 
$e->textContent); $data['naslov'] = trim($xpath2->query("//h1[@id='offer_title']")->item(0)->textContent); $data['opis'] = trim($xpath2->query("//div[@id='company_info']")->item(0)->textContent); $data['slika'] = trim($xpath2->query("//div[@id='img_border']/img/@src")->item(0)->textContent); $location = $xpath2->query("//div[@id='location']//script")->item(1)->textContent; preg_match('/var\s+lat\s+=\s+(\d+\.\d+)\s*;/', $location, $lat); preg_match('/var\s+lang\s+=\s+(\d+\.\d+)\s*;/', $location, $lng); $data['lat'] = $lat[1]; $data['lng'] = $lng[1]; $data['popust'] = trim($xpath2->query("//li[@class='discount']")->item(0)->textContent); $data['firma'] = trim($xpath2->query("//div[@id='company_info']/h3")->item(0)->textContent); $data['telefon'] = trim($xpath2->query("//div[@id='company_info']/p[2]")->item(0)->textContent); $data['email'] = trim($xpath2->query("//div[@id='company_info']/p[3]/a[1]/@href")->item(0)->textContent); $data['sajt'] = trim($xpath2->query("//div[@id='company_info']/p[3]/a[2]/@href")->item(0)->textContent); //vreme $data['sat'] = trim($xpath2->query("//div[@class='countdown hasCountdown']/strong[1]")->item(0)->textContent); $data['minut'] = trim($xpath2->query("//div[@class='countdown hasCountdown']/strong[2]")->item(0)->textContent); $data['sekund'] = trim($xpath2->query("//div[@class='countdown hasCountdown']/strong[3]")->item(0)->textContent); $data['vreme'] = time() + $data['sekund']+ $data['minut']*60 + $data['sat']*3600; $image_url = 'http://www.kupime.com'.$data['slika']; $naslov = $data['naslov']; $latitude = $data['lat']; $longitude = $data['lng']; $latitude = (float) $latitude; $lin = $data['link']; $vreme = $data['vreme']; $popust = str_replace ('POPUST','',$data['popust']); $firma = $data['firma']; $telefon = $data['telefon']; $email = $data['email']; $sajt = $data['sajt']; $type = 'bar'; $latitude = (float) $latitude; $longitude = (float) $longitude; $output[] = $data; } ?> </body> </html> Second file code: <?php 
error_reporting(true); $link = mysql_connect('localhost', 'user', 'pass') or die('<p>Connection imposible!</p>'); mysql_select_db('tablename', $link); mysql_query("SET NAMES utf8"); mysql_query("SET CHARACTER SET utf8"); mysql_query("SET COLLATION_CONNECTION='utf8_general_ci'"); function insertNewMarker($name='', $lat='', $lng='', $slika='', $link='', $type='', $popust='', $vaziDo='') { if(!empty($name) and !empty($lat) and !empty($lng) and !empty($link)) { $name = mysql_real_escape_string(trim($name)); $sql = "INSERT INTO `markers` (`name`, `lat`, `lng`, `address`, `totolink`, `type`, `popust`, `vaziDo`) VALUES ('$name', '$lat', '$lng', '$slika', '$link', '$type', '$popust', '$vaziDo')"; mysql_query($sql) or print "<p>Error in SQL Statement ($sql):<br />". mysql_error() .'</p>'; } else { print "<p>Polja 'name', 'lat' i 'lng' can't be empty!<br />Vi ste uneli <br />Name:$name<br />Lat:$lat<br />Lng:$lng</p>"; } } ?> A: [1 file] after: $longitude = (float) $longitude; [1 file] add: include('second_file.php'); insertNewMarker($name, $lat, $lng, $slika, $link, $type, $popust, $vaziDo); and make sure function arguments $name, $lat, $lng, $slika, $link, $type, $popust, $vaziDo are assigned - i can't help you in this, cause i dont understand your language, anyways it's your job :)
{ "redpajama_set_name": "RedPajamaStackExchange" }
2,227
{"url":"https:\/\/www.jaquishbiomedical.com\/support\/x3-specs\/latex-allergy\/","text":"# Can I use X3 despite a latex allergy?\n\nWe do have people with latex allergies that use the X3 but they usually take some precautions such as, wearing a longer sleeved shirt, wearing gloves, and making sure to wash after your workout to prevent the latex proteins from irritating the skin.\n\nThe bands are made using natural latex, so if you have an extreme allergy to latex, these bands may not be right for you. We have been looking into non latex bands and have been unable to find any that can provide the resistance and durability to be usable with X3 bar workouts.","date":"2020-05-31 20:28:46","metadata":"{\"extraction_info\": {\"found_math\": false, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.9303619861602783, \"perplexity\": 1624.7915525288247}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.3, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2020-24\/segments\/1590347413624.48\/warc\/CC-MAIN-20200531182830-20200531212830-00156.warc.gz\"}"}
null
null
{"url":"http:\/\/mathoverflow.net\/tags\/ordinal-numbers\/info","text":"# Tag Info\n\nAn ordinal is the order type of a well-ordered set. The first few ordinals are $0, 1, 2, \\dots, \\omega, \\omega+1, \\dots$ where $\\omega$ is the order type of $\\mathbb{N}$, and $\\omega+1$ is the order type of $\\mathbb{N}$ together with a maximum element.\n\nThere is no tag wiki for this tag \u2026 yet!\n\nTag wikis help introduce newcomers to the tag. They contain an overview of the topic defined by the tag, along with guidelines on its usage.\n\nAll registered users may propose new tag wikis.\n\n(Note that if you have less than 20000 reputation, your tag wiki will be peer reviewed before it is published.)","date":"2016-07-23 23:24:15","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.32156452536582947, \"perplexity\": 863.4841787129051}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 20, \"end_threshold\": 5, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2016-30\/segments\/1469257823802.12\/warc\/CC-MAIN-20160723071023-00092-ip-10-185-27-174.ec2.internal.warc.gz\"}"}
null
null
The Federal Government's Office of Special Education Programs is now requiring all 50 states to participate in the Early Childhood Outcomes Project. Any child ages 3-5 years, receiving special education services that was enrolled for those services after March 1, 2006, must have a Ratings Scale completed within 45 calendar days of their enrollment. They must also have an exit Rating Scale completed after they turn 5 years of age. Purple folders should include an Outcomes form. This form may be completed at the Eligibility Review or one of the team members may complete and send it in to Kim Cox within 30 days of the child's enrollment. This is required for those students receiving Speech Language services only. Outcomes must be done on students whether they come from the EI system or not. STARnet is the state agency assigned to train and provide technical assistance in this project.
{ "redpajama_set_name": "RedPajamaC4" }
4,375
// MIT License. Copyright (c) 2016 Maxim Kuzmin. Contacts: https://github.com/maxim-kuzmin
// Author: Maxim Kuzmin

using Makc2017.Core.Web.Api;
using Microsoft.AspNetCore.Cors;
using Microsoft.Extensions.Logging;

namespace Makc2017.Host.Web.Mvc
{
    /// <summary>
    /// Host. Web. MVC. Controller.
    /// Thin MVC controller for the web host; adds no actions of its own and
    /// only forwards the injected logger to the core base controller.
    /// The CORS policy "AllowWebApp" is applied to every action inherited
    /// from <see cref="CoreWebMvcController"/>.
    /// </summary>
    [EnableCors("AllowWebApp")]
    public class HostWebMvcController : CoreWebMvcController
    {
        #region Constructors

        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="logger">Logging facility passed through to the base controller.</param>
        public HostWebMvcController(ILogger logger) : base(logger)
        {
        }

        #endregion Constructors
    }
}
{ "redpajama_set_name": "RedPajamaGithub" }
4,537
\section{Introduction} \label{sec:intro} In statistical mechanics, \emph{indistinguishability} of particles and consequently the correct Boltzmann counting play an essential role, see, e.g., Refs.~\cite{Pathria1996, Kardar2007, Huang2009}. The ``Gibbs paradox'' is well-known in this regard and the extensivity of entropy is recovered by introduction of the $1/N!$ factor, which corrects the number of microstates by the number of permutations of $N$ particles. Commonly, the $1/N!$ factor is regarded as a remnant of quantum mechanics in the classical limit, in which identical particles are inherently indistinguishable. In contrast to such a point of view, however, the $1/N!$ factor should be consistently interpreted based on an ``informatic'' definition of entropy~\cite{Cates2015}. Accordingly, a modified term, ``undistinguished'' particles~\cite{Sethna2006}, has also been proposed. Consequently, even though the classical particles such as colloidal particles are undoubtedly distinguishable, the statistical mechanics with the $1/N!$ correction describes the macroscopic behaviors of such systems successfully~\cite{Swendsen2008, Frenkel2014}, as far as one ignores detailed differences between particles~\cite{Jaynes1957, Jaynes1992} and leaves the particles unlabeled. At the very microscopic level, i.e., at the atomic scale, a system consisting of identical particles is invariant under permutations of the particles and the particles are unlabeled in principle. If one considers a mesoscopic length scale and employs a coarse-grained description~\cite{Doi1986,Menzel2019}, however, it may become necessary to distinguish between particles that are physically identical. Such a scenario can emerge if the particles are permanently localized with respect to their neighbors, thereby rendering the system non-ergodic on the relevant energy scale. 
It is thus a major challenge for statistical mechanical theories to describe a model with labeled particles~\cite{Cremer2017} or to keep track of a single localized particle~\cite{Wittmann2019}. Here we develop a statistical description for a new class of composite materials, which consist of magnetic particles and an elastic polymer matrix~\cite{Filipcsei2007, Ilg2013, Menzel2019}. In these materials, called ferrogels or magnetorheological elastomers, the dynamical trajectories of the magnetic particles are frequently strongly constrained by the polymeric environment~\cite{Gundermann2014, Landers2015}. Such a magneto-mechanical coupling can even be enhanced by directly anchoring the polymers on the surface of magnetic particles~\cite{Frickel2011, Messing2011, Ilg2013, Roeder2015}. Hence, the elastic properties of the materials can be tuned from outside by non-invasive applications of magnetic fields~\cite{Stepanov2008, Stolbov2011, Stolbov2019}. As a further consequence of this coupling the particles cannot exchange their positions due to the fixation by the elastic medium. Various studies have been conducted to theoretically understand the behavior of such ferrogels with different description levels from the microscopic scale resolving the individual polymer particles to the macroscopic hydrodynamic/thermodynamic theory. For many practical purposes, one may neglect the thermal motions of the magnetic particles~\cite{Menzel2019}. In particular, a mesoscopic dipole-spring model has been adopted to study the elastic and dynamical properties of ferrogels~\cite{Pessot2016, Pessot2018, Goh2018}. The matrix-mediated interaction between magnetic particle inclusions has also been revealed in terms of continuous elastic backgrounds~\cite{Biller2014, Biller2015, Cremer2015, Cremer2016, Puljiz2016, Puljiz2017}. 
Furthermore, microscopic descriptions of ferrogels via coarse-grained molecular dynamics simulations enable us to probe the role of thermal motions of the magnetic particles explicitly~\cite{Weeber2012, Weeber2015jmmm, Weeber2019}. Here, we merge several of the aspects mentioned above. Our goal is to formulate a statistical mechanical theory for ferrogels in a dipole-spring model with thermal fluctuations taken into account. The most challenging problem that has to be addressed along the way arises from the fact that the particles in ferrogels are strictly \emph{labeled} by their positions as in lattice systems, for instance, the classical Ising or XY models~\cite{Goldenfeld1992} and harmonic crystals~\cite{Ashcroft1976}. Accordingly, correcting the number of microstates by the factor $1/N!$ does not apply to a statistical description of ferrogels and a permutation of particles will cause a change in energy (physically this results in strong distortions of the surrounding elastic matrix). While computational approaches such as Monte-Carlo (MC) simulations are still feasible~\cite{Weeber2019}, the formulation of a statistical mechanical theory is severely complicated by the inherent composite nature of the ferrogels, in contrast to, for instance, harmonic crystals. In practice, one would need to take into account the nonlinearity stemming from the steric and magnetic interactions. 
One natural candidate for a statistical theory is classical density functional theory (DFT)~\cite{Evans1979, Lutsko2010, Evans2016}, which has been probed to be successful for variety of systems~\cite{Lowen2002}, ranging from simple classical fluids~\cite{Ebner1976} to systems showing a freezing transition~\cite{Ramakrishnan1979}, from hard-spheres~\cite{Roth2010} to hard convex particles~\cite{Wittmann2016} and also for two-dimensional systems~\cite{Roth2012, Wittmann2017, Lin2018}, including dipolar or electrostatic interactions~\cite{Zimmermann2016, Roth2016}, and capturing the spinodal decomposition dynamics~\cite{Archer2004} in an adiabatic approximation for time-dependent systems. However, due to the particle labeling, a direct application of the machinery of DFT to ferrogels is not possible. To this end, we will map the elastic interaction onto an appropriate pairwise pseudo-potential~\cite{Cremer2017} between unlabeled particles, which allows us to formulate a DFT. Ultimately, we aim at investigating the elastic properties of the dipole-spring systems within the DFT framework. By comparison to the MC simulations, the validity of the theory is confirmed. The paper is organized as follows: In Sec.~\ref{sec:model}, we introduce a two-dimensional model for ferrogels. Sec.~\ref{sec:mapping} describes detailed procedures of the mapping and the subsequent formulation of a DFT. Combining MC simulations and DFT calculations, two distinctive response scenarios in elastic properties to the change of the magnetic moment are identified in Sec.~\ref{sec:elastic}. Lastly, a summary and a discussion are given in Sec.~\ref{sec:conclusion}. \section{The dipole-spring model} \label{sec:model} We consider a bead-spring model~\cite{Doi1986} in terms of a periodic two-dimensional hexagonal lattice, as illustrated in Fig.~\ref{fig:model}. There are $N$ identical magnetic particles and $3N$ identical harmonic springs connecting the nearest neighbors. 
We denote the position and the dipole moment of the $i$th particle of diameter $\sigma$ by $\vec{r}_i$ and $\vec{m}_i$, respectively. The total Hamiltonian $\mathcal{H}_{\rm tot}$ of the model system is given by the sum of the kinetic part and the interaction Hamiltonian $\mathcal{H}_{\rm int}$ which consists of three parts in the form of \begin{align} \mathcal{H}_{\rm int} = \mathcal{H}_{\rm m} + \mathcal{H}_{\rm el} + \mathcal{H}_{\rm st}. \label{eq_Hint} \end{align} Among these three terms, the magnetic part $\mathcal{H}_{\rm m}$ and the steric part $\mathcal{H}_{\rm st}$, can be written as \begin{align}\label{eq:pairadd} \mathcal{H}_{\rm m, st} = \frac{1}{2} \sum_{i\neq j} u_{\rm m, st} (\vec{r}_{ij}), \end{align} where $\vec{r}_{ij} = \vec{r}_j - \vec{r}_i$. For isotropic interactions, the vector $\vec{r}_{ij}$ in the argument can be replaced by $r_{ij} = |\vec{r}_{ij}|$. First, the two-body magnetic dipole-dipole interaction energy between magnetic particles reads \begin{align} u_{\rm m}(\vec{r}_{ij}) = \frac{\mu_0}{4\pi} \left[ \frac{\vec{m}_i\cdot \vec{m}_j}{r_{ij}^3}- \frac{3(\vec{m}_i\cdot \vec{r}_{ij})(\vec{m}_j\cdot \vec{r}_{ij})}{r_{ij}^5} \right], \end{align} where $\mu_0$ is the vacuum permeability. Henceforth, we assume that the magnetic moment is constant in time and for all particles, i.e., $\vec{m}_i (t)=\vec{m}$ regardless of $i$ and $t$ for the sake of simplicity. Moreover, we constrain ourselves to isotropic magnetic interactions in the two-dimensional plane by further assuming that $\vec{m} = m\hat{z}$ is perpendicular to the lattice plane, so that the magnetic two-body interaction energy can be written in a simpler form as \begin{align} u_{\rm m}(r_{ij}) = \frac{\mu_0 m^2}{4\pi} \frac{1}{r_{ij}^3}. 
\label{eq_Hmag} \end{align} The second term in Eq.~\eqref{eq_Hint} corresponds to the elastic energy of the harmonic springs and reads \begin{align} \mathcal{H}_{\rm el} = \sum_{\langle i, j \rangle} u_{\rm el}(r_{ij}) =\sum_{\langle i, j \rangle} \frac{1}{2} k_{\rm el}({r}_{ij} -a)^2, \label{eq_Hel} \end{align} where $k_{\rm el}$ is the spring constant and $a$ is the rest length of the springs. Apparently, each spring connects a certain prescribed pair of particles, and our summation persistently runs only over nearest-neighboring pairs as indicated by the angular bracket. Consequently, all particles are labeled by the predetermined ordering on a lattice, the energetic memory of which being recalled by $\mathcal{H}_{\rm el}$. We remark that Eq.~\eqref{eq_Hel} cannot be cast into the form of $\frac{1}{2}\sum_{i\neq j} \bar{u}_{\rm el} (\vec{r}_{ij})$ with a $i, j$-independent function $\bar{u}_{\rm el} (\vec{r})$. Lastly, the two-body steric repulsion energy is taken as a hard-core potential of the form \begin{equation} \label{eq_Hst} u_{\rm st}(r_{ij}) =\begin{cases} 0 & \ {\rm if} \ r_{ij} \geq \sigma \\ \infty & \ {\rm otherwise} \end{cases} \end{equation} and completes the interaction Hamiltonian in Eq.~\eqref{eq_Hint}. This term prevents the possible divergence of the magnetic interactions at $r_{ij} = 0$. Following Refs.~\cite{Bernard2009,Roth2012,Engel2013,Lin2018}, we introduce a dimensionless packing fraction defined as the ratio of the space occupied by particles to the two-dimensional ``volume'' (the term ``volume'' is used for area throughout the paper) of the system. As we might consider the systems under constant pressure (see Sec.~\ref{sec:impl}), however, the volume $V$ of our system is not necessarily a fixed variable. We therefore define a reference volume $V_{\rm ref} \equiv N V_0 \equiv N \sqrt{3}a^2/2$ in which the springs are in their rest state. 
Accordingly, the packing fraction of a reference system with this volume is given by $\eta_0=(\sigma^2 \pi/2)/(\sqrt{3} a^2)$. Apart from $a$, which will be used as the unit of length, $\eta_0$ depends only on the diameter of particles $\sigma$ and therefore we employ it as a model parameter representing the steric repulsion. In contrast to that, the conventional packing fraction $\eta \equiv (\pi \sigma^2 /4)N/V$ defined in terms of the ``actual'' volume $V$ of the system, may change as the volume increases/decreases in response to a decrease/increase of the pressure or an increase/decrease of the magnetic moment $m$ at constant pressure. \begin{figure} \includegraphics[width=8.6cm]{fig1} \caption{\label{fig:model} Illustration of the dipole-spring model. } \end{figure} From now on, we measure lengths and energies in units of $a$ and $k_B T$, respectively. Accordingly, the magnetic moment $m$ and the spring constant $k_{\rm el}$ are measured in units of $m_0 \equiv \sqrt{k_B T a^3 /\mu_0}$ and $k_B T/a^2$, respectively. Finally, let us note that our bead-spring model is identical to the classical harmonic crystal~\cite{Ashcroft1976}, if the magnetic interactions energy $\mathcal{H}_{\rm m}$ and the steric repulsion energy $\mathcal{H}_{\rm st}$ are neglected in Eq.~\eqref{eq_Hint}. \section{Mapping onto pseudo-springs} \label{sec:mapping} Now we address the issue of particle labeling and derive an approximate elastic Hamiltonian $\tilde{\mathcal{H}}_{\rm el}$, which can be readily used for the density functional calculation. Putting aside the magnetic and steric parts of the Hamiltonian, i.e., $\mathcal{H}_{\rm m, st}$, which are free from this issue, see Eq.~\eqref{eq:pairadd}, we only consider the elastic part $\mathcal{H}_{\rm el}$ in this section. 
Following Ref.~\cite{Cremer2017}, we consider a mapping of $\mathcal{H}_{\rm el}$ onto a pseudo-spring potential between unlabeled particles of the form \begin{align} \mathcal{H}_{\rm el} \to \tilde{\mathcal{H}}_{\rm el} =\frac{1}{2} \sum_{i \neq j} u_{\rm pel}(r_{ij}), \label{eq_HelMAP} \end{align} where $u_{\rm pel}(r)$ is a two-body interaction between particles of center-to-center distance $r$. $\tilde{\mathcal{H}}_{\rm el}$, in contrast to Eq.~\eqref{eq_Hel}, involves all particle pairs $(i,j)$. Consequently, the Hamiltonian $\tilde{\mathcal{H}}_{\rm el}$ is invariant under permutations of the particles, i.e., \begin{align} \label{eq:permutation} \tilde{\mathcal{H}}_{\rm el}(\{ \vec{r}_i \}) = \tilde{\mathcal{H}}_{\rm el}(\{\vec{r}_{\hat{\pi}({i})}\}), \end{align} where $\hat{\pi}$ is a permutation operator constituting the symmetric group $S_N$. While such a mapping is convenient from a technical point of view, it is physically appropriate only if we can ensure through the form of $u_{\rm pel}(r)$ that each particle in effect only interacts with a prescribed set of other particles, i.e., the same number of nearest neighbors as in the real-spring system. To this end, we will cut each spring-like interaction (which we specify later) at larger distances to prevent interactions between particles too far away from each other. The crucial point is then to ensure that $\tilde{\mathcal{H}}_{\rm el}$ does neither introduce additional contacts nor miss actually existing ones. In one spatial dimension, the particles described by a pseudo-spring model are automatically ``labeled'' through the steric hard-core repulsions~\cite{Tonks1936,Percus1976}, which fixes their mutual ordering. Hence, within a proper range of the spring length at sufficiently high density~\cite{Cremer2017}, where the particles usually interact with their two nearest neighbors, the mapping works perfectly. Note that there is no phase transition due to the strong thermal fluctuations in one dimension. 
In a more realistic two- or three-dimensional fluid, however, the particles can always find a path to bypass each other. Still, a similar fixation to a cell surrounded by the nearest neighbors can be achieved via ergodicity breaking associated with the freezing transition. In this sense, we can construct a mapping of the labeled particles in the original lattice model onto the crystalline phase of the unlabeled particles with all-to-all pairwise-additive interactions. We take this viewpoint as the inversion of the following interpretation from Ref.~\cite{Cates2015} for a system of unlabeled hard spheres. For hard-sphere crystalline systems, even though there are $N!$ possible distinct crystals of labeled particles as the Hamiltonian is invariant under permutations, all microscopic configurations correspond to one unique macroscopic lattice structure. In this way, lattice models of labeled particles, for which the $1/N!$ factor is omitted in the statistical counting, provide a good approximation for the crystalline state of materials, emerging from the freezing of fluids with undistinguished particles. Inversely, for ferrogels, given the lattice system of labeled particles as the original model, we restore the $1/N!$ factor by mapping the unique lattice onto $N!$ possible frozen states of a fluid-like system with unlabeled particles. In Sec.~\ref{sec:just} and in Fig.~\ref{fig:pseudo}, our argument justifying such a mapping in two dimensions is laid out in full detail, before establishing the specification of the elastic pseudo potential $u_{\rm pel}$ in Sec.~\ref{sec:pair}. Then we demonstrate in Sec.~\ref{sec:DFT} how to implement the mapped system within our DFT approach. Before we proceed, we make two further remarks. First, we assume that the pseudo-spring systems governed by $u_{\rm pel}$ have a crystalline phase. Second, we consider two-dimensional crystals~\cite{Zahn2000, Keim2004} in this study. 
As is well known, there is no true long-range order in two-dimensional systems in the absence of truly long-range interactions~\cite{Mermin1966}. \subsection{Detailed justification of the mapping} \label{sec:just} Our argument is based on the idea that, at low temperatures, the energetic contribution overwhelms the entropic contribution involving the $1/N!$ factor, enabling us to map the systems of labeled particles onto those of the unlabeled ones. Such an argument is strong enough to justify the mapping, for instance, at zero temperature, for which the minimum of $\mathcal{H}_{\rm int}$ completely determines the equilibrium properties. At finite temperatures, however, the elastic properties (or even the stability of the systems) critically depend on the details of the mapping and of the profile of the consequent interaction potential $\tilde{\mathcal{H}}_{\rm int} \equiv \mathcal{H}_{\rm m} +\tilde{\mathcal{H}}_{\rm el}+ \mathcal{H}_{\rm st}$. This does not only apply at the minimum of $\tilde{\mathcal{H}}_{\rm int}$, but even if the adjacent particles are not located exactly at their lattice sites due to thermal fluctuations. Accordingly, a quantitative matching of lattice structures in real- and pseudo-spring systems turns out to be essential. Therefore, in this section, we carefully describe how the $1/N!$ factor and the fluctuations of particles confined to their lattice sites in the low-temperature crystalline phase should be addressed throughout the mapping. Let us consider the ${4N}$-dimensional \emph{phase space} of our system. The probability density to locate our system in this phase space reads $\omega (\{ \vec{r}_i\}, \{\vec{p}_i\}) \propto \exp{\{-\beta \mathcal{H}_{\rm tot}(\{ \vec{r}_i\}, \{\vec{p}_i\})\}}$ with the inverse temperature $\beta \equiv 1/(k_B T)$. We then partition the phase space into $N!$ subspaces that are related by permutations of the particles, analogously to the symmetry-related regions discussed in Ref.~\cite{Goldenfeld1992}.
To this end, we first specify the completely ordered set (subspace) as $\sigma_1 = \{ (\vec{r}_1, \ldots, \vec{r}_N,\vec{p}_1, \ldots, \vec{p}_N)|\ |\vec{r}_i| > |\vec{r}_j| \ {\rm for}\ i > j\}$. In other words, every configuration in which each particle with a smaller label is located closer to the origin than each other particle with a larger label belongs to this subspace. (If there exist pairs of particles, the distances of which from the origin are the same, one can further order the particle pairs by comparing the angle, e.g., from the $x$-axis.) Then, with the permutation operators $\hat{\pi}_i$ (for $i=1, \ldots, N!$ where $\hat{\pi}_1$ is the identity of the $S_N$ symmetric group) introduced in Eq.~\eqref{eq:permutation}, one can generate $N!$ subspaces exhausting the whole phase space by permuting particles, i.e., $\sigma_i = \{(\vec{r}_{\hat{\pi}_i (1)}, \ldots,\vec{r}_{\hat{\pi}_i (N)}, \vec{p}_{\hat{\pi}_i (1)},$ $ \ldots, \vec{p}_{\hat{\pi}_i (N)})|(\vec{r}_1, \ldots,\vec{r}_N,\vec{p}_1,\ldots , $ $ \vec{p}_N ) $ $\in \sigma_1\}$. For example, a subspace $\sigma_\alpha$ is generated by the permutation of particles in every element (configuration) of the completely ordered subspace $\sigma_1$, with a corresponding operator $\hat{\pi}_\alpha$. In the case of the real-spring systems, the probability densities of each subspace are not identical to each other, see Figs.~\ref{fig:pseudo}(a) and (b), as an exchange of any particle pair is always accompanied with a change in energy, i.e., $\mathcal{H}_{\rm el}(\{\vec{r}_i\}) \neq \mathcal{H}_{\rm el}(\{\vec{r}_{\hat{\pi}(i)}\}) $. However, for pseudo-spring systems, the probability densities corresponding to each subspace are identical to each other, as illustrated in Figs.~\ref{fig:pseudo}(d) and (e), because the Hamiltonian is invariant under permutations. Therefore, the probability densities in the whole phase space of the real- and pseudo-spring systems are not equivalent to each other in general. 
More specifically, for real-spring systems, the probability density in a subspace should deviate from the one in another subspace as they involve permutations of particles, while, for pseudo-spring systems, they are still identical due to the symmetry under permutations. \begin{figure} \includegraphics[width=8.6cm]{fig2} \caption{\label{fig:pseudo} A schematic diagram to illustrate the justification of our mapping. In panels (a), (b), and (d)--(f), we present the whole $4N$-dimensional \emph{phase space} partitioned by the black grids, each cell of the grid representing a permutation-related subspace $\sigma_i$. Then probability density profiles in the phase space are depicted by red lines. Specifically, the probability densities of the real-spring systems at high and low temperature are illustrated in panels (a) and (b), respectively; panels (d) and (e) depict those using pseudo-springs at high and low temperature, respectively. In panel (f), one density profile, localized within one single permutation-related subspace, is chosen arbitrarily from $N!$ available subspaces [the $1/N!$ factor is simultaneously omitted in the partition function, see the last equality in Eq.~\eqref{eq:Z_pseudo}]. Moreover, panel (c) describes the lattice structure in the \emph{configurational space} corresponding to panel (f) [see, Eq.~\eqref{eq:Z_pseudo}] or approximately to panel (b) [see, Eq.~\eqref{eq:Z_real}]. Finally, the approximation involved in between panels (b) and (c) is optimized via Eq.~\eqref{eq:cond}, which we here achieve by searching for a vanishing concentration $n_{\rm vac}$ of vacancies [indicated by an open circle in panel (c)].} \end{figure} Let us now turn to low-temperature systems, i.e., $k_B T \ll k_{\rm el} a^2$. Recall that there is no phase transition for harmonic crystals~\cite{Jancovici1967, Ashcroft1976}, that is, for our real-spring system. 
Nonetheless, the corresponding probability density becomes highly localized, as depicted in Fig.~\ref{fig:pseudo}(b). In the case of the pseudo-spring system, there might occur a freezing transition instead, which is the starting point of our mapping. First we constrain ourselves to each one of the permutation-related subspaces. Therefore, particle exchanges or, namely, permutations of particles via, e.g., vacancy hopping is ignored in the following analysis. Then we consider the ergodicity breaking of the systems due to freezing. Accordingly, the probability density in each permutation-related subspace is isolated as illustrated in Fig.~\ref{fig:pseudo}(e). [In contrast to that, as shown in Fig.~\ref{fig:pseudo}(a) and (d), the probability densities at high temperature are rather broad for both the real- and pseudo-spring systems and, therefore, it is not possible to construct such a mapping in our situation.] In both systems, we conclude that, below a certain temperature, the thermal vibration in the particle positions are much smaller than inter-particle distances in the crystalline state. Therefore, the trajectory of each particle in each subspace does not span the full \emph{configurational space} $\mathbb{R}^2$ but only a localized area. That is, particle $i$ remains localized within the corresponding Wigner-Seitz cell $\Omega_i$ associated with its average position, see Fig.~\ref{fig:pseudo}(c). In such a way, we provide a tiling of $\mathbb{R}^2 = \Omega_1 \cup \cdots \cup \Omega_N$, where $\Omega_i\cap \Omega_j= \emptyset$ for $i \neq j$. Now let us be more explicit and consider only one subspace among all permutation-related subspaces at low temperature. In the case of the real-spring system, a particle $i$ is localized in the Wigner-Seitz cell around a lattice site with the same label $i$, so that $\int_{\mathbb{R}^2} {\rm d}\vec{r}_i e^{-\beta \mathcal{H}} \approx \int_{\Omega_i} {\rm d}\vec{r}_i e^{-\beta \mathcal{H}}$. 
Then, the partition function becomes \begin{align}\label{eq:Z_real} Z_N^{\rm real} &= \frac{1}{\Lambda^N} \int_{\mathbb{R}^2} {\rm d}\vec{r}_1 \cdots \int_{\mathbb{R}^2} {\rm d}\vec{r}_N\, e^{-\beta \mathcal{H}_{\rm int}} \nonumber \\ &\to \frac{1}{\Lambda^N} \int_{\Omega_1} {\rm d}\vec{r}_1 \cdots \int_{\Omega_N} {\rm d}\vec{r}_N\, e^{-\beta \mathcal{H}_{\rm int}}, \end{align} where $\Lambda$ is the mean thermal wavelength of the particles. As indicated by the arrow on the second line, the partition function has been rewritten in terms of the Wigner-Seitz cells. With the Wigner-Seitz cells $\{\tilde{\Omega}_i \}$ corresponding to the pseudo-spring system in the crystalline state, the partition function involving the unlabeled particles can be similarly rewritten as \begin{align} \label{eq:Z_pseudo} Z_N^{\rm pseudo} &= \frac{1}{\Lambda^N N!} \int_{\mathbb{R}^2} {\rm d}\vec{r}_1 \cdots \int_{\mathbb{R}^2} {\rm d}\vec{r}_N\, e^{-\beta \tilde{\mathcal{H}}_{\rm int}} \nonumber \\ &\to \frac{1}{\Lambda^N N!} \sum_{\pi \in S_N} \int_{\tilde{\Omega}_{\hat{\pi}(1)}} {\rm d}\vec{r}_1 \cdots \int_{\tilde{\Omega}_{\hat{\pi}(N)}} {\rm d}\vec{r}_N\, e^{-\beta \tilde{\mathcal{H}}_{\rm int}} \nonumber \\ &= \frac{1}{\Lambda^N} \int_{\tilde{\Omega}_1} {\rm d}\vec{r}_1 \cdots \int_{\tilde{\Omega}_N} {\rm d}\vec{r}_N\, e^{-\beta \tilde{\mathcal{H}}_{\rm int}}, \end{align} where the last equality follows from the fact that $\tilde{\mathcal{H}}_{\rm int}$ is invariant under permutations. In this way, one can cancel the $N!$ counting factor, completely neglecting the exchange of particles, as illustrated in Fig.~\ref{fig:pseudo}(f). We stress that the probability of the ignored configurations in both Eq.~\eqref{eq:Z_real} and Eq.~\eqref{eq:Z_pseudo} is negligible and vanishes as $T\to 0$ and/or $N\to \infty$. 
Comparing now both expressions to each other, we map the lattice of labeled particles to that of unlabeled particles via \begin{align} \label{eq:cond} \int_{\Omega_1} {\rm d}\vec{r}_1 \cdots \int_{\Omega_N} &{\rm d}\vec{r}_N \, e^{-\beta \mathcal{H}_{\rm int}} \nonumber \\ &\stackrel{!}{=} \mathcal{N} \int_{\tilde{\Omega}_1} {\rm d}\vec{r}_1 \cdots \int_{\tilde{\Omega}_N} {\rm d}\vec{r}_N \, e^{-\beta \tilde{\mathcal{H}}_{\rm int}}, \end{align} where $\mathcal{N}$ is an arbitrary constant which does not alter any physical properties of the mapping, indicating that the (many-body) pseudo-spring potential can be determined up to an additive constant. Even though the above equation is not easy to analyze as it still involves interactions between many particles, the issue of particle labeling has been resolved: the $1/N!$ factor does not appear in the mapping. Moreover, it provides us with a route of how to construct an approximate elastic part of the Hamiltonian $\tilde{\mathcal{H}}_{\rm el}$ in Eq.~\eqref{eq_HelMAP} for practical calculations. In addition to the trivial condition that (i) the nearest-neighbor interaction of $\mathcal{H}_{\rm int}$ should be reproduced by $\tilde{\mathcal{H}}_{\rm int}$, Eq.~\eqref{eq:cond} indicates that (ii) the lattice of the real-spring system should be recovered with the pseudo-spring potential, namely, $\{\Omega_i \}\stackrel{!}{=}\{\tilde{\Omega}_i \}$. Next, we describe our strategy to explicitly perform such a mapping by introducing a cut-off for the springs. \subsection{Pair potential of pseudo-springs} \label{sec:pair} We consider an elastic interaction energy via pseudo-springs with a cut-off. 
The latter introduces an additional degree of freedom to control the offset of the remaining part of the springs, leading to a two-body interaction in the form \begin{equation} \label{eq:pseudo_spring} u_{\rm pel}(r_{ij})=\begin{cases} \frac{1}{2}k_{\rm el} (r_{ij}-a)^2 - u_{\rm pel}^0, & r_{ij} < R_c \\ 0, & {\rm otherwise}, \end{cases} \end{equation} where $k_{\rm el}$ and $a$ take the same role as in Eq.~\eqref{eq_Hel} and the two mapping parameters $R_c$ and $u_{\rm pel}^0$ are fixed by the conditions (i) and (ii), respectively, as discussed below. Regarding condition (i), the nature of the mapping onto the pair potential $u_{\rm pel}(r_{ij})$ leads to an apparent violation of Eq.~\eqref{eq:cond}, because the number of nearest neighbors cannot be unambiguously imposed. Even when the potential has only a finite range, additional contacts with next-to-nearest-neighbor particles can be formed in one direction, simultaneously missing the contacts with nearest-neighbor particles in the other directions. Specifically, if a particle is located, e.g., at one of the corners of its Wigner-Seitz cell, the minimal possible distance to a particle in a non-adjacent cell is only one edge length, i.e., $a$, while the maximal possible distance to another particle in a neighboring cell is as large as $\sqrt{13}a\approx3.6a$. This drawback cannot be overcome by any choice of another cell shape but becomes less severe as $k_B T/k_{\rm el}a^2 \to 0$. Hence, we simply consider an isotropic pseudo-potential of range $R_c$ for computational convenience. This cut-off parameter in Eq.~\eqref{eq:pseudo_spring} should be determined to achieve the optimal connectivity with the six nearest-neighbor particles (see the inset of Fig.~\ref{fig:dft_formulation}), as the best approximation for the original bead-spring Hamiltonian. To this end, we first determine the cut-off radius $R_c^0$ for the reference system with $V_{\rm ref} = N V_0$. 
As a simple analytic estimate for $R_c^0$, one may assume a uniform (fluid-like) density $N/V_{\rm ref}$ and replace the seven Wigner-Seitz cells containing a particle and its six nearest neighbors by a circle of the same total area. Such an assumption leads to the value of $R_c^0= (7V_0/\pi)^{1/2} \approx 1.39 a$. As we only consider the crystal, in which the density is highly inhomogeneous, however, $R_c^0$ is expected to be smaller. As a more appropriate alternative, we extract the value from MC simulations of the real-spring system with $V=N V_0$, i.e., the target of our mapping. Specifically, we compute the isotropic pair correlation function defined as \begin{align} g(r) = \frac{V}{2\pi r N^2}\left\langle \frac{1}{2} \sum_{i\neq j} \delta (r-r_{ij}) \right\rangle. \end{align} By simply probing the distance at which $g(r)$ takes its first minimum, we find $R_c^0 \approx 1.34a$, compare Fig.~\ref{fig:dft_formulation}. As the effect from the discontinuity of $u_{\rm pel}(r)$ at $R_c^0$ (see, the red solid line in Fig.~\ref{fig:dft_formulation}) is minimized with $R_c^0 = 1.34a$, the convergence rate turns out to be faster than the minimization with $R_c^0 = 1.39a$. As the results are hardly affected by this small change of the parameter and the value $R_c^0=1.34a$ seems to be sufficiently accurate, we use it from now on. For systems under constant pressure with volumes other than $V = V_{\rm ref}$ (see Sec.~\ref{sec:elastic}), we use \begin{align} \label{eq:cutoff_scaling} R_c = R_c^0\, \sqrt{\frac{V}{V_{\rm ref}}}. \end{align} This is the only external input necessary during the formulation of our DFT in Sec.~\ref{sec:DFT}. The condition (ii) cannot be directly imposed, since the lattice structure is usually not an input but the result of a calculation based on a prescribed interaction. 
The fact that this condition is not automatically fulfilled even if the real- and pseudo-spring systems are at the same density is related to another aspect of a pair-potential system in general: the lattice may be imperfect, as indicated in Fig.~\ref{fig:pseudo}(c). The real-spring system with labeled particles assigned to the lattice is completely free from defects, whereas the mismatch between the range of the pairwise interaction and the desired lattice structure suggests that there should be some vacancies or interstitials. Moreover, if there were such defects, Eq.~\eqref{eq:Z_pseudo} should also be corrected by additional factors addressing the number of possible defect configurations. Hence, we modify $u_{\rm pel}(r_{ij})$ such that it yields a zero vacancy concentration, $n_{\rm vac}=0$, as an equivalent requirement to the above condition of an equal cell structure. The only way to do so while leaving $R_c$ invariant is to tune the depth of the pseudo-spring potential in Eq.~\eqref{eq:pseudo_spring} by an offset value $u_{\rm pel}^0$, which we understand as follows. On the one hand, if $\langle u_{\rm pel} \rangle \ll 0$, the total elastic energy is lowered by forming additional contacts with new neighbors. This effect results in the undesired formation of interstitials or aggregates. On the other hand, when $\langle u_{\rm pel} \rangle \gg 0$, vacancies are generated. Closing this section on the mapping, we are now ready to formulate our DFT. The only remaining mapping parameter $u_{\rm pel}^0$ will be determined within the DFT framework. \subsection{Density functional theory} \label{sec:DFT} Following previous studies~\cite{vanTeeffelen2008, Roth2012}, we consider two unit cells of a hexagonal lattice in a rectangular base with periodic boundary conditions in $x$ and $y$ directions. The volume and the number of particles of the two unit cells are denoted by $V_{\rm cell}$ and $N_{\rm cell}$, respectively. 
Our starting point is to construct a grand canonical free energy functional $\Omega ([\rho (\vec{r})])$ the value of which at its minimum corresponds to the equilibrium grand potential $\Omega_0 (T, \mu, V;m, k_{\rm el}, \eta_0, u_{\rm pel}^0)$ in the grand canonical ensemble at fixed temperature $T$, chemical potential $\mu$, and volume $V$. The free parameters employed here are the magnetic moment $m$, the spring constant $k_{\rm el}$, and the packing fraction $\eta_0$ of the reference system with $V_{\rm cell}=N_{\rm cell} V_0 = \sqrt{3}a^2$ as defined in Sec.~\ref{sec:model}. In addition, we will complete the theory, by specifying the yet to be determined offset for the pseudo-spring potential $u_{\rm pel}^0$. As we have also employed an additional condition for $n_{\rm vac}$ in the previous section, however, each resultant equilibrium density profile corresponds to a parameter set of $(m, k_{\rm el}, \eta_0)$. We write \begin{align} \Omega ([\rho (\vec{r})]) = \mathcal{F}([\rho(\vec{r})]) -\mu \int {\rm d}\vec{r} \rho(\vec{r}), \end{align} where the Helmholtz free energy functional $\mathcal{F}\equiv \mathcal{F}_{\rm id}([\rho (\vec{r})]) +\mathcal{F}_{\rm exc}([\rho (\vec{r})])$ consists of the ideal gas term and the excess functional, which read \begin{align} &\mathcal{F}_{\rm id}([\rho (\vec{r})]) = \beta^{-1} \int {\rm d}\vec{r} \rho(\vec{r}) [\ln{\{\Lambda^2 \rho(\vec{r})\}}-1], \nonumber \\ &\mathcal{F}_{\rm exc}([\rho (\vec{r})];m, k_{\rm el}, \eta_0, u_{\rm pel}^0) = \mathcal{F}_{\rm m} +\mathcal{F}_{\rm el}+\mathcal{F}_{\rm st}, \end{align} respectively. Here, $\Lambda$ denotes the (irrelevant) thermal wave length. 
Regarding the elastic and magnetic interactions, we employ the mean-field functional in the form \begin{align} \label{eq:mean-field} \mathcal{F}_{\rm el, m} \equiv \frac{1}{2}\int {\rm d}\vec{r}\, {\rm d}\vec{r}\,'\,\rho(\vec{r}) u_{\rm pel, m}(|\vec{r}-\vec{r}\,'|)\rho(\vec{r}\,'), \end{align} while we adopt fundamental measure theory~\cite{Roth2012} for $\mathcal{F}_{\rm st}$. Each excess functional is calculated with the aid of the Fourier convolution theorem. The detailed forms of the Fourier transforms of $u_{\rm pel}$ and $u_{\rm m}$ used in our calculations are described in Appendix~\ref{app:FT_of_energies}. As in Refs.~\cite{Oettel2010, Roth2012, Lin2018}, we minimize $\Omega$ for a prescribed bulk density \begin{align} \label{eq:rho_vac} \rho_{\rm bulk} \equiv \frac{N_{\rm cell}}{V_{\rm cell}}=\frac{2(1-n_{\rm vac})}{V_{\rm cell}}. \end{align} Here, the chemical potential is obtained as an output of the calculation. Specifically, the minimization process consists of two distinct stages. At the first stage, we minimize $\mathcal{F}$ for fixed $n_{\rm vac}$ using the Picard iteration algorithm \begin{align} \rho^{(i+1)} (\vec{r}) = \alpha \tilde{\rho}^{(i)}(\vec{r}) +(1-\alpha)\rho^{(i)}(\vec{r}), \label{eq_PI} \end{align} with a mixing parameter $\alpha$~\cite{Roth2010}, where \begin{align}\label{eq:functional_derivative} \tilde{\rho}^{(i)}(\vec{r})= \frac{1}{\Lambda^2} \exp{\left[ -\beta \frac{\delta \mathcal{F}_{\rm exc}([ \rho^{(i)} (\vec{r})])}{\delta \rho^{(i)} (\vec{r})} +\beta \mu^{(i)} \right]}. \end{align} As we have prescribed the bulk density not the chemical potential, $\mu^{(i)}$ is updated in each iteration step to keep the average density constant. Our convergence criterion for minimization at this stage is $(\mathcal{F}^{(i)}-\mathcal{F}^{(i+1)})/\mathcal{F}^{(i)} < 10^{-15}$, where $\mathcal{F}^{(i)} = \mathcal{F}([\rho^{(i)}(\vec{r})])$. 
Then we determine the equilibrium density profile by further minimizing $\mathcal{F}/N$ with respect to $n_{\rm vac}$. In practice, we vary $n_{\rm vac}$ by controlling $V_{\rm cell}$ while $\rho_{\rm bulk}$ is fixed. Comparing the values of $\mathcal{F}/N$ obtained for each $n_{\rm vac}$, we determine the vacancy concentration $n_{\rm vac}$ and the equilibrium density profile $\rho(\vec{r})$ with which the free energy per particle $\mathcal{F}/N$ is minimized. These procedures are repeated until we have an accuracy of $10^{-6}$ for $n_{\rm vac}$. In the calculations, we set $\Lambda = 1$. Now, we determine the values of $u_{\rm pel}^0$ to close our DFT. Specifically, we first perform the two-stage DFT minimization as discussed above to find corresponding vacancy concentrations for given values of $u_{\rm pel}^0$. Then we choose the value of $u_{\rm pel}^0$ for which the vacancy concentration is zero, i.e., $n_{\rm vac} = 0$. As described in Eq.~\eqref{eq:rho_vac}, controlling $n_{\rm vac}$ in DFT calculations involves the change in $V_{\rm cell}$. Therefore, we also use a rescaled value of $R_c$ given by Eq.~\eqref{eq:cutoff_scaling} when $n_{\rm vac}\neq 0$. \begin{figure} \includegraphics[width=8.6cm]{fig3} \caption{\label{fig:dft_formulation} Formulation of a DFT for the bead-spring model. We compare pair correlation functions $g(r)$ from the real and pseudo-spring MC simulations of the reference system with $V=V_{\rm ref}$, which are depicted by black and green symbols, respectively. First, black square symbols show $g(r)$ obtained from the MC simulation of the real-spring system. As depicted by the black vertical line, the distance at the minimum point between the first and the second peak in $g(r)$ is chosen as $R_c^0$. Here, $ka^2/k_B T=100$, $\eta_0 = 0.3$, $m/m_0 =0$ and $N=480$ are used and we find $R_c^0= 1.34$. 
In the inset, the anisotropic pair correlation function $g(\vec{r})$ obtained from the real spring MC-simulation results is also displayed, together with the yellow line indicating $R_c^0$. As shown, the yellow line is far enough from the neighboring peaks and we confirm that the isotropic cutting off of the springs is a reasonable approach. The pair correlation function $g(r)$ obtained from pseudo-spring systems with $u_{\rm pel}^0 =3.94k_B T$ is represented by green circle symbols. The agreement between the real- and pseudo-spring simulations is manifested clearly. Finally, the red solid line presents the elastic energy of pseudo-springs $u_{\rm pel} (r)$ defined in Eq.~\eqref{eq:pseudo_spring}. } \end{figure} Performing first the minimization with $m=0$ and without a hard core repulsion ($\eta_0=0$), we confirm that the pseudo-spring potential indeed admits a freezing transition, which is the prerequisite of the mapping. Then we perform MC simulations with $\tilde{\mathcal{H}}_{\rm pel}$, together with $\mathcal{H}_{\rm st}$, to verify the mapping at finite packing fraction, using the value of $u_{\rm pel}^0$ obtained from such a DFT as an input. As manifested in Fig.~\ref{fig:dft_formulation}, the agreement between the real- and the pseudo-spring systems is quantitatively excellent. Therefore, we conclude that the DFT approach to systems with distinguishable particles is successfully formulated with the pseudo-spring potential between indistinguishable particles and ready to use in the presence of magnetic interactions as a model for ferrogels. \section{Elastic properties of ferrogels} \label{sec:elastic} We finally demonstrate the utility of the theory to investigate the mechanical properties of ferrogels, varying the magnetic moment and the density of the particles. 
In particular, we probe the system at constant pressure $p = k_B T/a^2$ and determine the volume $V$ and the responses to elastic deformations $\vec{\nabla} \vec{u}$, where $\vec{u}$ is the displacement field. With the components of the corresponding linear strain tensor~\cite{Landau1986} \begin{align} \epsilon_{ij} =\frac{1}{2}(\nabla_i u_j + \nabla_j u_i), \end{align} the stress tensor and the stiffness tensor are defined as \begin{align} \label{eq:def_modulus} \sigma_{ij} = \frac{\partial f}{\partial \epsilon_{ij}}, \quad C_{ijkl} = \frac{\partial \sigma_{ij}}{\partial \epsilon_{kl}}, \end{align} where $f \equiv F/V$ is the density of the Helmholtz free energy $F$. For two-dimensional hexagonal lattices, the stiffness tensor \begin{align} C_{ijkl} = K\delta_{ij}\delta_{kl} +G (\delta_{ik}\delta_{jl} +\delta_{il}\delta_{jk} -\delta_{ij}\delta_{kl}) \end{align} can be expressed in terms of only two independent elastic constants, namely the bulk modulus $K$ and the shear modulus $G$, because of the symmetry~\cite{Chaikin2000}. In the following, we describe in Sec.~\ref{sec:impl} how to calculate the volume and elastic constants in MC simulations for the real- and pseudo-spring systems and, using pseudo-potentials from our DFT treatment, we compare the results of those methods in Sec.~\ref{sec_res2}. Henceforth, $V$ and the elastic constants, namely $K$ and $G$, are measured in units of $a^2$ and $k_B T/a^2$, respectively. \subsection{Implementation} \label{sec:impl} \subsubsection{MC simulation} In the case of MC simulations, we employ the isobaric-isothermal ($TpN$) ensemble~\cite{Wood1968, Eppenga1984} and vary the rectangular lengths $L_x$ and $L_y$ independently~\cite{Boal1993}. Accordingly, random walks in terms of $\ln{L_x}$ and $\ln{L_y}$ have been performed with the detailed balance condition~\cite{Frenkel2002} \begin{align} \frac{\omega(V\to V')}{\omega(V'\to V)} &=\exp \left\{ -\beta[U(\vec{s}\,'^N, V')-U(\vec{s}^N,V) \right. \nonumber \\ & \left. 
+p(V'-V)] + (N+1)\ln{(V'/V)} \right\}, \end{align} where $\omega$ is the transition rate corresponding to the volume changes and $\vec{s}^N=\{ \vec{s}_i \}$ are the scaled coordinates defined by $\vec{s}_i \equiv V^{-1/2} \vec{r}_i$ for $i=1,\ldots, N$. We then compute the volumes of the systems, simply taking the average of $V \equiv \langle L_x L_y \rangle$ and extract $K$ and $G$ from the fluctuations $\langle (\Delta V)^2 \rangle$, $\langle (\Delta L_x)^2 \rangle$, and $\langle (\Delta L_y)^2 \rangle$~\cite{Boal1993}. The elastic constants are computed from the real-spring as well as the pseudo-spring MC simulations to verify the validity of the mapping. In particular, for the pseudo-spring MC simulation, we use the values of the offset $u_{\rm pel}^0$ obtained from the density functional calculations as inputs, while the average volumes extracted from the corresponding real-spring MC simulations are employed to determine the value of the cut-off $R_c$, with the aid of Eq.~\eqref{eq:cutoff_scaling}. \subsubsection{DFT calculation} Computation of thermodynamic quantities under the given pressure in DFT is formally not straight-forward, because the theory is based on the grand-canonical ensemble. In a finite system, for example, the structure depends significantly on the specific choice of the statistical ensemble~\cite{Gonzalez1997, Gonzalez2004, delasHeras2016}. Due to the equivalence of the ensembles in the thermodynamic limit, however, one can still regard the density profiles obtained from DFT also as minima in the isobaric ensemble as long as large systems are considered. Hence, the requirement to compare the DFT results to MC simulations at constant pressure does not represent a conceptual problem for our study. Specifically, we first compute pressures at various volumes $V_{\rm cell}$ from the relation $p=-\Omega_0/V_{\rm cell}$ and choose the volume at which the pressure reaches the prescribed value. 
The accuracy of the volume is $10^{-5}(2V_0)$ and the vacancy concentration is fixed during the procedures by fine-tuning $u_{\rm pel}^0$ with an accuracy of $10^{-4}k_B T$. The detailed procedures are exemplified in Appendix~\ref{app:DFT_NpT}. Since we have direct access to the free energy, we can directly compute the elastic constants by deforming the system. We consider four types of deformations as follows: \begin{align} {\tensor{\epsilon}} = \left( {\begin{array}{cc} \epsilon_K & 0 \\ 0 & \epsilon_K \\ \end{array} } \right), \left( {\begin{array}{cc} \epsilon_x & 0 \\ 0 & 0 \\ \end{array} } \right), \left( {\begin{array}{cc} 0 & 0 \\ 0 & \epsilon_y \\ \end{array} } \right), \left( {\begin{array}{cc} \epsilon_G & 0 \\ 0 & -\epsilon_G \\ \end{array} } \right). \end{align} Specifically, we first deform the system according to the given strain, simultaneously controlling the density. Then we obtain the equilibrium density profile of the deformed system adjusting the vacancy concentration equivalently to the undeformed system as described in Sec.~\ref{sec:DFT}. We note that two types of deformation are enough to determine the two unknowns $K$ and $G$. Examining four types of deformation, we verify the consistency of the theory. However, the deformations involving a volume difference seem to involve inelastic changes: shifts of the vacancy concentration in the equilibrium step indeed imply changes in the number of particles in the unit-cell, which would not occur during genuinely elastic deformations. To minimize such inelastic contributions, we utilize the identity, $p \equiv -\Omega_0 /V_{\rm cell}$, which does not involve any deformations, instead of directly computing the bulk modulus $K$ from the definition of Eq.~\eqref{eq:def_modulus}. 
Specifically, the elastic constants are calculated from \begin{align} \label{eq:K_comp} K&=-\frac{\{p(\epsilon_K)-p(0)\}+\{p(0)-p(-\epsilon_K)\}}{4\epsilon_K}, \\ \label{eq:G_comp} G&=\frac{f(\epsilon_G)+f(-\epsilon_G)-2f(0)}{4{\epsilon_G}^2}. \end{align} Here the pressure of the deformed systems is computed from $p(\epsilon_K) = -\Omega_0 (\epsilon_K)/V_{\rm cell}(\epsilon_K)$, where $\Omega_0 (\epsilon_K)$ is the grand potential at which the density functional is minimized under the given constraint due to a deformation. We note that, in every case, the differences in the values of $K$ are less than 10\%, compared to the results for $K$ that we have calculated in analogy to Eq.~\eqref{eq:G_comp} via the changes in $f$, instead of using the pressure as in Eq.~\eqref{eq:K_comp}. Before we proceed, let us make a few technical remarks. First, the minimization of the functionals at $n_{\rm vac}=0$ requires large computational resources since a small value of the Picard iteration parameter $\alpha$ in Eq.~\eqref{eq_PI} should be used to guarantee the convergence of the minimization. To calculate the equilibrium density in a reasonable time scale, we chose to fix the iteration parameter at $\alpha=0.001$ in general, at the cost of loosening the strict condition to impose a zero vacancy concentration. Values of $\alpha$ smaller than that are only used for a few cases in which the minimization eventually fails. With this constraint, we could minimize the free-energy functional up to $n_{\rm vac}= 0.0006 \pm 1.0\times 10^{-6}$ for $\eta=0.3$ and $0.5$, and $n_{\rm vac}=0.015\pm 1.0\times 10^{-6}$ for $\eta=0.8$. Secondly, as is well known, the numerical treatment of long-range interactions with periodic boundary condition would require sophisticated techniques~\cite{Allen1989, Arnold2005}. In our unit-cell DFT calculations, we obviously neglect the Fourier components the wave lengths of which are longer than the unit-cell size. 
To bypass such complications and provide a fair comparison, with the MC simulations we simply cut the magnetic interaction beyond the nearest-neighbor interaction as an approximation~\cite{Goh2018, Ider2019}. \subsection{Results} \label{sec_res2} We explore the following sets of parameters: $(k_{\rm el}, \eta_0)=(100.0, 0.3), (100.0, 0.5), (100.0, 0.8)$, each for several magnetic moments $m\leq12$. Again, we note here that $\eta_0$ corresponds to the packing fraction of the reference system with $V=V_{\rm ref}$, conveying the information on the diameter of the particles. The conventional packing fraction is denoted by $\eta \equiv N/V$ and is not a fixed variable as we consider the isobaric ensemble, see the paragraph below Eq.~\eqref{eq_Hst} for details. In Fig.~\ref{fig:result_low}, we first present the results at low packing fractions, i.e., $\eta_0 = 0.3$ and $0.5$, for which the conventional hard disk system is in the fluid phase~\cite{Mak2006,Roth2012,Engel2013}. Therefore, the crystallization in this low packing fraction regime is due to the elastic interaction. As one can see, the DFT and the real- and pseudo-spring MC simulations agree well with each other, except that the DFT overestimates the volumes, especially for large values of $m$. As we adopt the mean-field functionals, the repulsion effects from the energy seem to be exaggerated, leading to the increases in volumes. Overall, the volume $V$, the bulk modulus $K$, and the shear modulus $G$ increase as the magnetic moment $m$ increases. \begin{figure} \includegraphics[width=8.6cm]{fig4a} \includegraphics[width=8.6cm]{fig4b} \includegraphics[width=8.6cm]{fig4c} \caption{\label{fig:result_low} For systems of low packing fractions, i.e., $\eta_0 = 0.3$ and $0.5$, (a) the volume $V$, (b) the bulk modulus $K$, and (c) the shear modulus $G$ are presented as functions of the repulsive magnetic dipole moment $m$. Apparently, the DFT overestimates the volume $V$ for elevated values of $m$. 
Rough agreement among the theory and the MC simulations of both the real- and pseudo-spring systems is observed for both elastic constants $K$ and $G$. Here, $\epsilon_K = 0.00025$, $\epsilon_G = 0.00025$ and $N=480$ have been used. } \end{figure} Meanwhile, results with $\eta_0 =0.8$ are shown in Fig.~\ref{fig:result_high}. In this case, we expect strong contributions from the steric forces. We first note that a significant deviation of the theory from the MC simulations in the bulk modulus is observed. Quantitatively, the bulk moduli obtained from the DFT are approximately one fourth of those obtained from the MC simulations. This is mostly due to the fact that we had to use a quite large value, i.e., $n_{\rm vac}\approx 0.015$, for the vacancy concentration because of technical reasons related to the computational time and the consequent choice of the mixing parameter $\alpha$ (see Sec.~\ref{sec:impl}). Indeed, using the same vacancy concentration, we also obtain similar deviations in $K$ at $\eta_0=0.3$ or $0.5$. Moreover, a good agreement between the real- and pseudo-spring MC simulations is still observed, confirming that such a deviation of the DFT does not indicate a failure of our mapping. Surprisingly, the behavior of $G$ is still well predicted by the DFT. In contrast to $K$, computation of $G$ does not involve any changes in volume and consequently both the density and the vacancy concentration remain approximately constant during deformation. (In contrast to that, deformations involving volume changes are always accompanied by shifts of the vacancy concentration in equilibrium.) Therefore, we speculate that the vacancy concentration plays a much smaller role in the case of the volume-conserving deformation in DFT, for which $n_{\rm vac}$ remains basically the same. To conclude, except for the technical issue discussed, the DFT provides qualitatively correct trends even at high packing fractions. 
\begin{figure} \includegraphics[width=8.6cm]{fig5a} \includegraphics[width=8.6cm]{fig5b} \includegraphics[width=8.6cm]{fig5c} \caption{\label{fig:result_high} For systems with a high packing fraction, $\eta_0 = 0.8$, (a) the volume $V$, (b) the bulk modulus $K$, and (c) the shear modulus $G$ are presented as functions of the magnetic dipole moment $m$. As in Fig.~\ref{fig:result_low}, the DFT overestimates the volume $V$. For the elastic constants, the agreement among the theory, the real MC simulations and the pseudo MC simulations is again quite reasonable for the shear modulus $G$. However, the DFT significantly underestimates the bulk modulus $K$. The reason for this large deviation is very likely a rather large value of the vacancy concentration, $n_{\rm vac} =0.015$, see the main text for details. In the inset of (c), the values of the shear modulus are additionally presented as a function of the packing fraction $\eta \equiv N/V$. Here, $\epsilon_K = 0.00025$, $\epsilon_G = 0.00025$ and $N=120$ have been used. } \end{figure} Remarkably, we observe new response behaviors of $K$ and $G$ to an increase in $m$ for the high packing fraction, which are opposite to the low packing fraction. For $\eta_0 = 0.8$, $K$ and $G$ decrease as $m$ increases, while $V$ is still an increasing function of $m$. Emergence of such two different scenarios originates from the composite nature of the ferrogels: here, we are observing different types of crossovers from the hard-disk crystals (high packing fraction, small $m$) or the harmonic crystals (low packing fraction, small $m$) to the elastic-dipolar crystals (low packing fraction, large $m$). To understand the phenomena in detail, we first note that $V$ always increases as $m$ increases because the magnetic interaction is repulsive. Then for low packing fractions, the magnetic repulsion simply causes additional increases in the bulk and shear moduli on top of the harmonic crystals. 
In contrast to that, for high packing fractions, the steric force dominates the mechanical properties and the system is very stiff (hard-disk crystals) with relatively large $K$ and $G$ for small values of $m$. As the magnetic moment $m$ and consequently the volume $V$ increase, the packing fraction $\eta$ decreases and the contributions from the steric repulsion become insignificant compared to the dipolar repulsion at some values of the packing fraction around $0.6 \lesssim \eta \lesssim 0.7$ [see, e.g., the inset of Fig.~\ref{fig:result_high}(c)]. These values are slightly smaller than the fluid-crystal coexisting packing fractions of the hard-disk systems, which are in between 0.68 and 0.73~\cite{Bernard2011, Engel2013, Roth2012, Thorneywork2017, Lin2018}. Once $\eta$ has decreased enough, as in the low packing fraction regime, the signature of the elastic-dipolar crystals should be recovered, the elastic properties of which are governed by the combination of the spring and dipolar interactions. Indeed for $m=10$, the volume and the elastic constants (compare Figs.~\ref{fig:result_low} and~\ref{fig:result_high}) are quantitatively similar among $\eta_0$=0.3, 0.5, and 0.8. As expected, the values of $K$ and $G$ for $\eta_0=0.8$ increase in the regime of large magnetic moments, i.e., for $m \gtrsim 8$ where $\eta < 0.70$ ($\eta \approx$ 0.68 and 0.60 for $m=$5 and 8, respectively). \section{Summary and outlook} \label{sec:conclusion} In this study, we have formulated a DFT for a two-dimensional ferrogel model. We have replaced the labeled particles in a state of strictly permanently connected neighboring particles by the unlabeled particles in a fluid-like state to map the elastic part of the associated energy onto a pseudo-potential invariant under permutations. 
In particular, we have shown that the mapping provides a plausible approximation for the considered systems and their response to magnetic interactions, even though the mapping still leads to some deviations in the calculated response of the systems. These deviations have been minimized by fine-tuning the mapping parameters. Lastly, it has been demonstrated that the elastic properties of ferrogels can be successfully investigated in this framework and two scenarios have been identified for the response mechanism of the dipole-spring system, depending on the packing fraction. Notably, our DFT approach may also provide a clue for the scale-bridging~\cite{Menzel2014} between mesoscopic dipole-spring models and the macroscopic description of ferrogels~\cite{Jarkova2003,Bohlius2004}. A strong feature of the presented DFT approach relying on the pseudo-spring model in two dimensions is that it works well independently of the density. First of all, the mapping is generally well justified even in the low-density regime. Moreover, the mean-field DFT, which we employed here, provides a good approximation even in the high-density regime where we found good agreement between our theoretical treatment and MC simulations, especially for the shear modulus. In contrast to that, for a one-dimensional model, the prediction of unphysical freezing within mean-field DFT restricts the parameter space to intermediate densities and weaker elasticities~\cite{Cremer2017}, where the pseudo-spring approximation itself is less accurate due to large possible gaps between neighboring particles. We stress that, in general, mean-field approximations become more accurate with increasing dimensionality. Besides, true long-range order generally exists in three dimensions. Therefore, we expect our DFT approach to work even better in three dimensions. Regarding possible experiments to test our theoretically predicted scenarios, the high packing fraction of $\eta_0 = 0.8$ needs to be discussed. 
As mentioned in Ref.~\cite{Huang2016}, for certain systems there exists a region of increased mechanical stiffness around the magnetic particles. Effectively, this could be equivalent to an increased size of the particles. In addition, we note that in three dimensions, the fluid-crystal coexisting densities are reported as low as $\eta \approx 0.5$~\cite{Hoover1968, Roth2002}. Indeed, volume concentrations of approximately 50\% have already been reported in three-dimensional samples of ferrogels~\cite{Liu2008}, implying that the steric force should be taken into account explicitly. Therefore, decreasing elastic moduli in response to an increase of the magnitude of the magnetic moments might be observed in real three-dimensional systems. Furthermore, the pressure value employed in this study seems to be small: for instance, $p = k_B T/a^2 \sim 2\times 10^{-7}$\,Pa for the system studied in Ref.~\cite{Messing2011} with $a\approx 150$\, nm at room temperature. Therefore, for direct comparison with experiments, a broad range of pressures should be explored in future studies with more realistic settings. There are several remaining issues. First, aligning the magnetic dipoles within the two-dimensional plane and the resulting in-plane anisotropy will give rise to additional phenomena not observed here for perpendicular dipoles. To list a few, the aspect ratio of width to height of the unit cell will vary depending on the in-plane orientation of the magnetic dipole moments, the volume may change and the magnetic particles could touch each other, leading to an abrupt change in elastic moduli even at a relatively low packing fraction~\cite{Annunziata2013}. As mentioned above, an extension to three dimensions would even strengthen the correspondence to real materials. Additionally, the mean-field functionals can be replaced by more sophisticated ones, especially to consider the long-range nature of the magnetic interaction~\cite{vanTeeffelen2008}. 
Apart from that, the response dynamics to external magnetic fields represents another topic of interest. Dynamical DFT~\cite{Marconi1999, Archer2004, Stopper2018} should be the obvious candidate to study these phenomena in the present context. \section*{Acknowledgments} We thank Urs Zimmermann and Christian Hoell for providing numerical codes that were useful for the initiation of this study. We also thank Shang-Chun Lin, Martin Oettel, Benno Liebchen, Mehrdad Golkia, Saswati Ganguly, and Robert Evans for helpful discussions. This work was supported by funding from the Alexander von Humboldt-Stiftung (S.G.) and from the Deutsche Forschungsgemeinschaft through the SPP 1681, grant nos. ME 3571/3 (A.M.M.) and LO 418/16 (H.L.).
{ "redpajama_set_name": "RedPajamaArXiv" }
4,448
Q: Access Dynamic Pass-through Query I've been looking for a bit, but haven't found a good answer. I've found that I could generate the code in a VBA procedure and run it from there; however, I'm working on someone else's design and don't want to change it up too much. In part of that pass-through query we have something like this: WHERE (((ID='380') I want the 380 to be code that will look at a UserForm combo box, write a query for my database returning a value WHERE column = combobox.value I hope that makes sense; let me know if anyone knows how to work with this. Thanks. A: You can set the combobox to put the selected value in a cell, and then link the query to the value of that cell via a parameter. Cheers - A: After looking around a lot and researching a bit it seemed that only a stored procedure on the server (which I cannot do) or creating a string and passing that string to the pass-through query are my options. I'm going to pursue the string option since it seems like the best option with my constraints here. If anyone comes up with any ideas later on that would help, let me know. Thanks. A: One way would be to store a template query in a pass through query, say: select * from table where ID = [ID] Then use VBA to read the query, substitute the ID, write it out to another prepared query, and execute it. Something like this: Sub zzz() Dim SQL As String With CurrentDb SQL = .QueryDefs("PassThruQueryTemplate").SQL .QueryDefs("PassThruQuery").SQL = Replace(SQL, "[ID]", "380") .Execute "PassThruQuery" End With End Sub
{ "redpajama_set_name": "RedPajamaStackExchange" }
286
Filemon R. Balbastro Jr. (November 13, 1940 – February 2, 2008), better known as Billy Balbastro, was a Filipino lawyer and entertainment columnist. Early life Balbastro was born in Oton, Iloilo. By the age of fifteen, Balbastro was covering the entertainment beat for the Philippine Free Press and the Kislap-Graphic. He graduated from the University of the Philippines College of Law in 1962, and worked for ten years in the Commission of Elections (COMELEC) with stints in Vigan and Romblon. His columns appeared in the Abante and Abante Tonight tabloids. From 1982 to 1985, Balbastro was the president of the Philippine Movie Press Club. Balbastro hosted a showbusiness-themed radio program, Showtime with Billy, which aired over DZMM. He also was a member of the Committee for Cinema of the National Commission for Culture and the Arts. Filmography Radio Showtime With Billy (2000–2008; DZMM) TV Show Movie Magazine (1985–1992); GMA Network) Two for the Road (1979–1980; GMA Network) Troika Tonite (1993–1995; ABC 5) Movies Sinong Pipigil sa Pagpatak ng Ulan? (1979; publicity and promotions) High School Circa '65 (1979; publicity staff) Kaaway ng Batas (1990; public relations officer) Lt. Madarang: Iginuhit sa Dugo (1993; publicity and promotions) Lab Kita ... Bilib Ka Ba? (1994; publicity and promotions) Costales (1995; publicist) Tapatan ng Tapang (1997; publicity and promotions) Tampisaw (2002; promotion and publicity) Takaw-tingin (2004; promotion and publicity) Anak ni Brocka (2005) Kaleldo (2006; publicity and promotions) Twilight Dancers - Mr. Big Judge (2006) Troika (2007; publicist) Death Billy Balbastro died from lung cancer on February 2, 2008, in Parañaque. Notes 20th-century Filipino lawyers Filipino radio journalists Filipino LGBT writers LGBT lawyers People from Iloilo University of the Philippines alumni 1940 births 2008 deaths Deaths from lung cancer in the Philippines Visayan people 20th-century Filipino LGBT people 21st-century Filipino LGBT people
{ "redpajama_set_name": "RedPajamaWikipedia" }
3,921
\section{Introduction} The recent development of deep learning technologies has achieved successes in many perceptual visual tasks such as object recognition, image classification and pose estimation ~\cite{karpathy2014large,krizhevsky2012imagenet,lin2014microsoft,ILSVRC15,simonyan2014very,taigman2014deepface,toshev2014deeppose}. Yet the status quo of computer vision is still far from matching human capabilities, especially when it comes to understanding an image in all its details. Recently, visual question answering (QA) has been proposed as a proxy task for evaluating a vision system's capacity for deeper image understanding. Several QA datasets~\cite{antol2015vqa,gao2015you,malinowski2014multi,ren2015image,VisualMadlibs} have been released since last year. They contributed valuable data for training visual QA systems and introduced various tasks, from picking correct multiple-choice answers~\cite{antol2015vqa} to filling in blanks~\cite{VisualMadlibs}. \begin{figure}[t!] \begin{center} \includegraphics[width=1.\linewidth]{pull_figure.pdf} \vspace{-7mm} \caption{Deep image understanding relies on detailed knowledge about different image parts. We employ diverse questions to acquire detailed information on images, ground objects mentioned in text with their visual appearances, and provide a multiple-choice setting for evaluating a visual question answering task with both textual and visual answers.} \label{fig:pull} \vspace{-7mm} \end{center} \end{figure} Pioneer work in image captioning~\cite{chen2015cvpr,Donahue_2015_CVPR,karpathy2015cvpr,Vinyals_2015_CVPR,xu2015icml}, sentence-based image retrieval~\cite{karpathy2014deep,socher2014grounded} and visual QA~\cite{antol2015vqa,gao2015you,ren2015image} shows promising results. These works aimed at establishing a global association between sentences and images. 
However, as Flickr30K~\cite{plummer2015flickr30k,young2014image} and Visual Madlibs~\cite{VisualMadlibs} suggest, a tighter semantic link between textual descriptions and corresponding visual regions is a key ingredient for better models. As Fig.~\ref{fig:pull} shows, the localization of objects can be a critical step to understand images better and solve image-related questions. Providing these image-text correspondences is called \emph{grounding}. Inspired by Geman et al.'s prototype of a visual Turing test based on image regions~\cite{geman2015visual} and the comprehensive data collection of QA pairs on COCO images~\cite{lin2014microsoft} such as VQA~\cite{antol2015vqa} and Baidu~\cite{gao2015you}, we fuse visual QA and grounding in order to create a new QA dataset with dense annotations and a more flexible evaluation environment. Object-level grounding provides a stronger link between QA pairs and images than global image-level associations. Furthermore, it allows us to resolve coreference ambiguity~\cite{kong2014you,ramanathan2014linking} and to understand object distributions in QA, and enables visually grounded answers that consist of object bounding boxes. Motivated by the goal of developing a model for visual QA based on grounded regions, our paper introduces a dataset that extends previous approaches~\cite{antol2015vqa,gao2015you,ren2015image} and proposes an attention-based model to perform this task. We collected 327,939 QA pairs on 47,300 COCO images~\cite{lin2014microsoft}, together with 1,311,756 human-generated multiple-choices and 561,459 object groundings from 36,579 categories. Our data collection was inspired by the age-old idea of the \emph{W} questions in journalism to describe a complete story~\cite{kuhn2013political}. 
The \emph{7W} questions roughly correspond to an array of standard vision tasks: \emph{what}~\cite{girshick2014rich,karpathy2014large,simonyan2014very}, \emph{where}~\cite{linlearning,zhou2014learning}, \emph{when}~\cite{palermo2012dating,pickup2014seeing}, \emph{who}~\cite{ramanathan2014linking,taigman2014deepface}, \emph{why}~\cite{pirsiavash2014inferring}, \emph{how}~\cite{lampert2009learning,patterson2012sun} and \emph{which}~\cite{kazemzadeh2014emnlp,kiapour2015iccv}. The Visual7W dataset features richer questions and longer answers than VQA~\cite{antol2015vqa}. In addition, we provide complete grounding annotations that link the object mentions in the QA sentences to their bounding boxes in the images and therefore introduce a new QA type with image regions as the visually grounded answers. We refer to questions with textual answers as \emph{telling} questions (\emph{what}, \emph{where}, \emph{when}, \emph{who}, \emph{why} and \emph{how}) and to such with visual answers as \emph{pointing} questions (\emph{which}). We provide a detailed comparison and data analysis in Sec.~\ref{sec:data_analysis}. A salient property of our dataset is the notable gap between human performance (96.6\%) and state-of-the-art LSTM models~\cite{malinowski2015ask} (52.1\%) on the visual QA tasks. We add a new spatial attention mechanism to an LSTM architecture for tackling the visually grounded QA tasks with both textual and visual answers (see Sec.~\ref{sec:attention_model}). The model aims to capture the intuition that answers to image-related questions usually correspond with specific image regions. It learns to attend to the pertinent regions as it reads the question tokens in a sequence. We achieve state-of-the-art performance with 55.6\%, and find correlations between the model's attention heat maps and the object groundings (see Sec.~\ref{sec:exp}). 
Due to the large performance gap between human and machine, we envision our dataset and visually grounded QA tasks to contribute to a long-term joint effort from several communities such as vision, natural language processing and knowledge to close the gap together. The Visual7W dataset constitutes a part of the Visual Genome project~\cite{krishnavisualgenome}. Visual Genome contains 1.7 million QA pairs of the 7W question types, which offers the largest visual QA collection to date for training models. The QA pairs in Visual7W are a subset of the 1.7 million QA pairs from Visual Genome. Moreover, Visual7W includes extra annotations such as object groundings, multiple choices and human experiments, making it a clean and complete benchmark for evaluation and analysis. \section{Related Work} \noindent \textbf{Vision\,+\,Language.} There have been years of effort in connecting the visual and textual information for joint learning~\cite{barnard2003matching,kong2014you,pirsiavash2014inferring,ramanathan2014linking,socher2014grounded,zitnick2013learning}. Image and video captioning has become a popular task in the past year~\cite{chen2015cvpr,Donahue_2015_CVPR,karpathy2015cvpr,rohrbach2013translating,Vinyals_2015_CVPR,xu2015icml}. The goal is to generate text snippets to describe the images and regions instead of just predicting a few labels. Visual question answering is a natural extension to the captioning tasks, but is more interactive and has a stronger connection to real-world applications~\cite{bigham2010vizwiz}. \vspace{1mm} \noindent \textbf{Text-based question answering.} Question answering in NLP has been a well-established problem. Successful applications can be seen in voice assistants in mobile devices, search engines and game shows (e.g., IBM Waston). Traditional question answering system relies on an elaborate pipeline of models involving natural language parsing, knowledge base querying, and answer generation~\cite{ferrucci2010}. 
Recent neural network models attempt to learn end-to-end directly from questions and answers~\cite{iyyer2014emnlp,weston2015towards}. \vspace{1mm} \noindent \textbf{Visual question answering.} Geman et al.~\cite{geman2015visual} proposed a restricted visual Turing test to evaluate visual understanding. The DAQUAR dataset is the first toy-sized QA benchmark built upon indoor scene RGB-D images. Most of the other datasets~\cite{antol2015vqa,gao2015you,ren2015image,VisualMadlibs} collected QA pairs on Microsoft COCO images~\cite{lin2014microsoft}, either generated automatically by NLP tools~\cite{ren2015image} or written by human workers~\cite{antol2015vqa,gao2015you,VisualMadlibs}. Following these datasets, an array of models has been proposed to tackle the visual QA tasks. The proposed models range from probabilistic inference~\cite{malinowski2014multi,tu2014joint,zhu2014eccv} and recurrent neural networks~\cite{antol2015vqa,gao2015you,malinowski2015ask,ren2015image} to convolutional networks~\cite{ma2015cnnQA}. Previous visual QA datasets evaluate textual answers on images while omitting the links between the object mentions and their visual appearances. Inspired by Geman et al.~\cite{geman2015visual}, we establish the link by grounding objects in the images and perform experiments in the grounded QA setting. \section{Creating the Visual7W Dataset} \label{sec:7w_questions} We elaborate on the details of the data collection we conducted upon 47,300 images from COCO~\cite{lin2014microsoft} (a subset of images from Visual Genome~\cite{krishnavisualgenome}). We leverage the six W questions (\emph{what}, \emph{where}, \emph{when}, \emph{who}, \emph{why}, and \emph{how}) to systematically examine a model's capability for visual understanding, and append a 7th \emph{which} question category. This extends existing visual QA setups~\cite{antol2015vqa,gao2015you,ren2015image} to accommodate visual answers. 
We standardize the visual QA tasks with multi-modal answers in a multiple-choice format. Each question comes with four answer candidates, with one being the correct answer. In addition, we ground all the objects mentioned in the QA pairs to their corresponding bounding boxes in the images. The object-level groundings enable examining the object distributions and resolve the coreference ambiguity~\cite{kong2014you,ramanathan2014linking}. \begin{figure*}[t!] \includegraphics[width=1.\linewidth]{mc-7w-examples-figure.pdf} \vspace{-6mm} \caption{Examples of multiple-choice QA from the 7W question categories. The first row shows \emph{telling} questions where the green answer is the ground-truth, and the red ones are human-generated wrong answers. The \emph{what}, \emph{who} and \emph{how} questions often pertain to recognition tasks with spatial reasoning. The \emph{where}, \emph{when} and \emph{why} questions usually involve high-level common sense reasoning. The second row depicts \emph{pointing} (\emph{which}) questions where the yellow box is the correct answer and the red boxes are human-generated wrong answers. These four answers form a multiple-choice test for each question.} \label{fig:mc-qualitative-example} \vspace{-1mm} \end{figure*} \subsection{Collecting the 7W Questions} The data collection tasks are conducted on Amazon Mechanical Turk (AMT), an online crowdsourcing platform. The online workers are asked to write pairs of question and answer based on image content. We instruct the workers to be concise and unambiguous to avoid wordy or speculative questions. To obtain a clean set of high-quality QA pairs, we ask three AMT workers to label each pair as \emph{good} or \emph{bad} independently. The workers judge each pair by whether an average person is able to tell the answer when seeing the image. We accept the QA pairs with at least two positive votes. 
We notice varying acceptance rates between categories, ranging from 92\% for \emph{what} to 63\% for \emph{why}. The overall acceptance rate is 85.8\%. VQA~\cite{antol2015vqa} relied on both human workers and automatic methods to generate a pool of candidate answers. We find that human-generated answers produce the best quality; on the contrary, automatic methods are prone to introducing candidate answers paraphrasing the ground-truth answers. For the \emph{telling} questions, the human workers write three plausible answers to each question without seeing the image. To ensure the uniqueness of correct answers, we provide the ground-truth answers to the workers, and instruct them to write answers of different meanings. For the \emph{pointing} questions, the workers draw three bounding boxes of other objects in the image, ensuring that these boxes cannot be taken as the correct answer. We provide examples from the 7W categories in Fig.~\ref{fig:mc-qualitative-example}. \begin{figure}[thb] \begin{center} \includegraphics[width=1.\linewidth]{coref_ambiguity.pdf} \vspace{-6mm} \caption{Coreference ambiguity arises when an object mention has multiple correspondences in an image, and the textual context is insufficient to tell it apart. The answer to the left question can be either \emph{gray}, \emph{yellow} or \emph{black}, depending on which man is meant. In the right example, the generic phrase \emph{red bus} can refer to both buses in the image. Thus an algorithm might answer correctly even if referring to the wrong bus.} \label{fig:6w_qa_ambiguity} \vspace{-0.25 in} \end{center} \end{figure} \begin{table*}[t!] 
\centering \caption{Comparisons on Existing Visual Question Answering Datasets} \vspace{-3mm} \begin{scriptsize} \begin{tabular}{lrrccccccccc} \hline & \textbf{\# QA} & \textbf{\# Images} & \textbf{AvgQLen} & \textbf{AvgALen} & \textbf{LongAns} & \textbf{TopAns} & \textbf{HumanPerf} & \textbf{COCO} & \textbf{MC} & \textbf{Grounding} & \textbf{VisualAns} \\ \hline \hline \textbf{DAQUAR}~\cite{malinowski2014multi} & 12,468 & 1,447 & 11.5$\,\pm\,$2.4 & 1.2$\,\pm\,$0.5 & 3.4\% & 96.4\% & & & & & \\ \textbf{Visual Madlibs}~\cite{VisualMadlibs} & 56,468 & 9,688 & 4.9$\,\pm\,$2.4 & 2.8$\,\pm\,$2.0 & 47.4\% & 57.9\% & & & $\checkmark$ & & \\ \textbf{COCO-QA}~\cite{ren2015image} & 117,684 & 69,172 & 8.7$\,\pm\,$2.7 & 1.0$\,\pm\,$0 & 0.0\% & 100\% & & $\checkmark$ & & & \\ \textbf{Baidu~\cite{gao2015you}} & 316,193 & 316,193 & - & - & - & - & & $\checkmark$ & & & \\ \textbf{VQA}~\cite{antol2015vqa} & 614,163 & 204,721 & 6.2$\,\pm\,$2.0 & 1.1$\,\pm\,$0.4 & 3.8\% & 82.7\% & $\checkmark$ & $\checkmark$ & $\checkmark$ & & \\ \textbf{Visual7W (Ours)} & 327,939 & 47,300 & 6.9$\,\pm\,$2.4 & 2.0$\,\pm\,$1.4 & 27.6\% & 63.5\% & $\checkmark$ & $\checkmark$ & $\checkmark$ & $\checkmark$ & $\checkmark$\\ \hline \end{tabular} \label{table:comparisons_between_datasets} \end{scriptsize} \vspace{-1mm} \end{table*} \subsection{Collecting Object-level Groundings} \label{sec:grounding_canonicalization} We collect object-level groundings by linking the object mentions in the QA pairs to their bounding boxes in the images. We ask the AMT workers to extract the object mentions from the QA pairs and draw boxes on the images. We collect additional groundings for the multiple choices of the \emph{pointing} questions. Duplicate boxes are removed based on the object names with an Intersection-over-Union threshold of 0.5. In total, we have collected 561,459 object bounding boxes, on average 12 boxes per image. 
The benefits of object-level groundings are three-fold: 1) it resolves the coreference ambiguity problem between QA sentences and images; 2) it extends the existing visual QA setups to accommodate visual answers; and 3) it offers a means to understand the distribution of objects, shedding light on the essential knowledge to be acquired for tackling the QA tasks (see Sec.~\ref{sec:data_analysis}). We illustrate examples of coreference ambiguity in Fig.~\ref{fig:6w_qa_ambiguity}. Ambiguity might cause a question to have more than one plausible answers at test time, thus complicating evaluation. Our online study shows that, such ambiguity occurs in 1\% of the accepted questions and 7\% of the accepted answers. This illustrates a drawback of existing visual QA setups~\cite{antol2015vqa,gao2015you,malinowski2014multi,ren2015image,VisualMadlibs}, where in the absence of object-level groundings the textual questions and answers are only loosely coupled to the images. \section{Comparison and Analysis} \label{sec:data_analysis} In this section, we analyze our Visual7W dataset collected on COCO images (cf. Table~\ref{table:comparisons_between_datasets}, \emph{COCO}), present its key features, and provide comparisons of our dataset with previous work. We summarize important metrics of existing visual QA datasets in Table~\ref{table:comparisons_between_datasets}.\footnote{We report the statistics of VQA dataset~\cite{antol2015vqa} with its real images and Visual Madlibs~\cite{VisualMadlibs} with its filtered hard tasks. The fill-in-the-blank tasks in Visual Madlibs~\cite{VisualMadlibs}, where the answers are sentence fragments, differ from other QA tasks, resulting in distinct statistics. We omit some statistics for Baidu~\cite{gao2015you} due to its partial release.} \vspace{1mm} \noindent \textbf{Advantages of Grounding}\quad The unique feature of our Visual7W dataset is the grounding annotations of all textually mentioned objects (cf. 
Table~\ref{table:comparisons_between_datasets}, \emph{Grounding}). In total we have collected 561,459 object groundings, which enables the new type of visual answers in the form of bounding boxes (cf. Table~\ref{table:comparisons_between_datasets}, \emph{VisualAns}). Examining the object distribution in the QA pairs sheds light on the focus of the questions and the essential knowledge to be acquired for answering them. Our object groundings spread across 36,579 categories (distinct object names), thereby exhibiting a long tail pattern where 85\% of the categories have fewer than 5 instances (see Fig.~\ref{fig:object_grounding_distributions}). The open-vocabulary annotations of objects, in contrast with traditional image datasets focusing on predefined categories and salient objects~\cite{lin2014microsoft,ILSVRC15}, provide a broad coverage of objects in the images. \begin{figure}[t] \begin{center} \includegraphics[width=1.\linewidth]{object_grounding_distributions.pdf} \vspace{-7mm} \caption{Object distribution in \emph{telling} and \emph{pointing} QA. The rank of an object category is based on its frequency with rank \#1 referring to the most frequent one. The \emph{pointing} QA pairs cover an order of magnitude more objects than the \emph{telling} QA pairs. The top 20 object categories indicate that the object distribution's bias towards persons, daily-life objects and natural entities.} \label{fig:object_grounding_distributions} \vspace{-3mm} \end{center} \end{figure} \vspace{1mm} \noindent \textbf{Human-Machine Performance Gap}\quad We expect that a good QA benchmark should exhibit a sufficient performance gap between humans and state-of-the-art models, leaving room for future research to explore. Additionally a nearly perfect human performance is desired to certify the quality of its questions. On Visual7W, we conducted two experiments to measure human performance (cf. 
Table~\ref{table:comparisons_between_datasets}, \emph{HumanPerf}), as well as examining the percentage of questions that can be answered without images. Our results show both strong human performance and a strong interdependency between images and QA pairs. We provide the detailed analysis and comparisons with the state-of-the-art automatic models in Sec.~\ref{sec:exp}. \begin{table}[htb] \caption{Model and Human Performances on QA Datasets} \vspace{-6mm} \begin{footnotesize} \begin{center} \begin{tabular}{|lccc|} \hline & \textbf{Model} & \textbf{Human} & $\Delta$\\ \hline \hline \textbf{VQA} (open-ended)~\cite{antol2015vqa} & 0.54 & 0.83 & 0.29\\ \textbf{VQA} (multiple-choice)~\cite{antol2015vqa} & 0.57 & 0.92 & 0.35\\ \textbf{Facebook bAbI}~\cite{weston2015towards} & 0.92 & $\sim$1.0 & 0.08\\ \textbf{Ours} (\emph{telling} QA) & 0.54 & 0.96 & \textbf{0.42}\\ \textbf{Ours} (\emph{pointing} QA) & 0.56 & 0.97 & 0.41\\ \hline \end{tabular} \end{center} \end{footnotesize} \label{tbl:hum-com-gap} \vspace{-0.17 in} \end{table}% Table~\ref{tbl:hum-com-gap} compares Visual7W with VQA~\cite{antol2015vqa} and Facebook bAbI~\cite{weston2015towards}, which have reported model and human performances. Facebook bAbI~\cite{weston2015towards} is a textual QA dataset claiming that humans can potentially achieve 100\% accuracy yet without explicit experimental proof. For VQA~\cite{antol2015vqa}, numbers are reported for both multiple-choice and open-ended evaluation setups. Visual7W features the largest performance gap ($\Delta$), a desirable property for a challenging and long-lasting evaluation task. At the same time, the nearly perfect human performance proves high quality of the 7W questions. \vspace{1mm} \noindent \textbf{QA Diversity}\quad The diversity of QA pairs is an important feature of a good QA dataset as it reflects a broad coverage of image details, introduces complexity and potentially requires a broad range of skills for solving the questions. 
To obtain diverse QA pairs, we decided to rule out binary questions, contrasting Geman et al.'s proposal~\cite{geman2015visual} and VQA's approach~\cite{antol2015vqa}. We hypothesize that this encourages workers to write more complex questions and also prevents inflating answer baselines with simple yes/no answers. When examining the richness of QA pairs, the length of questions and answers (cf. Table~\ref{table:comparisons_between_datasets}, \emph{AvgQLen}, \emph{AvgALen}) is a rough indicator for the amount of information and complexity they contain. The overall average question and answer lengths are 6.9 and 2.0 words respectively. The \emph{pointing} questions have the longest average question length. The \emph{telling} questions exhibit a long-tail distribution where 51.2\%, 21.2\%, and 16.6\% of their answers have one, two or three words respectively. Many answers to \emph{where} and \emph{why} questions are phrases and sentences, with an average of 3 words. In general, our dataset features long answers where 27.6\% of the questions have answers of more than two words (cf. Table~\ref{table:comparisons_between_datasets}, \emph{LongAns}). In contrast, 89\% of answers in VQA~\cite{antol2015vqa}, 90\% of answers in DAQUAR~\cite{malinowski2014multi} and all answers in COCO-QA~\cite{ren2015image} are a single word. We also capture more long-tail answers as our 1,000 most frequent answers only account for 63.5\% of all our answers (cf. Table~\ref{table:comparisons_between_datasets}, \emph{TopAns}). Finally we provide human created multiple-choices for evaluation (cf. Table~\ref{table:comparisons_between_datasets}, \emph{MC}). \section{Attention-based Model for Grounded QA} \label{sec:attention_model} The visual QA tasks are visually grounded, as local image regions are pertinent to answering questions in many cases. 
For instance, in the first \emph{pointing} QA example of Fig.~\ref{fig:mc-qualitative-example} the regions of the window and the pillows reveal the answer, while other regions are irrelevant to the question. We capture this intuition by introducing a spatial attention mechanism similar to the model for image captioning~\cite{xu2015icml}. \begin{figure}[t] \centering \includegraphics[width=.8\linewidth]{LSTM-Att.pdf} \vspace{-2mm} \caption{Diagram of the recurrent neural network model for \emph{pointing} QA. At the encoding stage, the model reads the image and the question tokens word by word. At each word, it computes attention terms based on the previous hidden state and the convolutional feature map, deciding which regions to focus on. At the decoding stage, it computes the log-likelihood of an answer by a dot product between its transformed visual feature (fc7) and the last LSTM hidden state.} \label{fig:visual6w-lstm-diagram} \vspace{-2mm} \end{figure} \subsection{Recurrent QA Models with Spatial Attention} LSTM models~\cite{hochreiter1997long} have achieved state-of-the-art results in several sequence processing tasks~\cite{Donahue_2015_CVPR,karpathy2015cvpr,sutskever2014sequence}. They have also been used to tackle visual QA tasks~\cite{antol2015vqa,gao2015you,malinowski2015ask}. These models represent images by their global features, lacking a mechanism to understand local image regions. We add spatial attention~\cite{gregor2015draw,xu2015icml} to the standard LSTM model for visual QA, illustrated in Fig.~\ref{fig:visual6w-lstm-diagram}. We consider QA as a two-stage process~\cite{gao2015you,malinowski2015ask}. At the encoding stage, the model memorizes the image and the question into a hidden state vector (the gray box in Fig.~\ref{fig:visual6w-lstm-diagram}). At the decoding stage, the model selects an answer from the multiple choices based on its memory (the \emph{softmax} layer in Fig.~\ref{fig:visual6w-lstm-diagram}). 
We use the same encoder structure for all visual QA tasks but different decoders for the \emph{telling} and \emph{pointing} QA tasks. Given an image $I$ and a question $Q=(q_1, q_2, \ldots, q_m)$, we learn the embeddings of the image and the word tokens as follow: \begin{eqnarray} \small v_0 & = & W_i[F(I)] + b_i\\ v_i & = & W_w[OH(t_i)], i = 1, \ldots, m \end{eqnarray} where $F(\cdot)$ transforms an image $I$ from pixel space to a 4096-dimensional feature representation. We extract the activations from the last fully connected layer (fc7) of a pre-trained CNN model VGG-16~\cite{simonyan2014very}. $OH(\cdot)$ transforms a word token to its one-hot representation, an indicator column vector where there is a single one at the index of the token in the word vocabulary. The $W_i$ matrix transforms the 4096-dimensional image features into the $d_i$-dimensional embedding space $v_0$, and the $W_w$ transforms the one-hot vectors into the $d_w$-dimensional embedding space $v_i$. We set $d_i$ and $d_w$ to the same value of 512. Thus, we take the image as the first input token. These embedding vectors $v_{0, 1, \ldots, m}$ are fed into the LSTM model one by one. The update rules of our LSTM model can be defined as follow: \begin{eqnarray} \small \mathbf{i}_t & = & \sigma(W_{vi}v_t + W_{hi}\mathbf{h}_{t-1}+W_{ri}\mathbf{r}_{t}+b_i) \\ \mathbf{f}_t & = & \sigma(W_{vf}v_t + W_{hf}\mathbf{h}_{t-1}+W_{rf}\mathbf{r}_{t}+b_f) \\ \mathbf{o}_t & = & \sigma(W_{vo}v_t + W_{ho}\mathbf{h}_{t-1}+W_{ro}\mathbf{r}_{t}+b_o) \\ \mathbf{g}_t & = & \phi(W_{vg}v_t + W_{hg}\mathbf{h}_{t-1}+W_{rg}\mathbf{r}_{t}+b_g) \\ \mathbf{c}_t & = & \mathbf{f}_t \odot \mathbf{c}_{t-1} + \mathbf{i}_t\odot \mathbf{g}_t\\ \mathbf{h}_t & = & \mathbf{o}_t \odot \phi (\mathbf{c}_t) \end{eqnarray} where $\sigma(\cdot)$ is the sigmoid function, $\phi(\cdot)$ is the tanh function, and $\odot$ is the element-wise multiplication operator. 
The attention mechanism is introduced by the term $\mathbf{r}_{t}$, which is a weighted average of convolutional features that depends upon the previous hidden state and the convolutional features. The exact formulation is as follows: \begin{eqnarray} \small \mathbf{e}_{t} & = & w_a^T\tanh({W_{he}\mathbf{h}_{t-1} + W_{ce}C(I)})+b_a \\ \label{eq:attention_terms} \mathbf{a}_{t} & = & \text{softmax}(\mathbf{e}_{t})\\ \mathbf{r}_{t} & = & \mathbf{a}_t^TC(I) \end{eqnarray} where $C(I)$ returns the $14\times14$ 512-dimensional convolutional feature maps of image $I$ from the fourth convolutional layer from the same VGG-16 model~\cite{simonyan2014very}. The attention term $\mathbf{a}_{t}$ is a 196-dimensional unit vector, deciding the contribution of each convolutional feature at the $t$-th step. The standard LSTM model can be considered as a special case with each element in $\mathbf{a}_{t}$ set uniformly. $W_i$, $b_i$, $W_w$ and all the $W$s and $b$s in the LSTM model and attention terms are learnable parameters. \subsection{Learning and Inference} The model first reads the image $v_0$ and all the question tokens $v_{q_1}$, $v_{q_2}, \ldots, v_{q_m}$ until reaching the question mark (i.e., end token of the question sequence). When training for \emph{telling} QA, we continue to feed the ground-truth answer tokens $v_{a_1}$, $v_{a_2}, \ldots, v_{a_n}$ into the model. For \emph{pointing} QA, we compute the log-likelihood of an candidate region by a dot product between its transformed visual feature (fc7) and the last LSTM hidden state (see Fig.~\ref{fig:visual6w-lstm-diagram}). We use cross-entropy loss to train the model parameters with backpropagation. During testing, we select the candidate answer with the largest log-likelihood. We set the hyperparameters using the validation set. The dimensions of the LSTM gates and memory cells are 512 in all the experiments. 
The model is trained with Adam update rule~\cite{kingma2014adam}, mini-batch size 128, and a global learning rate of $10^{-4}$. \section{Experiments} \label{sec:exp} We evaluate the human and model performances on the QA tasks. We report a reasonably challenging performance delta leaving sufficient room for future research to explore. \begin{table*}[t!] \begin{center} \caption{Human and model performances in the multiple-choice 7W QA tasks (in accuracy)} \vspace{-3mm} \begin{small} \begin{tabular}{|l|c|c|c|c|c|c|c|c|} \hline \multirow{ 2}{*}{\textbf{Method}} & \multicolumn{6}{|c|}{\textbf{Telling}} & \textbf{Pointing} & \multirow{2}{*}{\textbf{Overall}}\\ \cline{2-8} & \multicolumn{1}{c|}{\textbf{What}} & \multicolumn{1}{c|}{\textbf{Where}} & \multicolumn{1}{c|}{\textbf{When}} & \multicolumn{1}{c|}{\textbf{Who}} & \multicolumn{1}{c|}{\textbf{Why}} & \textbf{How} & \textbf{Which} &\\ \hline \hline Human (Question) & 0.356 & 0.322 & 0.393 & 0.342 & 0.439 & 0.337 & - & 0.353\\ Human (Question + Image) & 0.965 & 0.957 & 0.944 & 0.965 & 0.927 & 0.942 & 0.973 & 0.966\\ \hline \hline Logistic Regression (Question) & 0.420 & 0.375 & 0.666 & 0.510 & 0.354 & 0.458 & 0.354 & 0.383\\ Logistic Regression (Image) & 0.408 & 0.426 & 0.438 & 0.415 & 0.337 & 0.303 & 0.256 & 0.305\\ Logistic Regression (Question + Image) & 0.429 & 0.454 & 0.621 & 0.501 & 0.343 & 0.356 & 0.307 & 0.352\\ LSTM (Question) & 0.430 & 0.414 & 0.693 & 0.538 & 0.491 & 0.484 & - & 0.462\\ LSTM (Image) & 0.422 & 0.497 & 0.660 & 0.523 & 0.475 & 0.468 & 0.299 & 0.359\\ LSTM (Question + Image)~\cite{malinowski2015ask} & 0.489 & 0.544 & 0.713 & 0.581 & 0.513 & \textbf{0.503} & 0.521 & 0.521\\ Ours, LSTM-Att (Question + Image) & \textbf{0.515} & \textbf{0.570} & \textbf{0.750} & \textbf{0.595} & \textbf{0.555} & 0.498 & \textbf{0.561} & \textbf{0.556}\\ \hline \end{tabular} \end{small} \label{table:visual6w-baseline-performance-multiple-choices} \end{center} \vspace{-1mm} \end{table*}% \begin{figure}[t] 
\begin{center} \includegraphics[width=.85\linewidth]{boxplot_response_time.pdf} \vspace{-3mm} \caption{Response time of human subjects on the \emph{telling} QA tasks. The boxes go from the first quartile to the third quartile of the response time values. The bars in the centers of the boxes indicate the median response time of each category.} \label{fig:visual6w-human-response-time} \end{center} \vspace{-6mm} \end{figure} \subsection{Experiment Setups} \label{sec:exp-setup} As the 7W QA tasks have been formulated in a multiple-choice format, we use the same procedure to evaluate human and model performances. At test time, the input is an image and a natural language question, followed by four multiple choices. In \emph{telling} QA, the multiple choices are written in natural language; whereas, in \emph{pointing} QA, each multiple choice corresponds to an image region. We say the model is correct on a question if it picks the correct answer among the candidates. Accuracy is used to measure the performance. An alternative method to evaluate \emph{telling} QA is to let the model predict open-ended text outputs~\cite{antol2015vqa}. This approach works well on short answers; however, it performs poorly on long answers, where there are many ways of paraphrasing the same meaning. We make the training, validation and test splits, each with 50\%, 20\%, 30\% of the pairs respectively. The numbers are reported on the test set. \subsection{7W QA Experiments} \label{sec:visual6w-qa-exps} \subsubsection{Human Experiments on 7W QA} \label{sec:visual6w-human-exps} We evaluate human performances on the multiple-choice 7W QA. We want to measure in these experiments 1) how well humans can perform in the visual QA task and 2) whether humans can use common sense to answer questions without seeing the images. \begin{figure*}[t!] 
\begin{center} \includegraphics[width=.97\linewidth]{qual_results.pdf} \vspace{-2mm} \caption{Qualitative results of human subjects and the state-of-the-art model (LSTM-Att) on multiple-choice QAs. We illustrate the prediction results of six multiple-choice QAs, with and without images. The green answer corresponds to the correct answer to each question, and the other three are wrong answer candidates. We take the majority votes of five human subjects as the human predictions (H) and the top predictions from the model (M). The correct predictions are indicated by check marks.} \label{fig:visual6w-qa-prediction-qualitative-results} \end{center} \vspace{-5mm} \end{figure*} We conduct two sets of human experiments. In the first experiment (Question), a group of five AMT workers are asked to guess the best possible answers from the multiple choices without seeing the images. In the second experiment (Question + Image), we ask a different group of five workers to answer the same questions given the images. The first block in Table~\ref{table:visual6w-baseline-performance-multiple-choices} reports the human performances on these experiments. We measure the mean accuracy over the QA pairs where we take the majority votes among the five human responses. Even without the images, humans manage to guess the most plausible answers in some cases. Human subjects achieve 35.3\% accuracy, 10\% higher than chance. The human performance without images is remarkably high (43.9\%) for the \emph{why} questions, indicating that many \emph{why} questions encode a fair amount of common sense that humans are able to infer without visual cues. However, images are important in the majority of the questions. Human performance is significantly improved when the images are provided. Overall, humans achieve a high accuracy of 96.6\% on the 7W QA tasks. Fig.~\ref{fig:visual6w-human-response-time} shows the box plots of response time of the human subjects for \emph{telling} QA. 
Human subjects spend double the time to respond when the images are displayed. In addition, \emph{why} questions take a longer average response time compared to the other five question types. Human subjects spend an average of 9.3 seconds on \emph{pointing} questions. However, that experiment was conducted in a different user interface, where workers click on the answer boxes in the image. Thus, the response time is not comparable with the \emph{telling} QA tasks. Interestingly, longer response time does not imply higher performance. Human subjects spend more time on questions with lower accuracy. The Pearson correlation coefficient between the average response time and the average accuracy is $-0.135$, indicating a weak negative correlation between the response time and human accuracy. \subsubsection{Model Experiments on 7W QA} \label{sec:visual6w-qa-baseline-exps} Having examined human performance, our next question is how well the state-of-the-art models can perform in the 7W QA task. We evaluate automatic models on the 7W QA tasks in three sets of experiments: without images (Question), without questions (Image) and with images (Question + Image). In the experiments without images (questions), we zero out the image (questions) features. We briefly describe the three models we used in the experiments: \vspace{1mm} \noindent \textbf{Logistic Regression}\quad A logistic regression model that predicts the answer from a concatenation of image fc7 feature and question feature. The questions are represented by 200-dimensional averaged word embeddings from a pre-trained model~\cite{mikolov2013distributed}. For \emph{telling} QA, we take the top-5000 most frequent answers (79.2\% of the training set answers) as the class labels. At test time, we select the top-scoring answer candidate. For \emph{pointing} QA, we perform k-means to cluster training set regions by fc7 features into 5000 clusters, used as class labels. 
At test time, we select the answer candidate closest to the centroid of the predicted cluster. \vspace{1mm} \noindent \textbf{LSTM}\quad The LSTM model in Malinowski and Fritz~\cite{malinowski2015ask} for visual QA with no attention modeling, which can be considered as a simplified version of our full model with the attention terms set to be uniform. \vspace{1mm} \noindent \textbf{LSTM-Att}\quad Our LSTM model with spatial attention introduced in Sec.~\ref{sec:attention_model}, where the attention terms in Eq.~\eqref{eq:attention_terms} determine which region to focus on at each step. \vspace{1mm} We report the results in Table~\ref{table:visual6w-baseline-performance-multiple-choices}. All the baseline models surpass the chance performance (25\%). The logistic regression baseline yields the best performance when only the question features are provided. Having the global image features hurts its performance, indicating the importance of understanding local image regions rather than a holistic representation. Interestingly, the LSTM performance (46.2\%) significantly outperforms human performance (35.3\%) when the images are not present. Human subjects are not \emph{trained} before answering the questions; however, the LSTM model manages to learn the priors of answers from the training set. In addition, both the questions and image content contribute to better results. The Question + Image baseline shows a large improvement in overall accuracy (52.1\%) over the baselines where either the question or the image is absent. Finally, our attention-based LSTM model (LSTM-Att) outperforms other baselines on all question types, except the \emph{how} category, achieving the best model performance of 55.6\%. We show qualitative results of human experiments and the LSTM models on the \emph{telling} QA task in Fig.~\ref{fig:visual6w-qa-prediction-qualitative-results}. 
Human subjects fail to tell a sheep apart from a goat in the last example, whereas the LSTM model gives the correct answer. Yet humans successfully answer the fourth \emph{why} question when seeing the image, where the LSTM model fails in both cases. \begin{figure}[t] \begin{center} \includegraphics[width=0.95\linewidth]{attention_grounding_visualizations_small.pdf} \vspace{-1mm} \caption{Object groundings and attention heat maps. We visualize the attention heat maps (with Gaussian blur) on the images. The brighter regions indicate larger attention terms, i.e., where the model focuses. The bounding boxes show the object-level groundings of the objects mentioned in the answers.} \label{fig:attention-map-visualization} \end{center} \vspace{-5mm} \end{figure} The object groundings help us analyze the behavior of the attention-based model. First, we examine where the model focuses by visualizing the attention terms of Eq.~\eqref{eq:attention_terms}. The attention terms vary as the model reads the QA words one by one. We perform max pooling along time to find the maximum attention weight on each cell of the 14$\times$14 image grid, producing an attention heat map. We see if the model attends to the mentioned objects. The answer object boxes occupy an average of 12\% of image area, while the peak of the attention heat map resides in answer object boxes 24\% of the time. That indicates a tendency for the model to attend to the answer-related regions. We visualize the attention heat maps on some example QA pairs in Fig.~\ref{fig:attention-map-visualization}. The top two examples show QA pairs with answers containing an object. The peaks of the attention heat maps reside in the bounding boxes of the target objects. The bottom two examples show QA pairs with answers containing no object. The attention heat maps are scattered around the image grid. 
For instance, the model attends to the four corners and the borders of the image to look for the carrots in Fig.~\ref{fig:attention-map-visualization}(c). \begin{figure}[t] \begin{center} \includegraphics[width=.75\linewidth]{grounding_freq_vs_acc.pdf} \vspace{-1mm} \caption{Impact of object category frequency on the model accuracy in the \emph{pointing} QA task. The $x$-axis shows the upper bound object category frequency of each bin. The $y$-axis shows the mean accuracy within each bin. The accuracy increases gradually as the model sees more instances from the same category. Meanwhile, the model manages to handle infrequent categories by transferring knowledge from larger categories.} \label{fig:object-frequncy-on-accuracy} \end{center} \vspace{-5mm} \end{figure} Furthermore, we use object groundings to examine the model's behavior on the \emph{pointing} QA. Fig.~\ref{fig:object-frequncy-on-accuracy} shows the impact of object category frequency on the QA accuracy. We divide the object categories into different bins based on their frequencies (by power of 2) in the training set. We compute the mean accuracy over the test set QA pairs within each bin. We observe increased accuracy for categories with more object instances. However, the model is able to transfer knowledge from common categories to rare ones, generating an adequate performance (over 50\%) on object categories with only a few instances. \section{Conclusions} In this paper, we propose to leverage the visually grounded 7W questions to facilitate a deeper understanding of images beyond recognizing objects. Previous visual QA works lack a tight semantic link between textual descriptions and image regions. We link the object mentions to their bounding boxes in the images. Object grounding allows us to resolve coreference ambiguity, understand object distributions, and evaluate on a new type of visually grounded QA. 
We propose an attention-based LSTM model to achieve the state-of-the-art performance on the QA tasks. Future research directions include exploring ways of utilizing common sense knowledge to improve the model's performance on QA tasks that require complex reasoning. \noindent \textbf{Acknowledgements} We would like to thank Carsten Rother from Dresden University of Technology for establishing the collaboration between the Computer Vision Lab Dresden and the Stanford Vision Lab which enabled Oliver Groth to visit Stanford to contribute to this work. We would also like to thank Olga Russakovsky, Lamberto Ballan, Justin Johnson and anonymous reviewers for useful comments. This research is partially supported by a Yahoo Labs Macro award, and an ONR MURI award. {\small \bibliographystyle{ieee}
{ "redpajama_set_name": "RedPajamaArXiv" }
7,447