Spaces:
Runtime error
Runtime error
Commit
·
cefab10
1
Parent(s):
87bdcd8
Upload 8 files
Browse files- datab.py +72 -0
- requirements.txt +170 -0
- storage/docstore.json +1 -0
- storage/index_store.json +1 -0
- storage/vector_store.json +0 -0
- storage1/docstore.json +1 -0
- storage1/index_store.json +1 -0
- storage1/vector_store.json +0 -0
datab.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# datab.py — Streamlit chat app: a conversational agent with buffer memory,
# answering questions from one of two pre-built LlamaIndex vector indices.
#
# Review fixes applied:
#   * SECURITY: removed a hard-coded (and therefore leaked) OpenAI API key.
#     The key is now read from the environment; the leaked key must be revoked.
#   * `storage_context` could be referenced while unbound if the selected
#     option matched neither `if` branch; the option -> persist_dir choice is
#     now an explicit mapping.
#   * The text_input placeholder contained a literal unformatted "{}"; it now
#     interpolates the selected data-set name.
#   * Rendering the memory inside the expander now calls st.write() explicitly
#     instead of relying on Streamlit's bare-expression "magic".

# import databutton as db
import os

import streamlit as st
from langchain.agents import Tool, initialize_agent
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from llama_index import StorageContext, load_index_from_storage
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader

# user = db.user.get()
# name = user.name if user.name else "you"

st.title("🤖 Personalized Bot with Memory 🧠 ")

st.markdown(
    """
#### 🗨️ Chat with a bot with additional information 📜 with `Conversational Buffer Memory`
> *powered by [LangChain]('https://langchain.readthedocs.io/en/latest/modules/memory.html#memory') +
[OpenAI]('https://platform.openai.com/docs/models/gpt-3-5') + [DataButton](https://www.databutton.io/) + [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/index.html)*
----
"""
)

option = st.selectbox(
    'Which data do you want to use?',
    ('Finite-size effects of avalanche dynamics', 'A Review of ChatGPT Applications'))

st.write('You selected:', option)

# SECURITY FIX: the API key was previously hard-coded here and committed to
# version control. Read it from the environment instead and fail loudly if it
# is missing. (The previously committed key must be revoked on the OpenAI
# dashboard — rotating it is not optional once leaked.)
if not os.environ.get("OPENAI_API_KEY"):
    st.error("Set the OPENAI_API_KEY environment variable before running this app.")
    st.stop()

# Each selectable data set maps to the directory its index was persisted to.
# Using a mapping guarantees `storage_context` is always bound below.
_PERSIST_DIRS = {
    'Finite-size effects of avalanche dynamics': "./storage1",
    'A Review of ChatGPT Applications': "./storage",
}

if option:
    # Load the previously persisted vector index for the chosen data set.
    storage_context = StorageContext.from_defaults(
        persist_dir=_PERSIST_DIRS[option]
    )
    index = load_index_from_storage(storage_context)

    tools = [
        Tool(
            name="GPT Index",
            # The agent passes a plain-English question; return the query
            # engine's response as a string so the agent can surface it.
            func=lambda q: str(index.as_query_engine().query(q)),
            description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
            return_direct=True,
        ),
    ]

    # Conversation memory must survive Streamlit reruns, so it lives in
    # session_state rather than a module-level variable.
    if "memory" not in st.session_state:
        st.session_state.memory = ConversationBufferMemory(
            memory_key="chat_history"
        )

    llm = ChatOpenAI(temperature=0)
    agent_chain = initialize_agent(
        tools,
        llm,
        agent="conversational-react-description",
        memory=st.session_state.memory,
    )

    wtf = st.text_input(
        "**What's on your mind?**",
        # BUG FIX: the placeholder previously rendered a literal "{}";
        # interpolate the selected data set so users know the context.
        placeholder="Ask me anything from {}".format(option),
    )

    if wtf:
        with st.spinner(
            "Generating Answer to your Query : `{}` ".format(wtf)
        ):
            res = agent_chain.run(input=wtf)
            st.info(res, icon="🤖")

    with st.expander("History/Memory"):
        # Explicit st.write instead of a bare expression (same rendering,
        # but no reliance on Streamlit "magic" statements).
        st.write(st.session_state.memory)

    # Reset the conversation by replacing the memory object wholesale.
    if st.button('forget the context.'):
        st.session_state.memory = ConversationBufferMemory(
            memory_key="chat_history"
        )
requirements.txt
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiofiles==23.1.0
|
| 2 |
+
aiohttp==3.8.4
|
| 3 |
+
aiosignal==1.3.1
|
| 4 |
+
altair==4.2.2
|
| 5 |
+
anyio==3.6.2
|
| 6 |
+
argon2-cffi==21.3.0
|
| 7 |
+
argon2-cffi-bindings==21.2.0
|
| 8 |
+
arrow==1.2.3
|
| 9 |
+
asttokens==2.2.1
|
| 10 |
+
async-timeout==4.0.2
|
| 11 |
+
attrs==23.1.0
|
| 12 |
+
backcall==0.2.0
|
| 13 |
+
beautifulsoup4==4.12.2
|
| 14 |
+
bleach==6.0.0
|
| 15 |
+
blinker==1.6.2
|
| 16 |
+
cachetools==5.3.0
|
| 17 |
+
certifi==2022.12.7
|
| 18 |
+
cffi==1.15.1
|
| 19 |
+
charset-normalizer==3.1.0
|
| 20 |
+
click==8.1.3
|
| 21 |
+
comm==0.1.3
|
| 22 |
+
contourpy==1.0.7
|
| 23 |
+
cryptography==40.0.2
|
| 24 |
+
cycler==0.11.0
|
| 25 |
+
dataclasses-json==0.5.7
|
| 26 |
+
debugpy==1.6.7
|
| 27 |
+
decorator==5.1.1
|
| 28 |
+
defusedxml==0.7.1
|
| 29 |
+
entrypoints==0.4
|
| 30 |
+
executing==1.2.0
|
| 31 |
+
fastapi==0.95.1
|
| 32 |
+
fastjsonschema==2.16.3
|
| 33 |
+
ffmpy==0.3.0
|
| 34 |
+
filelock==3.12.0
|
| 35 |
+
fonttools==4.39.3
|
| 36 |
+
fqdn==1.5.1
|
| 37 |
+
frozenlist==1.3.3
|
| 38 |
+
fsspec==2023.5.0
|
| 39 |
+
gitdb==4.0.10
|
| 40 |
+
GitPython==3.1.31
|
| 41 |
+
gradio==3.28.3
|
| 42 |
+
gradio_client==0.2.0
|
| 43 |
+
greenlet==2.0.2
|
| 44 |
+
h11==0.14.0
|
| 45 |
+
httpcore==0.17.0
|
| 46 |
+
httpx==0.24.0
|
| 47 |
+
huggingface-hub==0.14.1
|
| 48 |
+
idna==3.4
|
| 49 |
+
importlib-metadata==6.6.0
|
| 50 |
+
ipykernel==6.23.0
|
| 51 |
+
ipython==8.13.2
|
| 52 |
+
ipython-genutils==0.2.0
|
| 53 |
+
ipywidgets==8.0.6
|
| 54 |
+
isoduration==20.11.0
|
| 55 |
+
jedi==0.18.2
|
| 56 |
+
Jinja2==3.1.2
|
| 57 |
+
jsonpointer==2.3
|
| 58 |
+
jsonschema==4.17.3
|
| 59 |
+
jupyter==1.0.0
|
| 60 |
+
jupyter-console==6.6.3
|
| 61 |
+
jupyter-events==0.6.3
|
| 62 |
+
jupyter_client==8.2.0
|
| 63 |
+
jupyter_core==5.3.0
|
| 64 |
+
jupyter_server==2.5.0
|
| 65 |
+
jupyter_server_terminals==0.4.4
|
| 66 |
+
jupyterlab-pygments==0.2.2
|
| 67 |
+
jupyterlab-widgets==3.0.7
|
| 68 |
+
kiwisolver==1.4.4
|
| 69 |
+
langchain==0.0.161
|
| 70 |
+
linkify-it-py==2.0.2
|
| 71 |
+
llama-index==0.6.2
|
| 72 |
+
markdown-it-py==2.2.0
|
| 73 |
+
MarkupSafe==2.1.2
|
| 74 |
+
marshmallow==3.19.0
|
| 75 |
+
marshmallow-enum==1.5.1
|
| 76 |
+
matplotlib==3.7.1
|
| 77 |
+
matplotlib-inline==0.1.6
|
| 78 |
+
mdit-py-plugins==0.3.3
|
| 79 |
+
mdurl==0.1.2
|
| 80 |
+
mistune==2.0.5
|
| 81 |
+
multidict==6.0.4
|
| 82 |
+
mypy-extensions==1.0.0
|
| 83 |
+
nbclassic==1.0.0
|
| 84 |
+
nbclient==0.7.4
|
| 85 |
+
nbconvert==7.4.0
|
| 86 |
+
nbformat==5.8.0
|
| 87 |
+
nest-asyncio==1.5.6
|
| 88 |
+
notebook==6.5.4
|
| 89 |
+
notebook_shim==0.2.3
|
| 90 |
+
numexpr==2.8.4
|
| 91 |
+
numpy==1.24.3
|
| 92 |
+
openai==0.27.6
|
| 93 |
+
openapi-schema-pydantic==1.2.4
|
| 94 |
+
orjson==3.8.12
|
| 95 |
+
packaging==23.1
|
| 96 |
+
pandas==1.5.3
|
| 97 |
+
pandocfilters==1.5.0
|
| 98 |
+
parso==0.8.3
|
| 99 |
+
pexpect==4.8.0
|
| 100 |
+
pickleshare==0.7.5
|
| 101 |
+
Pillow==9.5.0
|
| 102 |
+
platformdirs==3.5.0
|
| 103 |
+
prometheus-client==0.16.0
|
| 104 |
+
prompt-toolkit==3.0.38
|
| 105 |
+
protobuf==3.20.3
|
| 106 |
+
psutil==5.9.5
|
| 107 |
+
ptyprocess==0.7.0
|
| 108 |
+
pure-eval==0.2.2
|
| 109 |
+
pyarrow==12.0.0
|
| 110 |
+
pycparser==2.21
|
| 111 |
+
pydantic==1.10.7
|
| 112 |
+
pydeck==0.8.1b0
|
| 113 |
+
pydub==0.25.1
|
| 114 |
+
Pygments==2.15.1
|
| 115 |
+
PyJWT==2.6.0
|
| 116 |
+
Pympler==1.0.1
|
| 117 |
+
pyparsing==3.0.9
|
| 118 |
+
PyPDF2==3.0.1
|
| 119 |
+
pyrsistent==0.19.3
|
| 120 |
+
python-dateutil==2.8.2
|
| 121 |
+
python-json-logger==2.0.7
|
| 122 |
+
python-multipart==0.0.6
|
| 123 |
+
pytz==2023.3
|
| 124 |
+
pytz-deprecation-shim==0.1.0.post0
|
| 125 |
+
PyYAML==6.0
|
| 126 |
+
pyzmq==25.0.2
|
| 127 |
+
qtconsole==5.4.3
|
| 128 |
+
QtPy==2.3.1
|
| 129 |
+
regex==2023.5.5
|
| 130 |
+
requests==2.29.0
|
| 131 |
+
rfc3339-validator==0.1.4
|
| 132 |
+
rfc3986-validator==0.1.1
|
| 133 |
+
rich==13.3.5
|
| 134 |
+
semantic-version==2.10.0
|
| 135 |
+
Send2Trash==1.8.2
|
| 136 |
+
six==1.16.0
|
| 137 |
+
smmap==5.0.0
|
| 138 |
+
sniffio==1.3.0
|
| 139 |
+
soupsieve==2.4.1
|
| 140 |
+
SQLAlchemy==2.0.12
|
| 141 |
+
stack-data==0.6.2
|
| 142 |
+
starlette==0.26.1
|
| 143 |
+
streamlit==1.22.0
|
| 144 |
+
tenacity==8.2.2
|
| 145 |
+
terminado==0.17.1
|
| 146 |
+
tiktoken==0.4.0
|
| 147 |
+
tinycss2==1.2.1
|
| 148 |
+
toml==0.10.2
|
| 149 |
+
toolz==0.12.0
|
| 150 |
+
tornado==6.3.1
|
| 151 |
+
tqdm==4.65.0
|
| 152 |
+
traitlets==5.9.0
|
| 153 |
+
typing-inspect==0.8.0
|
| 154 |
+
typing_extensions==4.5.0
|
| 155 |
+
tzdata==2023.3
|
| 156 |
+
tzlocal==4.3
|
| 157 |
+
uc-micro-py==1.0.2
|
| 158 |
+
uri-template==1.2.0
|
| 159 |
+
urllib3==1.26.15
|
| 160 |
+
uvicorn==0.22.0
|
| 161 |
+
validators==0.20.0
|
| 162 |
+
watchdog==3.0.0
|
| 163 |
+
wcwidth==0.2.6
|
| 164 |
+
webcolors==1.13
|
| 165 |
+
webencodings==0.5.1
|
| 166 |
+
websocket-client==1.5.1
|
| 167 |
+
websockets==11.0.3
|
| 168 |
+
widgetsnbextension==4.0.7
|
| 169 |
+
yarl==1.9.2
|
| 170 |
+
zipp==3.15.0
|
storage/docstore.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"docstore/metadata": {"878bdaad-e099-44e7-8cfb-d214c9e407d1": {"doc_hash": "8ad3e580478e226164372eb6d3f0038dbd2a55f4c75a17681a7f7a0dc9793305"}, "45838104-6e6e-4c29-beda-59d5941d912e": {"doc_hash": "1726317792dc3b040dabf4d551d9dff525e6b06b58e024529e0eb93106e886a6"}, "bfa53f84-e81b-4d85-80a8-28e0864774aa": {"doc_hash": "771f0d8053019949b356542730efc955873b26aa7fd13256d820ac0669daee3d"}, "9015c73c-a129-4422-bb81-2ad60b3e0458": {"doc_hash": "da74f35db0f361856268c4971ee62c81b41a2a76b5dc905de3fc44d9f85e6e74"}, "3e0c2955-89ea-4a88-ab65-323f73c022a6": {"doc_hash": "82f5548dc3e33700854bfc21389ebbcfb153f20d4c08ba5ec87a45fe2d8d66fc"}, "eb6f75f5-4119-4a47-80f1-f967c1299210": {"doc_hash": "5b3f3ae0619668d33d2d15218851766f9bf75404fa0e9bca108f75ea1287db27"}, "e8b520f8-96ef-4abe-b6f5-fcb26be295e7": {"doc_hash": "41ac1a95af4be8d599f5220fd59befdebe356d9d3950212fbd7de6d1a921cc6c"}, "041abddf-0d22-4833-a7aa-45330b5bb49e": {"doc_hash": "b589fbc40da9cbad2ac9a29578a0dd17adb08a7f41cd8f23434f4cce232c9d85"}, "eec8c5c1-025d-4d48-8c95-02960991813f": {"doc_hash": "9605d2c8f2cc0c372b29e7e113b7a25c2236950a29e295e3bad68e986ab8e169"}, "5c196df3-e04a-46bc-89a1-d3406c2f0357": {"doc_hash": "c954a2aab1e96c45105df6cb42564aa1a918c35259b2f21093ed6095e0a43d32"}, "76d2b8ea-8fc5-4e16-8413-1e632581c4d7": {"doc_hash": "36d339883c37d21f760fee15228d35e8c8b938f3ee9c6267aab5be6265b342d8"}, "6695aaa4-3f68-4863-9eff-8e37406349e8": {"doc_hash": "e133cabca104cddd8f0adf1f7a9ad54f360b39d4acb42fdcc1f6ab5d7317ca46"}, "f7c1761b-4d8d-41a2-85eb-b597ef039513": {"doc_hash": "feb29eb1299f6ccd83e275f91d4880cff76617d58c247b2c4d71c102282e4edd"}, "0780cde1-9a19-4cbd-9525-12a03b247a78": {"doc_hash": "095f2375d10187d2c1173ea12893c2419c51167771c13debe896a2ecf9e37816"}, "6012fe95-dfd4-477c-b0c5-98be7f19251a": {"doc_hash": "01eab58188ce2f2e31bbecdc4f945f59e1910edf9e2ec4f284bba450965cb5f9"}, "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a": {"doc_hash": "2f2c6bbdd753e9b91656d525c47c7ce63470678033397425231436b5d18ab5cd"}, 
"f7b61543-9a3f-4a9e-908a-ebf80a05d53e": {"doc_hash": "330b041a424b4099694487e34da27b691ff0ad79c73dc09daa2dac9decbcb70b"}, "c5242644-4e19-4413-9b0f-20b838b08201": {"doc_hash": "c6275dfe81b828294b7f07fe6471694c37fe5a9ec51b67079f593bc9f71db73d"}, "fb85b6f9-c436-44bb-a256-53b372536df4": {"doc_hash": "a7c9c327eaa28c1a37685a7405789b3a4fcd275c0c77b8a15637fb7d3994b06f"}, "25027da7-f677-499d-90e0-5665fa5133c5": {"doc_hash": "f53f49e06053ca0c480ae781ff6177a173e3f22217f1969f62f1bc5ba098db3d"}, "e62b0869-d6f1-4863-9be0-173f9fbfae9d": {"doc_hash": "688488b63effcb96f645323bed3441480a9d9ac0aec1b9cad761890e6a14c33c"}, "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60": {"doc_hash": "eb2e1fe28f046e882d541656cb9923f4adb8044a85c857d88950e044c9e41f77"}, "bf73a3f1-3f4f-4c5d-8809-5acbede2d600": {"doc_hash": "292cdeb011353a6ad2ed315a65f8556f35cc6cbdb8d3497519257890f429292e"}, "ee3a3718-eeac-4615-808c-897a216b27a5": {"doc_hash": "e40fcbd51a88772e814351ed641ac753eff39b3b1d34e6de77941100423b5660"}}, "docstore/data": {"45838104-6e6e-4c29-beda-59d5941d912e": {"__data__": {"text": " \nA Review of ChatGPT Applications in Education, Mark eting, Software \nEngineering, and Healthcare: Benefits, Drawbacks, a nd Research Directions \n \n Mohammad Fraiwan \u0002,* and Natheer Khasawneh \u0004 \n \u0002 Department of Computer Engineering, Jordan Univer sity of Science and Technology, \nP.O. Box 3030, Irbid 22110, Jordan; mafraiwan@just. edu.jo \n \u0004 Department of Software Engineering, Jordan Univer sity of Science and Technology, \nP.O. Box 3030, Irbid 22110, Jordan; natheer@just.ed u.jo \n*Correspondence: mafraiwan@just.edu.jo \n \n \nAbstract: ChatGPT is a type of artificial intelligence langu age model that uses deep \nlearning algorithms to generate human-like response s to text-based prompts. 
The introduction \nof the latest ChatGPT version in November of 2022 h as caused shockwaves in the industrial and \nacademic communities for its powerful capabilities, plethora of possible applications, and the \ngreat possibility for abuse. At the time of writing this work, several other language models (e.g., \nGoogle\u2019s Bard and Meta\u2019s LLaMA) just came out in an attempt to get a foothold in the vast \npossible market. These models have the ability to r evolutionize the way we interact with \ncomputers and have potential applications in many f ields, including education, software \nengineering, healthcare, and marketing. In this pap er, we will discuss the possible applications, \ndrawbacks, and research directions using advanced l anguage Chatbots (e.g., ChatGPT) in each of \nthese fields. We first start with a brief introduct ion and the development timeline of artificial \nintelligence-based language models, then we go thro ugh possible applications of such models, \nafter that we discuss the limitations and drawbacks of the current technological state of the art, \nand finally we point out future possible research d irections. \nKeywords: Artificial Intelligence; ChatGPT; Chatbot; Machine Learning; Natural Language \nProcessing \n \n \n1 Introduction \n \nChatGPT is a type of artificial intelligence (AI) l anguage model that uses deep learning \nalgorithms to generate human-like responses to text -based prompts. The introduction of the \nlatest ChatGPT version in November of 2022 has caus ed shockwaves in the industrial and \nacademic communities for its powerful capabilities, plethora of possible applications, and the \ngreat possibility for abuse. At the time of writing this work, several other language models (e.g., \nGoogle\u2019s Bard and Meta\u2019s LLaMA) just came out in an a ttempt to get a foothold in the vast \npossible market. 
These models have the ability to r evolutionize the way we interact with \ncomputers and have potential applications in many f ields, including education, software \nengineering, healthcare, and marketing. \nHistorically, language models have been around for more than 20 years with some \nattempts go back to the 1960\u2019s. However, recent dev elopments in deep learning AI, the huge \ncomputational power offered by graphical processing units (GPUs), and the accessibility to large \ndatasets have enabled amazing advancements in the c apabilities and the likelihood to human \noperators. Moreover, Chatbots are now being touted as the future of search engines, because \nthey are able to formulate answers to queries rathe r than just point out the links to possible \nanswers. For example, instead of searching for prog ramming tutorials or a lucky similar answer \nto a homework assignment, ChatGPT can easily provid e the necessary code in response to such \na query with varying degrees of complexity. ChatGPT was able to pass bar exams, United States \nmedical licensing exams, and job interviews, among others [1]. \nThe first language models appeared in the 1960s. ELI ZA was developed by Joseph \nWeizenbaum as one of the first Chatbot programs [2] . It used pattern matching and pre-written \nresponses to simulate conversation with a human use", "doc_id": "45838104-6e6e-4c29-beda-59d5941d912e", "embedding": null, "doc_hash": "1726317792dc3b040dabf4d551d9dff525e6b06b58e024529e0eb93106e886a6", "extra_info": null, "node_info": {"start": 0, "end": 3896}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "3": "bfa53f84-e81b-4d85-80a8-28e0864774aa"}}, "__type__": "1"}, "bfa53f84-e81b-4d85-80a8-28e0864774aa": {"__data__": {"text": "Moreover, Chatbots are now being touted as the future of search engines, because \nthey are able to formulate answers to queries rathe r than just point out the links to possible \nanswers. 
For example, instead of searching for prog ramming tutorials or a lucky similar answer \nto a homework assignment, ChatGPT can easily provid e the necessary code in response to such \na query with varying degrees of complexity. ChatGPT was able to pass bar exams, United States \nmedical licensing exams, and job interviews, among others [1]. \nThe first language models appeared in the 1960s. ELI ZA was developed by Joseph \nWeizenbaum as one of the first Chatbot programs [2] . It used pattern matching and pre-written \nresponses to simulate conversation with a human use r. Fast forward to the 1990s, the artificial \nlinguistic Internet computer entity (ALICE) was deve loped by Richard Wallace. ALICE was another \nearly Chatbot program. It used a similar approach t o ELIZA, but also incorporated machine \nlearning to improve its responses over time [3]. A decade later, Cleverbot was introduced by Rollo \nCarpenter [4]. Cleverbot was a Chatbot program that used artificial neural networks to learn from \nits conversations with users. It was able to genera te more natural and varied responses than \nearlier Chatbots. More recently, in 2018, OpenAI re leased the first version of their Generative \nPre-trained Transformer (GPT) language model. It us ed unsupervised learning to train on large \namounts of text data and could generate coherent an d diverse text based on a given prompt. In \n2019, OpenAI released an improved version of their GPT model called GPT-2. It had 1.5 billion \nparameters, making it one of the largest language m odels at the time. GPT-2 was able to generate \nhigh-quality text that was difficult to distinguish from human writing. In 2020, OpenAI released \nan even more powerful version of their GPT model ca lled GPT-3. It had 175 billion parameters, \nmaking it the largest language model to date. GPT-3 was able to perform a wide range of language \ntasks, including language translation, content gene ration, and answering questions [5]. 
\nMoreover, the number of users of its service was re ported by media outlets to exceed 100 Million \nin two months after its launch. The latest release, ChatGPT-4 is scheduled for release in mid \nMarch, 2023. Moreover, at the time of writing this paper, Microsoft just released Visual ChatGPT, \nwhich extends the capabilities of ChatGPT by allowi ng sending/receiving images in the chat \ndialogue. \nChatGPTs are built on top of the GPT models develop ed by OpenAI, but with additional \ntraining and customization for conversational appli cations. They represent the cutting edge of AI \nlanguage technology and have the potential to revol utionize the way we interact with computers \nand each other. On February 6th, 2023, Google relea ses their own conversational AI called Bard, \nwhich was quickly followed by Meta\u2019s Large Language M odel Meta AI (LLaMA) on February 24th, \n2023. Other less known models do exist in the liter ature, including bidirectional, extreme \nmultilingual language model (XLNet) [6], GShard (a T ransformer-based deep learning \narchitecture) [7], robustly optimized BERT pretrain ing approach (RoBERTa) [8], and text-to-text \ntransfer transformer (T5) [9]. These AI language mo dels are all based on the transformer \narchitecture and have achieved impressive results i n various natural language processing tasks. \nHowever, each model has its own strengths and weakn esses, and the most appropriate model \ndepends on the specific application and the data av ailable. \nThese powerful language models represent a technolo gical disruption to the current \nacademic and industrial landscape. They may render many existing technologies (e.g., traditional \nsearch engines) obsolete/insufficient. Moreover, th ey may adversely affect the educational \nparadigm with the way current assignments and evalu ations are performed. 
On the other", "doc_id": "bfa53f84-e81b-4d85-80a8-28e0864774aa", "embedding": null, "doc_hash": "771f0d8053019949b356542730efc955873b26aa7fd13256d820ac0669daee3d", "extra_info": null, "node_info": {"start": 3264, "end": 7279}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "45838104-6e6e-4c29-beda-59d5941d912e", "3": "9015c73c-a129-4422-bb81-2ad60b3e0458"}}, "__type__": "1"}, "9015c73c-a129-4422-bb81-2ad60b3e0458": {"__data__": {"text": "deep learning \narchitecture) [7], robustly optimized BERT pretrain ing approach (RoBERTa) [8], and text-to-text \ntransfer transformer (T5) [9]. These AI language mo dels are all based on the transformer \narchitecture and have achieved impressive results i n various natural language processing tasks. \nHowever, each model has its own strengths and weakn esses, and the most appropriate model \ndepends on the specific application and the data av ailable. \nThese powerful language models represent a technolo gical disruption to the current \nacademic and industrial landscape. They may render many existing technologies (e.g., traditional \nsearch engines) obsolete/insufficient. Moreover, th ey may adversely affect the educational \nparadigm with the way current assignments and evalu ations are performed. On the other hand, \nit may open further avenues for exploration and lea rning if used properly. In this paper, we \ndiscuss the effects that the introduction of sophis ticated have on education, software \nengineering, healthcare, and marketing. The applica tions, drawbacks, and possible research \ndirections are presented in the next few sections. However, the effects of the latest Chatbot \nlanguage models are still being felt and more appli cations/drawbacks are coming up every day. \nThe remainder of this paper is organized as follows . In section 2 we present the possible \napplications of ChatGPT in the four identified fiel ds. The drawbacks are discussed in section 3. 
\nFuture research directions are explored in section 4. We conclude in section 5. \n \n2 Applications of ChatGPT and Language Models \n In this section, we highlight the possible applica tions of ChatGPT, as well as other \nadvanced language models being rolled out, in the f our aforementioned fields. As more people \nuse and adopt these AI tools, other avenues are pos sible and this is an ongoing and evolving \ntopic. \n2.1 Education \n Language models has several applications in educati on, such as providing personalized \nlearning experiences, generating test questions and answers, and facilitating online discussions. \nIt can also assist teachers in grading assignments and providing feedback to students. Some of \nthe activities involved include: \n \u2022 Language models can assist in providing person alized learning experiences by \nanalyzing student performance data and generating a daptive learning pathways. It can \nrecommend appropriate learning materials, answer st udents\u2019 questions, and provide feedback \non assignments. \n \u2022 Generate test questions and answers for stude nts, which can save time for teachers \nand ensure that tests cover a range of topics and l evels of difficulty. \n \u2022 Facilitate online discussions between student s and teachers by generating \nconversation prompts, answering questions, and prov iding feedback on responses. This can \nenhance collaboration and engagement in online lear ning environments. \n \u2022 Assist teachers in grading assignments and pr oviding feedback to students. It can \nidentify areas where students need improvement and suggest ways to improve their work. \n \u2022 Assist language learners by generating exerci ses, providing pronunciation feedback, \nand answering questions about grammar and vocabular y. It can also provide conversational \npractice for language learners by generating dialog ue prompts. 
\n \u2022 Assist special education students by generati ng alternative learning materials, \nproviding additional explanations, and answering qu estions in a way that is tailored to their \nindividual needs. \n \nOverall, ChatGPT\u2019s education applications have the potential to enhance student learning \nexperiences, provide teachers with valuable resourc es and assistance, and improve the efficiency \nand effectiveness of online learning environments. Tapalova and Zhiyenbayeva [10] recently \nexplored the possibilities of AI in education. A su rvey of educators at their institute indicated that \neducation can be made more effective with the help of AI. More specifically, AI can facilitate \npersonalization of the educational activities,", "doc_id": "9015c73c-a129-4422-bb81-2ad60b3e0458", "embedding": null, "doc_hash": "da74f35db0f361856268c4971ee62c81b41a2a76b5dc905de3fc44d9f85e6e74", "extra_info": null, "node_info": {"start": 7211, "end": 11311}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "bfa53f84-e81b-4d85-80a8-28e0864774aa", "3": "3e0c2955-89ea-4a88-ab65-323f73c022a6"}}, "__type__": "1"}, "3e0c2955-89ea-4a88-ab65-323f73c022a6": {"__data__": {"text": "\npractice for language learners by generating dialog ue prompts. \n \u2022 Assist special education students by generati ng alternative learning materials, \nproviding additional explanations, and answering qu estions in a way that is tailored to their \nindividual needs. \n \nOverall, ChatGPT\u2019s education applications have the potential to enhance student learning \nexperiences, provide teachers with valuable resourc es and assistance, and improve the efficiency \nand effectiveness of online learning environments. Tapalova and Zhiyenbayeva [10] recently \nexplored the possibilities of AI in education. A su rvey of educators at their institute indicated that \neducation can be made more effective with the help of AI. 
More specifically, AI can facilitate \npersonalization of the educational activities, incr ease availability of resources, improve \nadaptability of the educational material to individ ual student needs, provide prompt and \ncontinuous feedback, and improve mental motivation and stimulation. However, it is essential to \nensure that ChatGPT is used responsibly and thought fully, with considerations for potential \nbiases and ethical concerns. In another study, Kuma r and Boulanger [11] explored the use of deep \nlearning AI to automatically grade essays using rub ric instead of holistic scores. They concluded \nthat it is possible for language models to aid stud ents in learning proper writing and its strategies. \nLanguage models can be a useful tool for teaching an d learning, particularly in the field of \nlanguage arts and writing. Here are a few ways that language models can be used to enhance \nteaching: \n \u2022 Writing prompts: Language models can be used t o spark students\u2019 creativity and \nengage them in writing. For example, enter a topic or theme and ask the Chabot to generate a \nwriting prompt for students to work on. \n \u2022 Writing feedback: After students have written a piece, the language model can \nprovide feedback on their work. For example, ChatGP T can analyze the writing for grammar, \npunctuation, and spelling errors, as well as provid e suggestions for improving the overall \nstructure and flow of the writing. \n \u2022 Language practice: Advanced language models ca n help students practice their \nlanguage skills. For example, one can ask ChatGPT t o provide synonyms or antonyms for certain \nwords, or to provide sample sentences using certain grammar structures or vocabulary. \n \u2022 Research assistance: language models can be u sed to assist students in their \nresearch by acting as an advanced search engine. 
Ho wever, this disrupts the current \nhomework/assignment models with the lack of ability to detect plagiarism using ChatGPT. \n \u2022 Personalized learning: Language models can als o be used to create personalized \nlearning experiences for students. For example, Cha tGPT can be used to provide feedback and \nguidance to individual students based on their spec ific strengths and weaknesses in writing. \n \nHowever, assessing the impact of using ChatGPT in t eaching and learning is important to \ndetermine whether it is an effective tool for impro ving students\u2019 skills. The following are a few \nways to measure the impact of using ChatGPT in teac hing: \n \u2022 Pre- and post-assessments to measure the impr ovement in students\u2019 skills. The \nassessments should be aligned with the learning obj ectives and outcomes of using ChatGPT. For \nexample, assess students\u2019 writing skills before and after using ChatGPT to see if there is an \nimprovement in the quality of their writing. \n \u2022 Analyzing student work to see if there is an improvement in their skills. A rubric to \nassess students\u2019 writing, and compare their work be fore and after using ChatGPT to see if there \nis an improvement in areas such as grammar, sentenc e structure, and vocabulary. \n \u2022 Student feedback. Surveys or focus groups can be used to gather their feedback on \nthe usefulness of ChatGPT in improving their skills , as well as their overall experience of using", "doc_id": "3e0c2955-89ea-4a88-ab65-323f73c022a6", "embedding": null, "doc_hash": "82f5548dc3e33700854bfc21389ebbcfb153f20d4c08ba5ec87a45fe2d8d66fc", "extra_info": null, "node_info": {"start": 11329, "end": 15345}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "9015c73c-a129-4422-bb81-2ad60b3e0458", "3": "eb6f75f5-4119-4a47-80f1-f967c1299210"}}, "__type__": "1"}, "eb6f75f5-4119-4a47-80f1-f967c1299210": {"__data__": {"text": "impr ovement in students\u2019 skills. 
The \nassessments should be aligned with the learning obj ectives and outcomes of using ChatGPT. For \nexample, assess students\u2019 writing skills before and after using ChatGPT to see if there is an \nimprovement in the quality of their writing. \n \u2022 Analyzing student work to see if there is an improvement in their skills. A rubric to \nassess students\u2019 writing, and compare their work be fore and after using ChatGPT to see if there \nis an improvement in areas such as grammar, sentenc e structure, and vocabulary. \n \u2022 Student feedback. Surveys or focus groups can be used to gather their feedback on \nthe usefulness of ChatGPT in improving their skills , as well as their overall experience of using \nthe tool. \n \u2022 Observations to assess student\u2019s engagement a nd level of participation. This involve \nobserving their interactions with ChatGPT, their le vel of motivation, and their ability to use the \ntool effectively. \n \u2022 Comparison with control group. If possible, t he progress of students who used \nChatGPT can be compared with a control group of stu dents who did not use the tool. This can \nhelp to isolate the impact of using ChatGPT and det ermine whether it was a significant factor in \nimproving students\u2019 skills. \n \nBy using these methods to assess the impact of usin g language models in teaching, it is \npossible to determine whether it is an effective to ol for improving students\u2019 skills, and make any \nnecessary adjustments to the teaching methods to fu rther enhance learning outcomes. \n \n2.2 Software Engineering \n ChatGPT can be used in software engineering for ta sks such as generating code, \ndebugging, and software testing. It can also help d evelopers in natural language processing tasks, \nsuch as analyzing user requirements, and generating user interfaces. 
This can be accomplished \nas follows: \n \u2022 Code generation: Generate code snippets for d evelopers based on natural language \ndescriptions of the desired functionality. This can save time and improve efficiency in the \nsoftware development process. \n \u2022 Debugging: Assist in debugging code by identi fying errors and suggesting fixes based \non natural language descriptions of the issue. \n \u2022 Software testing: generate test cases and tes t data based on natural language \ndescriptions of the desired test scenarios. This ca n improve the efficiency and effectiveness of \nsoftware testing. \n \u2022 Natural language processing: Assist developer s in natural language processing tasks, \nsuch as analyzing user requirements, generating use r interfaces, and providing Chatbot \ninteractions with users. \n \u2022 Documentation generation: Generate software d ocumentation based on natural \nlanguage descriptions of the software\u2019s functionali ty. This can save time for developers and \nimprove the quality and completeness of the documen tation. \n \u2022 Collaboration and knowledge sharing: Facilita te collaboration and knowledge \nsharing between developers by generating conversati on prompts, answering questions, and \nproviding feedback on responses. This can enhance c ommunication and efficiency in software \ndevelopment teams. \n \nOverall, ChatGPT\u2019s software engineering application s have the potential to improve \nefficiency and effectiveness in the software develo pment process, enhance collaboration and \nknowledge sharing, and improve the quality of softw are documentation. Raychev et al. [12] \nidentified early on the potential of natural langua ge processing in synthesizing code completion \nand predicting the probability of sentences. In add ition, they used a similar approach to predict \nsyntactic and semantic variable types and identifie d names. 
Such efforts have resulted in a slew \nof studies that research the role of language model s in specific software engineering problems \n[13]. In an another study, Tu et al. [14] investiga ted the role of language models in predicting the \nrepetitive, regular, and typical code snippets in h uman-written programs. This has the potential", "doc_id": "eb6f75f5-4119-4a47-80f1-f967c1299210", "embedding": null, "doc_hash": "5b3f3ae0619668d33d2d15218851766f9bf75404fa0e9bca108f75ea1287db27", "extra_info": null, "node_info": {"start": 15425, "end": 19472}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "3e0c2955-89ea-4a88-ab65-323f73c022a6", "3": "e8b520f8-96ef-4abe-b6f5-fcb26be295e7"}}, "__type__": "1"}, "e8b520f8-96ef-4abe-b6f5-fcb26be295e7": {"__data__": {"text": "application s have the potential to improve \nefficiency and effectiveness in the software develo pment process, enhance collaboration and \nknowledge sharing, and improve the quality of softw are documentation. Raychev et al. [12] \nidentified early on the potential of natural langua ge processing in synthesizing code completion \nand predicting the probability of sentences. In add ition, they used a similar approach to predict \nsyntactic and semantic variable types and identifie d names. Such efforts have resulted in a slew \nof studies that research the role of language model s in specific software engineering problems \n[13]. In an another study, Tu et al. [14] investiga ted the role of language models in predicting the \nrepetitive, regular, and typical code snippets in h uman-written programs. This has the potential \nto improve code suggestions in automatic completion systems. Furthermore, Allamanis et al. [15] \ninvestigated the role of language models in detecti ng software bugs [16]. In another avenue, \nlanguage models have been shown to useful in the de velopment and update of software \ncomments and documentation. 
These efforts show that it is possible to tap into the power of \nlanguage models in the software engineering domain. \n \n2.3 Healthcare \n ChatGPT can assist healthcare professionals by pro viding patient triage, symptom \nanalysis, and medical diagnosis. It can also aid in drug discovery and clinical trials. This can be \naccomplished as follows: \n \u2022 Patient Care: Assist healthcare professionals by providing personalized care to \npatients. It can answer questions about medical con ditions, treatments, and medications, and \nprovide recommendations based on patient symptoms a nd medical history. \n \u2022 Electronic health records (EHR): Assist in up dating electronic health records by \nanalyzing natural language descriptions of patient conditions and treatments and generating \ncorresponding entries in the EHR. \n \u2022 Medical education: Assist medical students an d professionals by generating medical \ncase studies, answering questions about medical ter minology, and providing educational \nresources for medical training. \n \u2022 Mental health: Assist in mental health care b y providing personalized support and \nresources to patients. It can answer questions abou t mental health conditions, provide coping \nstrategies and relaxation techniques, and offer sup port for patients experiencing mental health \ncrises. \n \u2022 Clinical trials: assist in clinical trials by generating eligibility criteria, screening \nquestions, and informed consent forms based on natu ral language descriptions of the trial\u2019s \nobjectives. \n \u2022 Telemedicine: Assist in telemedicine by facil itating communication between \nhealthcare professionals and patients. It can answe r questions about telemedicine procedures, \nprovide technical support for patients using teleme dicine tools, and assist in scheduling \nappointments. 
\n \nIn general, ChatGPT\u2019s healthcare applications have the potential to improve patient care, \nenhance medical education and training, and improve efficiency in healthcare operations. Adlung \net al. [17v] identified two challenges facing artif icial intelligence in clinical decision making, \nmainly explainability and causability. Explainabili ty refers to the ability of the model to provide \nresults that can be justified (e.g., significant fa ctors that have statistical association with the \noutput). Although examples on the Web were able to show ChatGPT giving reasons for their \nanswers or the lack of a correct answer, language m odels still has the potential to open up deep \nlearning models for easy explanations and transpare ncy. This also relates to causability, from a \nlegal and regulatory perspective, adopting artifici al intelligence algorithms in clinical decision \nmaking may require such methods to provide clear ex planations on why a certain output was \ngenerated. Moreover, Wu et al. [18] pointed out the importance of literature review in the field \nof regulatory science and the role that language mo dels can play in", "doc_id": "e8b520f8-96ef-4abe-b6f5-fcb26be295e7", "embedding": null, "doc_hash": "41ac1a95af4be8d599f5220fd59befdebe356d9d3950212fbd7de6d1a921cc6c", "extra_info": null, "node_info": {"start": 19379, "end": 23486}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "eb6f75f5-4119-4a47-80f1-f967c1299210", "3": "041abddf-0d22-4833-a7aa-45330b5bb49e"}}, "__type__": "1"}, "041abddf-0d22-4833-a7aa-45330b5bb49e": {"__data__": {"text": "Explainabili ty refers to the ability of the model to provide \nresults that can be justified (e.g., significant fa ctors that have statistical association with the \noutput). 
Although examples on the Web were able to show ChatGPT giving reasons for their \nanswers or the lack of a correct answer, language m odels still has the potential to open up deep \nlearning models for easy explanations and transpare ncy. This also relates to causability, from a \nlegal and regulatory perspective, adopting artifici al intelligence algorithms in clinical decision \nmaking may require such methods to provide clear ex planations on why a certain output was \ngenerated. Moreover, Wu et al. [18] pointed out the importance of literature review in the field \nof regulatory science and the role that language mo dels can play in accelerating this review. \nThese factors bring language models to the fore of the medical AI domain. Lederman [19] \ndiscussed clinical natural language processing (cNLP ) that use textual data in health records to \nsupport the clinical decision making process. They argued for a rethink of cNLP systems to \nimprove their practical adaptation and deployment [ 20]. Specifically, they identify several factors \nthat hinder the usability of the cNLP systems, which may be overcome by advanced language \nmodels, including handling of complex language proc essing, ability to answer \u201chow\u201d and \u201cwhy\u201d \nquestions, and the problem of explainability. In an interesting study, Liu et al. [21] investigated \nthe role of language models in the development and discovery of drugs. Mainly, language models \ncan have a great role in the quick identification o f targets, optimization of clinical trials, \nfacilitation of decision making from a regulatory p erspective, and promoting pharmacovigilance \n[22]. Bhatnagar et al. [23] also reviewed the role of language models in discovering new drugs, \nclinical trials, and pharmacovigilance. 
\nThe topic of pharmacovigilance is another interesti ng are, where language models can \ngauge, analyze, and detect drug-related problems or adverse effects/interactions from users\u2019 \nprompts and discussions [24]. Ball and Pan [25] exp lored the use of language models in the \nprocessing of the individual case safety reports su bmitted to the Food and Drug Administration \nas part of their adverse event reporting system. Th ey identified several problems that need to be \nresolved in order to facilitate the acceptance of l anguage models in pharmacovigilance. Koneti \net al. [26] proposed using language models in drug development by extracting unstructured data \nfrom pharmacokinetics and pharmacodynamics study re ports. Several language models were \nproposed recently for the purpose of medical text m ining. Wang et al. [27] developed the \n\u201cDeepCausality\u201d model, which is able to include AI language models in order to create a causal \ninference model from fee text. They demonstrated it s effectiveness in detecting idiosyncratic \ndrug-induced liver injury with high accuracy. Lee et al. [28] proposed bidirectional encoder \nrepresentations from transformers for biomedical te xt mining (BioBert) model, which was able \nto answer biomedical questions, extract relations, and recognize named biomedical entities with \nimproved accuracy. Similarly, ClinicalBert [29,30] was proposed to predict hospital readmissions \nusing medical text data from hospital admission not es and discharge summaries. \n \n2.4 Marketing \n ChatGPT can be used in marketing to generate produ ct descriptions, customer reviews, \nand social media content. It can also assist in Cha tbot interactions with customers and provide \npersonalized recommendations based on customer pref erences. 
The following are some \nelaborations on ChatGPT\u2019s marketing applications: \n \n \u2022 Content creation: Assist in content creation for marketing campaigns by generating \nideas for social media posts, email marketing campa igns, and blog articles. It can also generate \nheadlines, product descriptions, and promotional me ssages based on natural language \ndescriptions of the marketing objectives.", "doc_id": "041abddf-0d22-4833-a7aa-45330b5bb49e", "embedding": null, "doc_hash": "b589fbc40da9cbad2ac9a29578a0dd17adb08a7f41cd8f23434f4cce232c9d85", "extra_info": null, "node_info": {"start": 23506, "end": 27610}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "e8b520f8-96ef-4abe-b6f5-fcb26be295e7", "3": "eec8c5c1-025d-4d48-8c95-02960991813f"}}, "__type__": "1"}, "eec8c5c1-025d-4d48-8c95-02960991813f": {"__data__": {"text": "was proposed to predict hospital readmissions \nusing medical text data from hospital admission not es and discharge summaries. \n \n2.4 Marketing \n ChatGPT can be used in marketing to generate produ ct descriptions, customer reviews, \nand social media content. It can also assist in Cha tbot interactions with customers and provide \npersonalized recommendations based on customer pref erences. The following are some \nelaborations on ChatGPT\u2019s marketing applications: \n \n \u2022 Content creation: Assist in content creation for marketing campaigns by generating \nideas for social media posts, email marketing campa igns, and blog articles. It can also generate \nheadlines, product descriptions, and promotional me ssages based on natural language \ndescriptions of the marketing objectives. \n \n \u2022 Customer service: Assist in customer service by providing personalized support to \ncustomers. It can answer questions about products a nd services, provide recommendations \nbased on customer preferences, and assist in resolv ing customer issues and complaints. 
\n \n \u2022 Lead generation: Assist in lead generation by analyzing customer data and \ngenerating natural language descriptions of potenti al customers. It can also assist in lead \nqualification by analyzing customer responses and i dentifying potential leads. \n \n \u2022 Market research: Assist in market research by generating surveys, analyzing \ncustomer feedback, and identifying trends and insig hts based on natural language descriptions \nof the research objectives. \n \n \u2022 Personalization: Assist in personalizing mark eting campaigns by analyzing customer \ndata and generating personalized recommendations fo r products and services. It can also assist \nin tailoring marketing messages to specific custome r segments based on natural language \ndescriptions of the customer demographics and prefe rences. \n \n \u2022 Sales: Assist in sales by generating personal ized product recommendations and \nassisting in the sales process. It can also assist in upselling and cross-selling by analyzing \ncustomer data and generating natural language descr iptions of potential add-ons or upgrades. \n \nChatGPT\u2019s marketing applications have the potential to improve efficiency and \neffectiveness in marketing campaigns, enhance custo mer experience and satisfaction, and \nimprove sales performance. Verma et al. [31] discus sed the role of recent disruptive \ntechnologies, mainly AI, in business operations. On e of the areas that they have identified was \nthe use of Chatbots and language models to improve customer experience [32] and customer \nrelationship management (CRM) systems. Language mode ls and Chatbots offer great advantages \nin the form of faster and automated access to data, simpler and efficient processes, accuracy, \nand cost effectiveness [33]. Similarly, De Mauro et al. [34] published a recent taxonomy of the \nuse of machine learning and AI in marketing. 
The au thors have identified several use cases of AI \nin marketing and divided those into customer side v ersus business side. On the customer facing \nside, they identified personalization of offers, co mmunication, recommendations, and \nassortments as candidates for improvements. Moreove r, they indicated that the consumption \nexperience can also be improved via experience impr ovement and digital customer service. On \nthe business side, machine learning can be benefici al in market understanding and customer \nsensing, among other avenues. In a recent literatur e review of marketing and AI, Duarte et al. \n[35] have identified recommender systems and text a nalysis as promising areas of useful \nChatbots usage in marketing. De Bruyn et al. [36] i nvestigated the opportunities and pitfalls of \nusing AI in marketing. They have identified several risks associated with adopting new AI \ndisruptive technologies, mainly bias, explainabilit y, control, and unsafe/unrealistic learning \nenvironments. Moreover, they conclude with a warnin g of the possibility of AI failure in this \ndomain if these challenges are not resolved by the", "doc_id": "eec8c5c1-025d-4d48-8c95-02960991813f", "embedding": null, "doc_hash": "9605d2c8f2cc0c372b29e7e113b7a25c2236950a29e295e3bad68e986ab8e169", "extra_info": null, "node_info": {"start": 27622, "end": 31695}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "041abddf-0d22-4833-a7aa-45330b5bb49e", "3": "5c196df3-e04a-46bc-89a1-d3406c2f0357"}}, "__type__": "1"}, "5c196df3-e04a-46bc-89a1-d3406c2f0357": {"__data__": {"text": "via experience impr ovement and digital customer service. On \nthe business side, machine learning can be benefici al in market understanding and customer \nsensing, among other avenues. In a recent literatur e review of marketing and AI, Duarte et al. \n[35] have identified recommender systems and text a nalysis as promising areas of useful \nChatbots usage in marketing. De Bruyn et al. 
[36] i nvestigated the opportunities and pitfalls of \nusing AI in marketing. They have identified several risks associated with adopting new AI \ndisruptive technologies, mainly bias, explainabilit y, control, and unsafe/unrealistic learning \nenvironments. Moreover, they conclude with a warnin g of the possibility of AI failure in this \ndomain if these challenges are not resolved by the implicit marketing knowledge transfer to AI \nmodels. \n \n3 Drawbacks of Language Models \n Although language models and Chatbots clearly offe r great opportunities, they have \ninherent shortcomings that limit their applicabilit y, adoption, and usefulness. In the next few \nsubsections, we go through these drawbacks in detai l. \n3.1 Bias \n Language models can exhibit bias if the training da ta used to create them is biased. As \nSchramowski et al. [37] pointed out, large pre-trai ned models that try to mimic natural languages, \nmay end up repeating the same unfairness and prejud ices. This can lead to discriminatory or \ninaccurate analyses and recommendations. Moreover, this may lead to public outcry (i.e., \npolitical, social, and legal) against the commercia l applications. These biases manifests \nthemselves in several ways, as follows: \n \u2022 Training data bias: Language models are typica lly trained on large datasets of \nhuman language. If these datasets are biased in som e way (e.g., based on race, gender, \nsocioeconomic status, etc.), then the model may lea rn and replicate these biases in its \nresponses. For example, if the training data is bia sed towards a particular gender, then the \nmodel may generate responses that are more favorabl e towards that gender. \n \u2022 User interaction bias: The responses generate d by Chatbots are based on the input \nthey receive from users. If users consistently ask biased or prejudiced questions, then the \nmodel may learn and replicate these biases in its r esponses. 
For example, if users frequently ask \nquestions that are discriminatory towards a particu lar group, then the model may generate \nresponses that perpetuate these biases. \n \u2022 Algorithmic bias: The algorithms used to trai n and operate language models and \nChatbots may also introduce biases. For example, if the model is trained to optimize for a \nparticular metric (e.g., accuracy, engagement, etc. ), then it may prioritize generating responses \nthat optimize for that metric, even if those respon ses are biased in some way. \n \u2022 Contextual bias: Chatbots generate responses based on the context they receive \nfrom users. If the context is biased in some way (e .g., based on the user\u2019s location, language, \netc.), then the model may generate biased responses . For example, if a user is asking questions \nabout a particular culture or religion, and the mod el is not trained on that culture or religion, it \nmay generate biased responses due to its lack of kn owledge. \n It is important to note that bias in language mode ls are not necessarily intentional or \nmalicious. Although this sometimes is hard to prove or justify to the non-technical public. \nMoreover, it can have harmful consequences, such as perpetuating stereotypes, reinforcing \ndiscriminatory attitudes, or excluding certain grou ps from access to information and resources. 
\nTo mitigate these risks, it is of paramount importa nce to train and operate the models in a \nresponsible and ethical manner, by carefully select ing and monitoring training data, \nincorporating diversity and inclusion consideration s, and regularly auditing the", "doc_id": "5c196df3-e04a-46bc-89a1-d3406c2f0357", "embedding": null, "doc_hash": "c954a2aab1e96c45105df6cb42564aa1a918c35259b2f21093ed6095e0a43d32", "extra_info": null, "node_info": {"start": 31709, "end": 35641}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "eec8c5c1-025d-4d48-8c95-02960991813f", "3": "76d2b8ea-8fc5-4e16-8413-1e632581c4d7"}}, "__type__": "1"}, "76d2b8ea-8fc5-4e16-8413-1e632581c4d7": {"__data__": {"text": "if a user is asking questions \nabout a particular culture or religion, and the mod el is not trained on that culture or religion, it \nmay generate biased responses due to its lack of kn owledge. \n It is important to note that bias in language mode ls are not necessarily intentional or \nmalicious. Although this sometimes is hard to prove or justify to the non-technical public. \nMoreover, it can have harmful consequences, such as perpetuating stereotypes, reinforcing \ndiscriminatory attitudes, or excluding certain grou ps from access to information and resources. \nTo mitigate these risks, it is of paramount importa nce to train and operate the models in a \nresponsible and ethical manner, by carefully select ing and monitoring training data, \nincorporating diversity and inclusion consideration s, and regularly auditing the model for \npotential biases. \n \n3.2 Lack of Transparency \n Language models, and deep learning models in genera l, are called \u201cblack box\u201d models as \ntheir results can be difficult to interpret and und erstand, making it challenging for researchers to \nassess their validity and accuracy [38]. 
These mode ls lack transparency, meaning that it is often \ndifficult to understand how the model arrived at a particular output or decision. This lack of \ntransparency can be problematic for several reasons : \n 1. Debugging: If the model generates unexpecte d or incorrect output, it can be \nchallenging to identify the source of the problem w ithout understanding how the model arrived \nat its decision. \n 2. Accountability: In some cases, the output g enerated by the model may have \nsignificant consequences for individuals or society as a whole (e.g., in healthcare or criminal \njustice). If the model lacks transparency, it can b e difficult to hold it accountable for its \ndecisions. \n 3. Bias: As mentioned earlier, language models can be biased in various ways, such as \nin the training data or algorithms used. Without tr ansparency, it can be difficult to identify and \ncorrect these biases. \n 4. Trust: In many cases, users may be hesitant to trust the output generated by the \nmodel if they don\u2019t understand how it arrived at it s decision. This have ramifications in \nobtaining regulatory approvals and adoption by the public. \n \n \n3.3 Explainability \n Researchers are developing new techniques for maki ng deep learning models more \ninterpretable and explainable. For example, techniq ues such as attention mechanisms [39] or \nsaliency maps can highlight which parts of the inpu t the model is focusing on to make its decision \n[40]. Hicks et al. [41] argued that deep learning p redictions and decisions need to be \naccompanied by explanations so that the doctors res ponsible for the clinical decision-making \nprocess trust, understand, and validate such models [42-44]. To this end, they proposed a new \nmethod called electrocardiogram gradient class acti vation map, which produces explanations for \nthe results of the electrocardiogram (EEG) analysis . Along the same line of EEG analysis, \nKhasawneh et al. 
[45,46] proposed treating the signa l in a similar fashion to the clinicians and \nperform the signal inspection visually using deep l earning object detection algorithms. In the \ncontext of language models, further explainability can be achieved via the following practices: \n \u2022 Documentation: Developers can document how th e model was trained, what data \nwas used, what decisions were made in the training process, and what assumptions were \nmade. Moreover, ethical standards can be developed to standardize the training process. This \ncan help increase transparency and accountability. \n \u2022 Auditing: Regular auditing of the model can h elp identify and correct biases, as well \nas provide insights into how the model is making de cisions. \n \u2022 Collaboration: Collaboration between develope rs, users, and experts in relevant \nfields can help increase transparency and ensure th at the", "doc_id": "76d2b8ea-8fc5-4e16-8413-1e632581c4d7", "embedding": null, "doc_hash": "36d339883c37d21f760fee15228d35e8c8b938f3ee9c6267aab5be6265b342d8", "extra_info": null, "node_info": {"start": 35598, "end": 39579}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "5c196df3-e04a-46bc-89a1-d3406c2f0357", "3": "6695aaa4-3f68-4863-9eff-8e37406349e8"}}, "__type__": "1"}, "6695aaa4-3f68-4863-9eff-8e37406349e8": {"__data__": {"text": "the signal inspection visually using deep l earning object detection algorithms. In the \ncontext of language models, further explainability can be achieved via the following practices: \n \u2022 Documentation: Developers can document how th e model was trained, what data \nwas used, what decisions were made in the training process, and what assumptions were \nmade. Moreover, ethical standards can be developed to standardize the training process. This \ncan help increase transparency and accountability. 
\n \u2022 Auditing: Regular auditing of the model can h elp identify and correct biases, as well \nas provide insights into how the model is making de cisions. \n \u2022 Collaboration: Collaboration between develope rs, users, and experts in relevant \nfields can help increase transparency and ensure th at the model is being used in an ethical and \nresponsible manner. \n While these approaches can help address the lack o f transparency in deep learning, it is \nimportant to acknowledge that achieving full transp arency may not be possible or desirable (e.g., \nproprietary or copyrighted/patented models) in all cases. \n \n3.4 Over-reliance \n Researchers, professionals, or students may become over-reliant on Chatbots, language \nmodels, and AI in general. Thus, they may neglect c ritical thinking, leading to errors and \ninaccuracies in their research, studies, or practic e/work. Such over-reliance can happen in several \nways, as in the following examples: \n \u2022 Dataset selection: Researchers may rely too h eavily on Chatbots to generate \nsynthetic data or to augment existing datasets. Thi s can be problematic if the generated data is \nbiased or does not accurately reflect the real-worl d data. \n \u2022 Hypothesis generation: Language models can gen erate hypotheses or research \nquestions based on input from researchers. While th is can be a useful tool for exploring new \nareas of research, researchers should be cautious n ot to rely too heavily on the model\u2019s \nsuggestions without independent validation. \n \u2022 Data analysis: Chatbots can be used to analyz e and summarize large datasets. While \nthis can save time and resources, researchers shoul d be cautious not to rely too heavily on the \nmodel\u2019s output without independent verification. \n \u2022 Model selection: Researchers may choose to us e ChatGPT (or other AI language \nmodels) as their primary research tool, rather than as one tool among many. 
This can lead to \nover-reliance on the model\u2019s output and a failure t o consider alternative hypotheses or \nmethods. \n \nOver-reliance can lead to several problems, includi ng: \n \u2022 Biases: As we discussed earlier, Chatbots and language models can be biased in \nvarious ways. If researchers rely too heavily on th e model\u2019s output, they may unknowingly \nreplicate or amplify these biases. \n \u2022 Errors: ChatGPT (like all models) is not infa llible. If researchers rely too heavily on \nthe model\u2019s output, they may introduce errors or in accuracies into their research. \n \u2022 Over-generalization: ChatGPT is trained on a large corpus of text and may not \naccurately reflect the nuances and complexities of the real world. If researchers rely too heavily \non the model\u2019s output, they may over-generalize or oversimplify their findings. \n \nTo avoid over-reliance on AI and language models, r esearchers should be cautious in their \nuse of the model and should use it in conjunction w ith other research methods and tools. They \nshould also be aware of the model\u2019s limitations and potential biases, and should take steps to \nmitigate these risks. \n \n3.5 Ethical Concerns \n ChatGPT can raise ethical concerns such as privacy violations and job displacement (i.e., \ninvoluntary job loss). ChatGPT can generate respons es that", "doc_id": "6695aaa4-3f68-4863-9eff-8e37406349e8", "embedding": null, "doc_hash": "e133cabca104cddd8f0adf1f7a9ad54f360b39d4acb42fdcc1f6ab5d7317ca46", "extra_info": null, "node_info": {"start": 39607, "end": 43368}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "76d2b8ea-8fc5-4e16-8413-1e632581c4d7", "3": "f7c1761b-4d8d-41a2-85eb-b597ef039513"}}, "__type__": "1"}, "f7c1761b-4d8d-41a2-85eb-b597ef039513": {"__data__": {"text": "ChatGPT is trained on a large corpus of text and may not \naccurately reflect the nuances and complexities of the real world. 
If researchers rely too heavily \non the model\u2019s output, they may over-generalize or oversimplify their findings. \n \nTo avoid over-reliance on AI and language models, r esearchers should be cautious in their \nuse of the model and should use it in conjunction w ith other research methods and tools. They \nshould also be aware of the model\u2019s limitations and potential biases, and should take steps to \nmitigate these risks. \n \n3.5 Ethical Concerns \n ChatGPT can raise ethical concerns such as privacy violations and job displacement (i.e., \ninvoluntary job loss). ChatGPT can generate respons es that may violate users\u2019 privacy, and the \nuse of ChatGPTs in various industries may lead to j ob displacement. There are several ethical \nconcerns associated with ChatGPT and other AI model s. Here are a few examples: \n \u2022 Bias: As we discussed earlier, language model s can be biased in various ways, such \nas in the training data or algorithms used. These b iases can lead to unfair or discriminatory \noutcomes, such as in employment, healthcare, or cri minal justice. \n \u2022 Privacy: Chatbots can generate highly persona lized output based on input from \nusers, which can raise privacy concerns. For exampl e, if a user inputs sensitive information into \nthe model (such as health or financial data), the m odel\u2019s output could reveal that information \nto others. \n \u2022 Accountability: ChatGPT (and other AI models) can make decisions or generate \noutput with significant consequences for individual s or society as a whole (e.g., in healthcare or \ncriminal justice). If the model makes an incorrect or biased decision, it can be difficult to hold it \naccountable for its actions. \n \u2022 Transparency: As we discussed earlier, deep l earning can lack transparency, \nmeaning that it is often difficult to understand ho w the model arrived at a particular output or \ndecision. 
This lack of transparency can make it dif ficult to identify and correct biases or to hold \nthe model accountable for its actions. \n \u2022 Misuse: ChatGPT can be misused for nefarious purposes, such as generating fake \nnews or propaganda. Moreover, academia is ringing t he alarm bills about the great possibilities \nfor cheating on academic assignments using language models and Chatbots, coupled with the \nlagging behind of cheating detections software on t his problem. This can have serious \nconsequences for individuals and society as a whole . \n \nTo address these ethical concerns, researchers, dev elopers, and users of ChatGPT should \nprioritize ethical considerations throughout the mo del\u2019s development and use. This can include: \n \u2022 Fairness: Ensuring that the model is trained on diverse and representative data, and \nthat it does not unfairly discriminate against any particular group of people. \n \u2022 Privacy: Ensuring that the model is used in a way that respects users\u2019 privacy and \nthat sensitive data is protected. \n \u2022 Accountability: Ensuring that there are mecha nisms in place to hold the model \naccountable for its decisions and actions. \n \u2022 Transparency: Ensuring that the model\u2019s outpu t is transparent and explainable, so \nthat users can understand how the model arrived at its decisions. \n \n \u2022 Responsible use: Ensuring that the model is u sed in an ethical and responsible \nmanner, and that it is not misused for nefarious pu rposes. \n Each one of these considerations open several aven ues for research. For example, the \nalgorithms for calculating similarity scores and ch eating detection need to be developed to take \nunder consideration the availability of powerful Ch atbots like ChatGPT. 
\n4 Future Research Directions \n \n4.1 Explainability", "doc_id": "f7c1761b-4d8d-41a2-85eb-b597ef039513", "embedding": null, "doc_hash": "feb29eb1299f6ccd83e275f91d4880cff76617d58c247b2c4d71c102282e4edd", "extra_info": null, "node_info": {"start": 43437, "end": 47247}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "6695aaa4-3f68-4863-9eff-8e37406349e8", "3": "0780cde1-9a19-4cbd-9525-12a03b247a78"}}, "__type__": "1"}, "0780cde1-9a19-4cbd-9525-12a03b247a78": {"__data__": {"text": " \u2022 Accountability: Ensuring that there are mecha nisms in place to hold the model \naccountable for its decisions and actions. \n \u2022 Transparency: Ensuring that the model\u2019s outpu t is transparent and explainable, so \nthat users can understand how the model arrived at its decisions. \n \n \u2022 Responsible use: Ensuring that the model is u sed in an ethical and responsible \nmanner, and that it is not misused for nefarious pu rposes. \n Each one of these considerations open several aven ues for research. For example, the \nalgorithms for calculating similarity scores and ch eating detection need to be developed to take \nunder consideration the availability of powerful Ch atbots like ChatGPT. \n4 Future Research Directions \n \n4.1 Explainability \n One of the most crucial research directions is to develop methods to make deep learning \nin general and language models in particular more e xplainable. Explainability refers to the ability \nto understand how a model arrived at its output or decision, and to be able to explain that \nprocess in a way that is understandable to humans. This will enable researchers to understand \nthe logic behind the models\u2019 decisions and provide transparency in their output. Explainability is \nan important research direction for Chatbots, as it can help address concerns around \ntransparency and accountability. 
Failing to address explainability has great ramifications on the \nadoption and regulatory certification of AI techniq ues [47,48]. For example, the European general \ndata protection regulation explicitly requires deci sions made in healthcare among other areas to \nbe traceable and explainable [49]. Explainable AI ( XAI) is a research avenue in AI that gaining a \nlot of attention driven by the real-life deployment requirements of AI-based systems. A recent \nsurvey by Bai et al. [50] presents the recent advan cements toward achieving explainable AI in \npattern recognition. \nOne approach to achieving explainability is through the use of attention mechanisms. \nAttention mechanisms allow the model to focus on ce rtain parts of the input when generating its \noutput. They can generate probability distributions relating to the input, which serve as \nindicators on the importance of features. By visual izing the attention weights for each part of the \ninput [51], we can gain insight into which parts of the input the model is focusing on and how it \nis using that information to generate its output. H owever, attention mechanisms according to Liu \net al. [52] may be unable to identify the polarity of the impact of individual features due to \nsuppression effects. \nAnother approach is to use model-agnostic methods t o explain the output of deep \nlearning models. These methods aim to explain the m odel\u2019s output without needing to know the \ninternal workings of the model itself. For example, one such method is LIME (Local Interpretable \nModel-agnostic Explanations) [53], which generates a simple, interpretable model that \napproximates the behavior of the original model on a local scale. Aditya and Pal [54] proposed \nfurther refinements to LIME using Shapley values use d in game theory, which provide several \nadvantages in terms of efficiency, consistency, and symmetry. 
\nIn addition to these approaches, there is ongoing r esearch into developing new methods \nfor explainability. For example, recent work has ex plored the use of counterfactual explanations, \nwhich aim to explain how the output of the model wo uld have changed if certain parts of the \ninput had been different [31]. Another area of rese arch is developing methods to explain the \noutput of black box models when the input is a sequ ence of events over time, such as in the case \nof medical records or financial transactions. \nOverall, the goal of explainability research is to provide users with a better understanding \nof how language models in particular arrive at thei r output, and to provide mechanisms for \nidentifying and correcting biases or errors in the model\u2019s decision-making. This is an important \narea of research for", "doc_id": "0780cde1-9a19-4cbd-9525-12a03b247a78", "embedding": null, "doc_hash": "095f2375d10187d2c1173ea12893c2419c51167771c13debe896a2ecf9e37816", "extra_info": null, "node_info": {"start": 47235, "end": 51318}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "f7c1761b-4d8d-41a2-85eb-b597ef039513", "3": "6012fe95-dfd4-477c-b0c5-98be7f19251a"}}, "__type__": "1"}, "6012fe95-dfd4-477c-b0c5-98be7f19251a": {"__data__": {"text": " \nIn addition to these approaches, there is ongoing r esearch into developing new methods \nfor explainability. For example, recent work has ex plored the use of counterfactual explanations, \nwhich aim to explain how the output of the model wo uld have changed if certain parts of the \ninput had been different [31]. Another area of rese arch is developing methods to explain the \noutput of black box models when the input is a sequ ence of events over time, such as in the case \nof medical records or financial transactions. 
\nOverall, the goal of explainability research is to provide users with a better understanding \nof how language models in particular arrive at thei r output, and to provide mechanisms for \nidentifying and correcting biases or errors in the model\u2019s decision-making. This is an important \narea of research for ensuring the ethical use of Ch atGPTs in a variety of applications. \n \n4.2 Bias Detection and Mitigation \n Another research direction is to detect and mitiga te bias in ChatGPTs. Researchers can \ndevelop methods to identify and eliminate bias from Chatbots by using techniques such as \nadversarial training [55]. This method fine tunes l anguage models and deep learning models in \ngeneral through the introduction of adversarial sam ples in the training set, which tends to \nincrease the robustness and generalization of the m odel. Toward this end, several approaches \nand algorithms have been proposed in the literature , including adversarial training for large \nneural language models (ALUM) [56], generative adver sarial training [57], attacking to training \n(A2T) [58], and large-margin classification [59]. B ias detection and mitigation are important steps \nin ensuring that language models are used ethically and fairly. Here are some approaches to bias \ndetection and mitigation: \n \u2022 Data collection: Bias can be introduced in th e training data that is used to train \nChatGPTs. One approach to reducing bias is to ensur e that the training data is diverse and \nrepresentative of the population that the model wil l be used on. This can involve careful \nselection of data sources and cleaning and preproce ssing the data to remove any biases. \n \u2022 Bias metrics: Once the model is trained, it i s important to measure the extent of any \nbias that may be present. This can be done using va rious bias metrics, such as the disparate \nimpact or statistical parity difference. 
These metr ics can help identify areas of the model that \nmay be more prone to bias. \n \u2022 Mitigation strategies: Once bias has been ide ntified, there are various strategies \nthat can be used to mitigate it. One approach is to modify the training data to reduce bias, for \nexample by oversampling underrepresented groups. An other approach is to modify the model \nitself, for example by adding constraints or penalt ies to the training process that encourage \nfairer predictions. Alternatively, post-processing techniques can be used to adjust the model\u2019s \npredictions to ensure fairness. \n \u2022 Regular monitoring: Bias detection and mitiga tion is an ongoing process, and it is \nimportant to regularly monitor the model for any ne w sources of bias that may emerge. This \ncan involve setting up automated monitoring systems that flag potential bias in real time, as \nwell as regular audits of the model\u2019s performance. \n \n \n4.3 Multimodal Integration \n Researchers can explore the integration of ChatGPT s with other modalities such as \nimages and videos to enhance their applications in fields such as education and healthcare. \nMultimodal integration is an important research dir ection for language models, as it involves \ncombining multiple modalities of information, such as text, images, and audio, to generate more \ncomprehensive and accurate outputs. Multimodal inte gration can help Chatbots better \nunderstand and respond to complex inputs, and can e nable more natural and intuitive \ninteractions between humans and machines. 
\nOne approach to multimodal integration in ChatGPTs is", "doc_id": "6012fe95-dfd4-477c-b0c5-98be7f19251a", "embedding": null, "doc_hash": "01eab58188ce2f2e31bbecdc4f945f59e1910edf9e2ec4f284bba450965cb5f9", "extra_info": null, "node_info": {"start": 51245, "end": 55273}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "0780cde1-9a19-4cbd-9525-12a03b247a78", "3": "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a"}}, "__type__": "1"}, "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a": {"__data__": {"text": "automated monitoring systems that flag potential bias in real time, as \nwell as regular audits of the model\u2019s performance. \n \n \n4.3 Multimodal Integration \n Researchers can explore the integration of ChatGPT s with other modalities such as \nimages and videos to enhance their applications in fields such as education and healthcare. \nMultimodal integration is an important research dir ection for language models, as it involves \ncombining multiple modalities of information, such as text, images, and audio, to generate more \ncomprehensive and accurate outputs. Multimodal inte gration can help Chatbots better \nunderstand and respond to complex inputs, and can e nable more natural and intuitive \ninteractions between humans and machines. \nOne approach to multimodal integration in ChatGPTs is to use a multimodal transformer \narchitecture [60], which incorporates multiple moda lities of input into a single transformer \nmodel. This approach has been used in a number of a pplications, such as image captioning (e.g., \nXGPT [61]) and video question answering (e.g., AVQA [62), with promising results. Another \napproach is to use multimodal fusion techniques to combine the outputs of separate models \ntrained on different modalities [63]. For example, Zhu et al. used separate models for text and \nimages, and then combined their outputs using a fus ion approach based on self-attention [64]. 
\nIn addition to these approaches, there is ongoing r esearch into developing new methods for \nmultimodal integration in language models. For exam ple, recent work has explored the use of \nreinforcement learning to learn how to weight diffe rent modalities of input based on their \nrelative importance [65]. Another area of research is developing methods to incorporate \nmultimodal inputs that are not synchronous, such as when text and audio inputs are recorded \nseparately [66]. In general, multimodal integration is an important research direction for \nlanguage models, as it enables more flexible and po werful interactions with users and can \nimprove the accuracy and comprehensiveness of the m odel\u2019s output. \n4.4 Transfer Learning \n \nTransfer learning is a research direction that invo lves using pre-trained models to perform \nspecific tasks in various fields. Researchers can e xplore how ChatGPTs can be fine-tuned for \nspecific applications in education, software engine ering, healthcare, and marketing. \nTransfer learning is a powerful technique that has been successfully applied to language \nmodels [66]. Transfer learning refers to the proces s of training a model on one task or domain, \nand then transferring that knowledge to a new task or domain [67]. In the case of Chatbots, \ntransfer learning involves pre-training a model on a large corpus of text data, and then fine-tuning \nthe model on a specific task or domain. Such method has several benefits for ChatGPTs. First, it \ncan help address the issue of limited training data for specific tasks or domains, by allowing the \nmodel to leverage the knowledge it has gained from pre-training on large amounts of data. \nSecond, transfer learning can help reduce the compu tational cost of training a new model from \nscratch, as the pre-trained model can serve as a st arting point for fine-tuning. \nThere are several approaches to transfer learning i n ChatGPTs. 
One common approach is \nto use a pre-trained model such as GPT-2 or GPT-3, which have been trained on large amounts \nof diverse text data. The pre-trained model can the n be fine-tuned on a specific task or domain, \nsuch as sentiment analysis or question answering, b y further training the model on a smaller \ndataset specific to that task. Another approach is to use transfer learning to adapt a pre-trained \nmodel to a new language. This involves pre-training the model on a large corpus of text data in \nthe new language, and then fine-tuning the model on specific tasks or domains in that language. \nOverall, transfer learning is a powerful technique for language models, as it enables the model \nto leverage knowledge", "doc_id": "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a", "embedding": null, "doc_hash": "2f2c6bbdd753e9b91656d525c47c7ce63470678033397425231436b5d18ab5cd", "extra_info": null, "node_info": {"start": 55289, "end": 59354}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "6012fe95-dfd4-477c-b0c5-98be7f19251a", "3": "f7b61543-9a3f-4a9e-908a-ebf80a05d53e"}}, "__type__": "1"}, "f7b61543-9a3f-4a9e-908a-ebf80a05d53e": {"__data__": {"text": " \nThere are several approaches to transfer learning i n ChatGPTs. One common approach is \nto use a pre-trained model such as GPT-2 or GPT-3, which have been trained on large amounts \nof diverse text data. The pre-trained model can the n be fine-tuned on a specific task or domain, \nsuch as sentiment analysis or question answering, b y further training the model on a smaller \ndataset specific to that task. Another approach is to use transfer learning to adapt a pre-trained \nmodel to a new language. This involves pre-training the model on a large corpus of text data in \nthe new language, and then fine-tuning the model on specific tasks or domains in that language. 
\nOverall, transfer learning is a powerful technique for language models, as it enables the model \nto leverage knowledge gained from pre-training on l arge amounts of data, and can help address \nthe issue of limited training data for specific tas ks or domains. \n \n5 Conclusions \n \nChatGPT and other advanced language models/chatbots are powerful disruptive tools \nthat have the potential to revolutionize various fi elds such as education, software engineering, \nhealthcare, and marketing. However, its drawbacks, such as plagiarism, bias and lack of \ntransparency, need to be addressed, and researchers need to explore research directions such \nas explainability, bias detection and mitigation, m ultimodal integration, and transfer learning to \nensure ChatGPTs are used responsibly and thoughtful ly. The work in this paper surveyed the \npossible avenues where language models can positive ly or negatively contribute to that area, \nwhat possible changes need to be made to counter th e negatives or misuse scenarios, and the \nfuture research directions necessary to achieve wid e, effective, and proper deployment. \n \n \n \n \nReferences \n \n \n1. Kung, T.H.; Cheatham, M.; Medenilla, A.; Sillos, C.; Leon, L.D.; Elepa\u00f1o, C.; Madriaga, M.; \nAggabao, R.; Diaz-Candido, G.; Maningo, J.; et al. Performance of ChatGPT on USMLE: \nPotential for AI-Assisted Medical Education Using La rge Language Models 2022 . \nhttps://doi.org/10.1101/2022.12.19.22283643. \n \n2. Weizenbaum, J. Computer power and human reason ; W.H. Freeman: New York, NY, \nUSA, 1976. \n \n3. Wallace, R.S. The Anatomy of A.L.I.C.E. In Parsing the Turing Test ; Springer \nNetherlands, 2007; pp. 181\u2013210. https://doi.org/10 .1007/978-1-4020-6710-5_13. \n \n4. Carpenter, R. Cleverbot \u2014 cleverbot.com. https:// www.cleverbot.com/, 2008. \n[Accessed 27-Feb-2023]. \n \n5. OpenAI. ChatGPT: Optimizing Language Models for Dia logue \u2014 openai.com. \nhttps://openai.com/blog/chatgpt/, 2022. 
[Accessed 27-Feb-2023]. \n \n6. Yang, Z.; Dai, Z.; Yang, Y.; Carbonell, J.; Salakhu tdinov, R.R.; Le, Q.V. XLNet: Generalized \nAutoregressive Pretraining for Language Understandin g. In Proceedings of the Advances \nin Neural Information Processing Systems; Wallach, H.; Larochelle, H.; Beygelzimer, A.; \nd\u00c1lch\u00e9 Buc, F.; Fox, E.; Garnett, R., Eds. Curran A ssociates, Inc., 2019, Vol. 32. \n \n7. Lepikhin, D.; Lee, H.; Xu, Y.; Chen, D.; Firat, O.; H uang, Y.; Krikun, M.; Shazeer, N.; Chen, \nZ. GShard: Scaling Giant Models with Conditional C", "doc_id": "f7b61543-9a3f-4a9e-908a-ebf80a05d53e", "embedding": null, "doc_hash": "330b041a424b4099694487e34da27b691ff0ad79c73dc09daa2dac9decbcb70b", "extra_info": null, "node_info": {"start": 59377, "end": 62559}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a", "3": "c5242644-4e19-4413-9b0f-20b838b08201"}}, "__type__": "1"}, "c5242644-4e19-4413-9b0f-20b838b08201": {"__data__": {"text": "Z.; Yang, Y.; Carbonell, J.; Salakhu tdinov, R.R.; Le, Q.V. XLNet: Generalized \nAutoregressive Pretraining for Language Understandin g. In Proceedings of the Advances \nin Neural Information Processing Systems; Wallach, H.; Larochelle, H.; Beygelzimer, A.; \nd\u00c1lch\u00e9 Buc, F.; Fox, E.; Garnett, R., Eds. Curran A ssociates, Inc., 2019, Vol. 32. \n \n7. Lepikhin, D.; Lee, H.; Xu, Y.; Chen, D.; Firat, O.; H uang, Y.; Krikun, M.; Shazeer, N.; Chen, \nZ. GShard: Scaling Giant Models with Conditional C omputation and Automatic Sharding. \nCoRR 2020 , abs/2006.16668 , http://xxx.lanl.gov/abs/2006.16668 [2006.16668]. \n \n8. Liu, Y.; Ott, M.; Goyal, N.; Du, J.; Joshi, M.; Chen , D.; Levy, O.; Lewis, M.; Zettlemoyer, L.; \nStoyanov, V. RoBERTa: A Robustly Optimized BERT Pr etraining Approach. CoRR 2019 , \nabs/1907.11692 , http://xxx.lanl.gov/abs/1907.11692 [1907.11692]. \n \n9. 
Raffel, C.; Shazeer, N.; Roberts, A.; Lee, K.; Narang , S.; Matena, M.; Zhou, Y.; Li, W.; Liu, \nP.J. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. \nJournal of Machine Learning Research 2020 , 21 , 1\u201367. \n \n10. Tapalova, O.; Zhiyenbayeva, N. Artificial Intellig ence in Education: AIEd for Personalised \nLearning Pathways. Electronic Journal of e-Learning 2022 , 20 , 639\u2013653. \nhttps://doi.org/10.34190/ejel.20.5.2597. \n \n11. Kumar, V.; Boulanger, D. Explainable Automated Essa y Scoring: Deep Learning Really \nHas Pedagogical Value. Frontiers in Education 2020 , 5. \nhttps://doi.org/10.3389/feduc.2020.572367. \n \n12. Raychev, V.; Vechev, M.; Yahav, E. Code completion with statistical language models. In \nProceedings of the Proceedings of the 35th ACM SIGP LAN Conference on Programming \nLanguage Design and Implementation. ACM, 2014. \nhttps://doi.org/10.1145/2594291.2594321. \n \n13. Le, K.T.; Rashidi, G.; Andrzejak, A. A methodology f or refined evaluation of neural code \ncompletion approaches. Data Mining and Knowledge Discovery 2022 , 37 , 167\u2013204. \nhttps://doi.org/10.1007/s10618-022-00866-9. \n \n14. Tu, Z.; Su, Z.; Devanbu, P. On the localness of so ftware. In Proceedings of the \nProceedings of the 22nd ACM SIGSOFT International S ymposium on Foundations of \nSoftware Engineering. ACM, 2014. https://doi.org/1 0.1145/2635868.2635875. \n \n15. Allamanis, M.; Brockschmidt, M.; Khademi, M. Learnin g to Represent Programs with \nGraphs, 2017. https://doi.org/10.48550/ARXIV.1711. 00740. \n \n16. Allamanis, M.; Barr, E.T.; Devanbu, P.; Sutton, C. A Survey of Machine Learning for Big \nCode and Naturalness. ACM Computing Surveys 2018 , 51 , 1\u201337. 
", "doc_id": "c5242644-4e19-4413-9b0f-20b838b08201", "embedding": null, "doc_hash": "c6275dfe81b828294b7f07fe6471694c37fe5a9ec51b67079f593bc9f71db73d", "extra_info": null, "node_info": {"start": 62799, "end": 65431}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "f7b61543-9a3f-4a9e-908a-ebf80a05d53e", "3": "fb85b6f9-c436-44bb-a256-53b372536df4"}}, "__type__": "1"}, "fb85b6f9-c436-44bb-a256-53b372536df4": {"__data__": {"text": "P. On the localness of so ftware. In Proceedings of the \nProceedings of the 22nd ACM SIGSOFT International S ymposium on Foundations of \nSoftware Engineering. ACM, 2014. https://doi.org/1 0.1145/2635868.2635875. \n \n15. Allamanis, M.; Brockschmidt, M.; Khademi, M. Learnin g to Represent Programs with \nGraphs, 2017. https://doi.org/10.48550/ARXIV.1711. 00740. \n \n16. Allamanis, M.; Barr, E.T.; Devanbu, P.; Sutton, C. A Survey of Machine Learning for Big \nCode and Naturalness. ACM Computing Surveys 2018 , 51 , 1\u201337. \nhttps://doi.org/10.1145/3212695. \n \n17. Adlung, L.; Cohen, Y.; Mor, U.; Elinav, E. Machine learning in clinical decision making. \nMed 2021 , 2, 642\u2013665. https://doi.org/10.1016/j.medj.2021.04. 006. \n18. Wu, L.; Chen, S.; Guo, L.; Shpyleva, S.; Harris, K.; F ahmi, T.; Flanigan, T.; Tong, W.; Xu, J.; \nRen, Z. Development of benchmark datasets for text mining and sentiment analysis to \naccelerate regulatory literature review. Regulatory Toxicology and Pharmacology \n2023 , 137 , 105287. https://doi.org/10.1016/j.yrtph.2022.105 287. \n \n19. Lederman, A.; Lederman, R.; Verspoor, K. Tasks as nee ds: reframing the paradigm of \nclinical natural language processing research for r eal-world decision support. Journal of \nthe American Medical Informatics Association 2022 , 29 , 1810\u20131817. \nhttps://doi.org/10.1093/jamia/ocac121. \n \n20. Gururangan, S.; Marasovic, A.; Swayamdipta, S.; Lo, K.; Beltagy, I.; Downey, D.; Smith, \nN.A. 
Don\u2019t Stop Pretraining: Adapt Language Models to Domains and Tasks. CoRR \n2020 , abs/2004.10964 , http://xxx.lanl.gov/abs/2004.10964 [2004.10964]. \n \n21. Liu, Z.; Roberts, R.A.; Lal-Nag, M.; Chen, X.; Huang, R.; Tong, W. AI-based language \nmodels powering drug discovery and development. Drug Discovery Today 2021 , 26 , \n2593\u20132607. https://doi.org/10.1016/j.drudis.2021.0 6.009. \n \n22. Tripathi, A.; Misra, K.; Dhanuka, R.; Singh, J.P. A rtificial Intelligence in Accelerating Drug \nDiscovery and Development. Recent Patents on Biotechnology 2023 , 17 , 9\u201323. \nhttps://doi.org/10.2174/1872208316666220802151129. \n \n23. Bhatnagar, R.; Sardar, S.; Beheshti, M.; Podichetty , J.T. How can natural language \nprocessing help model informed drug development?: a review. JAMIA Open 2022 , 5. \nhttps://doi.org/10.1093/jamiaopen/ooac043. \n \n24. Leyens, L.; Reumann, M.; Malats, N.; Brand, A. Use o f big data for drug development \nand for public and personal health and care. Genetic Epidemiology 2016 , 41 , 51\u201360. \nhttps://doi.org/10.1002/gepi.22012. \n \n25. Ball, R.; Pan, G.D. \u201cArtificial Intelligence\u201d for", "doc_id": "fb85b6f9-c436-44bb-a256-53b372536df4", "embedding": null, "doc_hash": "a7c9c327eaa28c1a37685a7405789b3a4fcd275c0c77b8a15637fb7d3994b06f", "extra_info": null, "node_info": {"start": 65406, "end": 68047}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "c5242644-4e19-4413-9b0f-20b838b08201", "3": "25027da7-f677-499d-90e0-5665fa5133c5"}}, "__type__": "1"}, "25027da7-f677-499d-90e0-5665fa5133c5": {"__data__": {"text": " \n \n23. Bhatnagar, R.; Sardar, S.; Beheshti, M.; Podichetty , J.T. How can natural language \nprocessing help model informed drug development?: a review. JAMIA Open 2022 , 5. \nhttps://doi.org/10.1093/jamiaopen/ooac043. \n \n24. Leyens, L.; Reumann, M.; Malats, N.; Brand, A. Use o f big data for drug development \nand for public and personal health and care. 
Genetic Epidemiology 2016 , 41 , 51\u201360. \nhttps://doi.org/10.1002/gepi.22012. \n \n25. Ball, R.; Pan, G.D. \u201cArtificial Intelligence\u201d for Pharmacovigilance: Ready for Prime Time? \nDrug Safety 2022 , 45 , 429\u2013438. https://doi.org/10.1007/s40264-022-0115 7-4. \n \n26. Koneti, G.; Das, S.S.; Bahl, J.; Ranjan, P.; Ramamur thi, N. Discovering the Knowledge in \nUnstructured Early Drug Development Data Using NLP a nd Advanced Analytics. In \nProceedings of the 2022 IEEE International Conferen ce on Bioinformatics and \nBiomedicine (BIBM), 2022, pp. 3840\u20133842. \nhttps://doi.org/10.1109/BIBM55620.2022.9995435. \n \n27. Wang, X.; Xu, X.; Tong, W.; Liu, Q.; Liu, Z. DeepCa usality: A general AI-powered causal \ninference framework for free text: A case study of LiverTox. Frontiers in Artificial \nIntelligence 2022 , 5. https://doi.org/10.3389/frai.2022.999289. \n \n28. Lee, J.; Yoon, W.; Kim, S.; Kim, D.; Kim, S.; So, C.H .; Kang, J. BioBERT: a pre-trained \nbiomedical language representation model for biomed ical text mining. Bioinformatics \n2019 , 36 , 1234\u20131240. https://doi.org/10.1093/bioinformatic s/btz682. \n \n29. Huang, K.; Altosaar, J.; Ranganath, R. ClinicalBERT : Modeling Clinical Notes and \nPredicting Hospital Readmission. CoRR 2019 , abs/1904.05342 , \nhttp://xxx.lanl.gov/abs/1904.05342 [1904.05342]. \n \n30. Alsentzer, E.; Murphy, J.; Boag, W.; Weng, W.H.; Ji ndi, D.; Naumann, T.; McDermott, M. \nPublicly Available Clinical. In Proceedings of the Proceedings of the 2nd Clinical Natural \nLanguage Processing Workshop. Association for Comput ational Linguistics, 2019. \nhttps://doi.org/10.18653/v1/w19-1909. \n \n31. Verma, S.; Sharma, R.; Deb, S.; Maitra, D. Artific ial intelligence in marketing: Systematic \nreview and future research direction. International Journal of Information \nManagement Data Insights 2021 , 1, 100002. \nhttps://doi.org/10.1016/j.jjimei.2020.100002. \n \n32. Nguyen, Q.N.; Sidorova, A.; Torres, R. 
User intera ctions with chatbot interfaces vs. \nMenu-based interfaces: An empirical study. Computers in Human Behavior 2022 , 128 , \n107093. https://doi.org/10.1016/j.chb.2021.107093. \n \n33. Chipman, S. What is CRM artificial intelligence an d what can it do for my business? - \nCRM Switch \u2014 crmswitch.com, 2023. [Accessed 06-Mar -2023]. \n \n34. ", "doc_id": "25027da7-f677-499d-90e0-5665fa5133c5", "embedding": null, "doc_hash": "f53f49e06053ca0c480ae781ff6177a173e3f22217f1969f62f1bc5ba098db3d", "extra_info": null, "node_info": {"start": 68074, "end": 70786}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "fb85b6f9-c436-44bb-a256-53b372536df4", "3": "e62b0869-d6f1-4863-9be0-173f9fbfae9d"}}, "__type__": "1"}, "e62b0869-d6f1-4863-9be0-173f9fbfae9d": {"__data__": {"text": " International Journal of Information \nManagement Data Insights 2021 , 1, 100002. \nhttps://doi.org/10.1016/j.jjimei.2020.100002. \n \n32. Nguyen, Q.N.; Sidorova, A.; Torres, R. User intera ctions with chatbot interfaces vs. \nMenu-based interfaces: An empirical study. Computers in Human Behavior 2022 , 128 , \n107093. https://doi.org/10.1016/j.chb.2021.107093. \n \n33. Chipman, S. What is CRM artificial intelligence an d what can it do for my business? - \nCRM Switch \u2014 crmswitch.com, 2023. [Accessed 06-Mar -2023]. \n \n34. Mauro, A.D.; Sestino, A.; Bacconi, A. Machine lear ning and artificial intelligence use in \nmarketing: a general taxonomy. Italian Journal of Marketing 2022 , 2022 , 439\u2013457. \nhttps://doi.org/10.1007/s43039-022-00057-w. \n \n35. Duarte, V.; Zuniga-Jara, S.; Contreras, S. Machine Learning and Marketing: A Systematic \nLiterature Review. IEEE Access 2022 , 10 , 93273\u201393288. \nhttps://doi.org/10.1109/access.2022.3202896. \n \n36. Bruyn, A.D.; Viswanathan, V.; Beh, Y.S.; Brock, J.K. U.; von Wangenheim, F. Artificial \nIntelligence and Marketing: Pitfalls and Opportunit ies. Journal of Interactive Marketing \n2020 , 51 , 91\u2013105. 
https://doi.org/10.1016/j.intmar.2020.04 .007. \n \n37. Schramowski, P.; Turan, C.; Andersen, N.; Rothkopf, C.A.; Kersting, K. Large pre-trained \nlanguage models contain human-like biases of what i s right and wrong to do. Nature \nMachine Intelligence 2022 , 4, 258\u2013268. https://doi.org/10.1038/s42256-022-0045 8-8. \n \n \n38. Rudin, C. Stop explaining black box machine learni ng models for high stakes decisions \nand use interpretable models instead. Nature Machine Intelligence 2019 , 1, 206\u2013215. \nhttps://doi.org/10.1038/s42256-019-0048-x. \n \n \n39. Yang, X. An Overview of the Attention Mechanisms i n Computer Vision. Journal of \nPhysics: Conference Series 2020 , 1693 , 012173. https://doi.org/10.1088/1742-\n6596/1693/1/012173. \n \n40. An, J.; Joe, I. Attention Map-Guided Visual Explan ations for Deep Neural Networks. \nApplied Sciences 2022 , 12 , 3846. https://doi.org/10.3390/app12083846. \n \n41. Hicks, S.A.; Isaksen, J.L.; Thambawita, V.; Ghouse, J.; Ahlberg, G.; Linneberg, A.; Grarup, \nN.; Str\u00fcmke, I.; Ellervik, C.; Olesen, M.S.; et al. Explaining deep neural networks for \nknowledge discovery in electrocardiogram analysis. Scientific Reports 2021 , 11 . \nhttps://doi.org/10.1038/s41598-021-90285-5. \n \n42. Riegler, M.; Lux, M.; Griwodz, C.; Spampinato, C.; d e Lange, T.; Eskeland, S.L.; Pogorelov, \nK.; Tavanapong, W.; Schmidt, P.T.; Gurrin, C.; et al . Multimedia and Medicine. In", "doc_id": "e62b0869-d6f1-4863-9be0-173f9fbfae9d", "embedding": null, "doc_hash": "688488b63effcb96f645323bed3441480a9d9ac0aec1b9cad761890e6a14c33c", "extra_info": null, "node_info": {"start": 70756, "end": 73390}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "25027da7-f677-499d-90e0-5665fa5133c5", "3": "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60"}}, "__type__": "1"}, "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60": {"__data__": {"text": "V.; Ghouse, J.; Ahlberg, G.; Linneberg, A.; Grarup, \nN.; Str\u00fcmke, I.; Ellervik, C.; Olesen, M.S.; et al. 
Explaining deep neural networks for \nknowledge discovery in electrocardiogram analysis. Scientific Reports 2021 , 11 . \nhttps://doi.org/10.1038/s41598-021-90285-5. \n \n42. Riegler, M.; Lux, M.; Griwodz, C.; Spampinato, C.; d e Lange, T.; Eskeland, S.L.; Pogorelov, \nK.; Tavanapong, W.; Schmidt, P.T.; Gurrin, C.; et al . Multimedia and Medicine. In \nProceedings of the Proceedings of the 24th ACM inte rnational conference on \nMultimedia. ACM, 2016. https://doi.org/10.1145/296 4284.2976760. \n \n43. Kelly, C.J.; Karthikesalingam, A.; Suleyman, M.; Corr ado, G.; King, D. Key challenges for \ndelivering clinical impact with artificial intellig ence. BMC Medicine 2019 , 17 . \nhttps://doi.org/10.1186/s12916-019-1426-2. \n \n44. Chen, D.; Liu, S.; Kingsbury, P.; Sohn, S.; Storlie, C.B.; Habermann, E.B.; Naessens, J.M.; \nLarson, D.W.; Liu, H. Deep learning and alternative learning strategies for retrospective \nreal-world clinical data. npj Digital Medicine 2019 , 2. https://doi.org/10.1038/s41746-\n019-0122-0. \n \n45. Khasawneh, N.; Fraiwan, M.; Fraiwan, L. Detection of K-complexes in EEG waveform \nimages using faster R-CNN and deep transfer learnin g. BMC Medical Informatics and \nDecision Making 2022 , 22 . https://doi.org/10.1186/s12911-022-02042-x. \n \n \n46. Khasawneh, N.; Fraiwan, M.; Fraiwan, L. Detection of K-complexes in EEG signals using \ndeep transfer learning and YOLOv3. Cluster Computing 2022 . \nhttps://doi.org/10.1007/s10586-022-03802-0. \n \n \n47. Samek, W.; Montavon, G.; Vedaldi, A.; Hansen, L.K.; M\u00fcller, K.R. Explainable AI: \nInterpreting, explaining and visualizing deep learn ing ; Springer Nature: Cham, \nSwitzerland, 2019. \n \n \n48. Samek, W.; M\u00fcller, K.R. Towards Explainable Artific ial Intelligence. In Explainable AI: \nInterpreting, Explaining and Visualizing Deep Learni ng ; Springer International Publishing, \n2019; pp. 5\u201322. https://doi.org/10.1007/978-3-030- 28954-6_1. \n \n \n49. 
Singh, A.; Sengupta, S.; Lakshminarayanan, V. Expla inable Deep Learning Models in \nMedical Image Analysis. Journal of Imaging 2020 , 6, 52. \nhttps://doi.org/10.3390/jimaging6060052. \n \n \n50. Bai, X.; Wang, X.; Liu, X.; Liu, Q.; Song, J.; Sebe, N.; Kim, B. Explainable deep learning for \nefficient and robust pattern recognition: A survey of recent developments. Pattern \nRecognition 2021 , 120 , 108102. https://doi.org/10.1016/j.patcog.2021.10 8102. \n \n \n51. Choo, J.; Liu, S. Visual Analytics for Explainable Deep Learning. ", "doc_id": "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60", "embedding": null, "doc_hash": "eb2e1fe28f046e882d541656cb9923f4adb8044a85c857d88950e044c9e41f77", "extra_info": null, "node_info": {"start": 73451, "end": 76075}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "e62b0869-d6f1-4863-9be0-173f9fbfae9d", "3": "bf73a3f1-3f4f-4c5d-8809-5acbede2d600"}}, "__type__": "1"}, "bf73a3f1-3f4f-4c5d-8809-5acbede2d600": {"__data__": {"text": "S.; Lakshminarayanan, V. Expla inable Deep Learning Models in \nMedical Image Analysis. Journal of Imaging 2020 , 6, 52. \nhttps://doi.org/10.3390/jimaging6060052. \n \n \n50. Bai, X.; Wang, X.; Liu, X.; Liu, Q.; Song, J.; Sebe, N.; Kim, B. Explainable deep learning for \nefficient and robust pattern recognition: A survey of recent developments. Pattern \nRecognition 2021 , 120 , 108102. https://doi.org/10.1016/j.patcog.2021.10 8102. \n \n \n51. Choo, J.; Liu, S. Visual Analytics for Explainable Deep Learning. IEEE Computer Graphics \nand Applications 2018 , 38 , 84\u201392. https://doi.org/10.1109/MCG.2018.04273166 1. \n \n \n52. Liu, Y.; Li, H.; Guo, Y.; Kong, C.; Li, J.; Wang, S. Rethinking Attention-Model Explainability \nthrough Faithfulness Violation Test. CoRR 2022 , abs/2201.12114 , \nhttp://xxx.lanl.gov/abs/2201.12114 [2201.12114]. \n \n \n53. Ribeiro, M.T.; Singh, S.; Guestrin, C. \"Why Should I Trust You?\": Explaining the \nPredictions of Any Classifier. 
CoRR 2016 , abs/1602.04938 , \nhttp://xxx.lanl.gov/abs/1602.04938 [1602.04938]. \n \n \n54. Aditya, P.S.R.; Pal, M. Local Interpretable Model Agnostic Shap Explanations for \nmachine learning models, 2022. https://doi.org/10. 48550/ARXIV.2210.04533. \n \n \n55. Zhao, W.; Alwidian, S.; Mahmoud, Q.H. Adversarial Training Methods for Deep Learning: \nA Systematic Review. Algorithms 2022 , 15 , 283. https://doi.org/10.3390/a15080283. \n \n56. Liu, X.; Cheng, H.; He, P.; Chen, W.; Wang, Y.; Poon , H.; Gao, J. Adversarial Training for \nLarge Neural Language Models. CoRR 2020 , abs/2004.08994 , \nhttp://xxx.lanl.gov/abs/2004.08994 [2004.08994]. \n \n \n57. Movahedi, S.; Shakery, A. Generative Adversarial T raining Can Improve Neural Language \nModels, 2022. https://doi.org/10.48550/ARXIV.2211. 09728. \n \n \n58. Yoo, J.Y.; Qi, Y. Towards Improving Adversarial Tr aining of NLP Models. In Proceedings \nof the Findings of the Association for Computationa l Linguistics: EMNLP 2021; \nAssociation for Computational Linguistics: Punta Ca na, Dominican Republic, 2021; pp. \n945\u2013956. https://doi.org/10.18653/v1/2021.findings -emnlp.81. \n \n \n59. Wang, D.; Gong, C.; Liu, Q. Improving Neural Languag e Modeling via Adversarial \nTraining. In Proceedings of the Proceedings of the 36th International Conference on \nMachine Learning; Chaudhuri, K.; Salakhutdinov, R., E ds. PMLR, 2019, Vol. 97, \nProceedings of Machine Learning Research , pp. 6555\u20136565. \n \n \n60. Xu, P.; Zhu, X.; Clifton, D.A. Multimodal Learning with Transformers: A Survey, 2022. \nhttps://doi.org/10.48550/ARXIV.2206.06488. \n \n \n61. 
", "doc_id": "bf73a3f1-3f4f-4c5d-8809-5acbede2d600", "embedding": null, "doc_hash": "292cdeb011353a6ad2ed315a65f8556f35cc6cbdb8d3497519257890f429292e", "extra_info": null, "node_info": {"start": 76031, "end": 78645}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60", "3": "ee3a3718-eeac-4615-808c-897a216b27a5"}}, "__type__": "1"}, "ee3a3718-eeac-4615-808c-897a216b27a5": {"__data__": {"text": "-emnlp.81. \n \n \n59. Wang, D.; Gong, C.; Liu, Q. Improving Neural Languag e Modeling via Adversarial \nTraining. In Proceedings of the Proceedings of the 36th International Conference on \nMachine Learning; Chaudhuri, K.; Salakhutdinov, R., E ds. PMLR, 2019, Vol. 97, \nProceedings of Machine Learning Research , pp. 6555\u20136565. \n \n \n60. Xu, P.; Zhu, X.; Clifton, D.A. Multimodal Learning with Transformers: A Survey, 2022. \nhttps://doi.org/10.48550/ARXIV.2206.06488. \n \n \n61. Xia, Q.; Huang, H.; Duan, N.; Zhang, D.; Ji, L.; Sui , Z.; Cui, E.; Bharti, T.; Zhou, M. XGPT: \nCross-modal Generative Pre-Training for Image Capti oning. In Natural Language \nProcessing and Chinese Computing ; Springer International Publishing, 2021; pp. 786\u2013\n797. https://doi.org/10.1007/978-3-030-88480-2_63. \n \n \n62. Yun, H.; Yu, Y.; Yang, W.; Lee, K.; Kim, G. Pano-AVQA : Grounded Audio-Visual Question \nAnswering on 360 Videos. In Proceedings of the 202 1 IEEE/CVF International \nConference on Computer Vision (ICCV). IEEE, 2021. \nhttps://doi.org/10.1109/iccv48922.2021.00204. \n \n \n63. Paw \u0142owski, M.; Wr\u00f3blewska, A.; Sysko-Roma\u0144czuk, S. Eff ective Techniques for \nMultimodal Data Fusion: A Comparative Analysis. Sensors 2023 , 23 , 2381. \nhttps://doi.org/10.3390/s23052381. \n \n \n64. ] Zhu, H.; Wang, Z.; Shi, Y.; Hua, Y.; Xu, G.; Den g, L. Multimodal Fusion Method Based \non Self-Attention Mechanism. Wireless Communications and Mobile Computing 2020 , \n2020 , 1\u20138. https://doi.org/10.1155/2020/8843186. 
\n \n65. Bernardino, G.; Jonsson, A.; Loncaric, F.; Castello te, P.M.M.; Sitges, M.; Clarysse, P.; \nDuchateau, N. Reinforcement Learning for Active Mod ality Selection During Diagnosis. \nIn Lecture Notes in Computer Science ; Springer Nature Switzerland, 2022; pp. 592\u2013601. \nhttps://doi.org/10.1007/978-3-031-16431-6_56. \n \n \n66. Bayoudh, K.; Knani, R.; Hamdaoui, F.; Mtibaa, A. A s urvey on deep multimodal learning \nfor computer vision: advances, trends, applications , and datasets. The Visual Computer \n2021 , 38 , 2939\u20132970. https://doi.org/10.1007/s00371-021-02 166-7. \n \n \n67. Fraiwan, M.; Audat, Z.; Fraiwan, L.; Manasreh, T. U sing deep transfer learning to detect \nscoliosis and spondylolisthesis from x-ray images. PLOS ONE 2022 , 17 , e0267851. \nhttps://doi.org/10.1371/journal.pone.0267851. \n \n \n \n ", "doc_id": "ee3a3718-eeac-4615-808c-897a216b27a5", "embedding": null, "doc_hash": "e40fcbd51a88772e814351ed641ac753eff39b3b1d34e6de77941100423b5660", "extra_info": null, "node_info": {"start": 78595, "end": 80951}, "relationships": {"1": "878bdaad-e099-44e7-8cfb-d214c9e407d1", "2": "bf73a3f1-3f4f-4c5d-8809-5acbede2d600"}}, "__type__": "1"}}}
|
storage/index_store.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"index_store/data": {"d84ac1e3-f027-4285-87bb-d246398dcdc5": {"__type__": "vector_store", "__data__": {"index_id": "d84ac1e3-f027-4285-87bb-d246398dcdc5", "summary": null, "nodes_dict": {"45838104-6e6e-4c29-beda-59d5941d912e": "45838104-6e6e-4c29-beda-59d5941d912e", "bfa53f84-e81b-4d85-80a8-28e0864774aa": "bfa53f84-e81b-4d85-80a8-28e0864774aa", "9015c73c-a129-4422-bb81-2ad60b3e0458": "9015c73c-a129-4422-bb81-2ad60b3e0458", "3e0c2955-89ea-4a88-ab65-323f73c022a6": "3e0c2955-89ea-4a88-ab65-323f73c022a6", "eb6f75f5-4119-4a47-80f1-f967c1299210": "eb6f75f5-4119-4a47-80f1-f967c1299210", "e8b520f8-96ef-4abe-b6f5-fcb26be295e7": "e8b520f8-96ef-4abe-b6f5-fcb26be295e7", "041abddf-0d22-4833-a7aa-45330b5bb49e": "041abddf-0d22-4833-a7aa-45330b5bb49e", "eec8c5c1-025d-4d48-8c95-02960991813f": "eec8c5c1-025d-4d48-8c95-02960991813f", "5c196df3-e04a-46bc-89a1-d3406c2f0357": "5c196df3-e04a-46bc-89a1-d3406c2f0357", "76d2b8ea-8fc5-4e16-8413-1e632581c4d7": "76d2b8ea-8fc5-4e16-8413-1e632581c4d7", "6695aaa4-3f68-4863-9eff-8e37406349e8": "6695aaa4-3f68-4863-9eff-8e37406349e8", "f7c1761b-4d8d-41a2-85eb-b597ef039513": "f7c1761b-4d8d-41a2-85eb-b597ef039513", "0780cde1-9a19-4cbd-9525-12a03b247a78": "0780cde1-9a19-4cbd-9525-12a03b247a78", "6012fe95-dfd4-477c-b0c5-98be7f19251a": "6012fe95-dfd4-477c-b0c5-98be7f19251a", "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a": "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a", "f7b61543-9a3f-4a9e-908a-ebf80a05d53e": "f7b61543-9a3f-4a9e-908a-ebf80a05d53e", "c5242644-4e19-4413-9b0f-20b838b08201": "c5242644-4e19-4413-9b0f-20b838b08201", "fb85b6f9-c436-44bb-a256-53b372536df4": "fb85b6f9-c436-44bb-a256-53b372536df4", "25027da7-f677-499d-90e0-5665fa5133c5": "25027da7-f677-499d-90e0-5665fa5133c5", "e62b0869-d6f1-4863-9be0-173f9fbfae9d": "e62b0869-d6f1-4863-9be0-173f9fbfae9d", "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60": "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60", "bf73a3f1-3f4f-4c5d-8809-5acbede2d600": "bf73a3f1-3f4f-4c5d-8809-5acbede2d600", "ee3a3718-eeac-4615-808c-897a216b27a5": 
"ee3a3718-eeac-4615-808c-897a216b27a5"}, "doc_id_dict": {"878bdaad-e099-44e7-8cfb-d214c9e407d1": ["45838104-6e6e-4c29-beda-59d5941d912e", "bfa53f84-e81b-4d85-80a8-28e0864774aa", "9015c73c-a129-4422-bb81-2ad60b3e0458", "3e0c2955-89ea-4a88-ab65-323f73c022a6", "eb6f75f5-4119-4a47-80f1-f967c1299210", "e8b520f8-96ef-4abe-b6f5-fcb26be295e7", "041abddf-0d22-4833-a7aa-45330b5bb49e", "eec8c5c1-025d-4d48-8c95-02960991813f", "5c196df3-e04a-46bc-89a1-d3406c2f0357", "76d2b8ea-8fc5-4e16-8413-1e632581c4d7", "6695aaa4-3f68-4863-9eff-8e37406349e8", "f7c1761b-4d8d-41a2-85eb-b597ef039513", "0780cde1-9a19-4cbd-9525-12a03b247a78", "6012fe95-dfd4-477c-b0c5-98be7f19251a", "9aa77e4d-8cdc-4d30-98f1-053ec73fcb4a", "f7b61543-9a3f-4a9e-908a-ebf80a05d53e", "c5242644-4e19-4413-9b0f-20b838b08201", "fb85b6f9-c436-44bb-a256-53b372536df4", "25027da7-f677-499d-90e0-5665fa5133c5", "e62b0869-d6f1-4863-9be0-173f9fbfae9d", "f2a49d34-de3c-4aa1-8081-4ef7b9e79a60", "bf73a3f1-3f4f-4c5d-8809-5acbede2d600", "ee3a3718-eeac-4615-808c-897a216b27a5"]}, "embeddings_dict": {}}}}}
|
storage/vector_store.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
storage1/docstore.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"docstore/metadata": {"950acfd7-35b6-40e0-bf25-1f85e4b3951d": {"doc_hash": "115f908b343e3fbfc115d2d35fc40121180065cfa0a45347c25968fd00585996"}, "b3e21c43-f122-4228-800c-1f3b373b3960": {"doc_hash": "d098bb5f2200807de284af0693d8ca5e48f9c980b95b8a1b438583a8dc241e95"}, "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90": {"doc_hash": "12abe7258d39bcc8c697ce5683d58295831d4bc34ed4bd1ffab9ac698f98e390"}, "b2006ef0-2c96-48f2-b5a4-9f5ecebab917": {"doc_hash": "a440d7b733705c8d893d807288782fab81aef68d9447049c7d078521883697bb"}, "4d4caa9d-39d9-4341-8719-5c55510fc86f": {"doc_hash": "4997f4928a0d7b04b9bdca09e7d2d2d84735a83b981d4e994af437f4c780374d"}, "4753dd74-9764-40be-8baa-81348163e0bd": {"doc_hash": "dc1d91cfe2474537aca56dea190192ed6d6119b9b20370fc8a9c529d2e6c9aeb"}, "27891564-af8b-4c1b-b331-bae9cd5f0d38": {"doc_hash": "3defb0441bbacc24d7af3eec133fae96aad0894cf8f2d61aa5d4b36ea8f539e9"}, "67231d57-e066-4cda-84fe-b3df6c8e09a8": {"doc_hash": "f6774b2ad52f55383766520d0c887c8a752e8f1d48e31584245ff8738fdecb79"}, "84db127e-7780-4291-987b-f77a9b7f9dc4": {"doc_hash": "ae075fdcd61b5d6e17b02fd331837cffda11840c53f9317ca4325f5e01ca3d66"}, "7e380a6d-4fc5-4186-b584-08ef2cf17f80": {"doc_hash": "183c0390ce98320f1efc3a25d3ed26af27b203a053cc2c6360864631b90c8cb3"}, "e282d256-9f11-4515-b813-b90e589f831e": {"doc_hash": "52a88528898827b36ca9fd1337b9b45eeb03c04dc56e9942b8786cdb13a86d86"}, "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb": {"doc_hash": "d312cb2862d10d41648bdbba37d3d7fb89d3d51ea236838c0f8b9a846385c5aa"}, "851bfea7-7ea6-48e4-a4ae-3ed590d606c2": {"doc_hash": "141dab415abe67e410cc6f369155ff1a1a62e80b09276d348e64705d9eda762b"}, "0631eda8-3675-4993-bc6f-915690b5017e": {"doc_hash": "ca73dab4289caa0018ad9b5d032ad89f64c179e14e4cd3cf82c49afefb7c3e74"}, "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c": {"doc_hash": "19efe4c97b68584f1dea866c5bb0f2c6baf0688a80caed2dfd3369de55d770a8"}, "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd": {"doc_hash": "3d40cbd3dfcc5b054dea5fb7a0f02dab5dc95e4b06bed2cabce48d634840ab3c"}, 
"701d0aff-6665-4b22-98b5-4bd41470f1db": {"doc_hash": "5664f702c503e98901ae466b2f5b76b027003ec81c92883f03bce55f71e01d3c"}, "f5f3457d-3d38-45f9-bea0-60efa8549999": {"doc_hash": "d07dbc2768ea7cad29c7adc66e3ea6d99466633df038de89036ff4f836541741"}, "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e": {"doc_hash": "b9fbec865dfee7f2dfc4365eeb7997e9eaa5f50a5c7d4a80c484714866c129d1"}, "e8595aab-231a-4568-b65b-36a42d7ef43d": {"doc_hash": "3f8147e4834fbd7098232bc749c91ec7a5472bc45df3a09e03da7fe0b1d40601"}, "d2ba30fc-0e5f-4891-895d-6d148e261c7d": {"doc_hash": "80b40250f066146c9829a508255ba8e80a84b970f65564c0377d4d3d3e05c06b"}, "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e": {"doc_hash": "20f220fa1f7ae1983261aba8c2144f6155bd484dd423820af04d97c7a3a353d7"}, "6995bb4c-d4f0-412b-8b28-2350734610c8": {"doc_hash": "5c53c1cb8eebb1b4489fea60eea22a3af5e047a1b2a07df3fa50f7f8f11cc1c6"}, "cb1939e2-a3e0-4684-9c87-7cbf0c6f5314": {"doc_hash": "c6f55f67f8b25ae149d4795dc1605505565e16fb62fb9efcea3732b0b8bd1e45"}, "12fb5d28-5df9-418b-95ed-2632fdfc8513": {"doc_hash": "be20b20392adf17fe5fa38e0b8d56cb92e923180c3e8d30b0465804034c4b995"}, "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d": {"doc_hash": "d1c082619393529521241debd6fc3f1e29a721ce617ac543c5e604285a679ee1"}, "569a331a-329c-4d7b-9508-a7b361c3cac0": {"doc_hash": "0c112ca8347fc8ad43aad6378eb6d93c4f34244486345e35b6a4ef6d31955097"}}, "docstore/data": {"b3e21c43-f122-4228-800c-1f3b373b3960": {"__data__": {"text": "Finite-size effects of avalanche dynamics\nChristian W. Eurich *\nInstitut fu\u00a8r Theoretische Physik, Universita \u00a8t Bremen, Otto-Hahn-Allee 1, D-28334 Bremen, Germany\nJ. Michael Herrmann\nMax-Planck-Institut fu \u00a8r Stro\u00a8mungsforschung, Bunsenstrasse 10, D-37073 Go \u00a8ttingen, Germany\nUdo A. 
Ernst\nInstitut fu\u00a8r Theoretische Physik, Universita \u00a8t Bremen, Otto-Hahn-Allee 1, D-28334 Bremen, Germany\n~Received 14 September 2000; published 31 December 2002 !\nWe study the avalanche dynamics of a system of globally coupled threshold elements receiving random\ninput. The model belongs to the same universality class as the random-neighbor version of the Olami-Feder-Christensen stick-slip model. A closed expression for avalanche size distributions is derived for arbitrarysystem sizes Nusing geometrical arguments in the system\u2019s con\ufb01guration space. For \ufb01nite systems, approxi-\nmate power-law behavior is obtained in the nonconservative regime, whereas for N!\u2018, critical behavior with\nan exponent of 23/2 is found in the conservative case only. We compare these results to the avalanche\nproperties found in networks of integrate-and-\ufb01re neurons, and relate the different dynamical regimes to theemergence of synchronization with and without oscillatory components.\nDOI: 10.1103/PhysRevE.66.066137 PACS number ~s!: 05.65. 1b, 05.70.Ln, 45.70.Ht, 87.18.Sn\nI. INTRODUCTION\nIn the last decade, a considerable number of publications\nhave been dedicated to the occurrence of power-law behaviorin systems involving interacting threshold elements drivenby slow external input. The dynamics accounts for phenom-ena occurring in such diverse systems as piles of granularmatter @1#, earthquakes @2#, the game of life @3#, friction @4#,\nand sound generated in the lung during breathing @5#.A n\navalanche of theoretical investigations was triggered by Bak,Tang, and Wiesenfeld @6#who linked the occurrence of\npower laws to the notion of self-organized criticality ~SOC!.\nIn the so-called sandpile models, locally connected elementsreceiving random input self-organize into a critical statecharacterized by power-law distributions of avalanches with-out the explicit tuning of a model parameter ~e.g., Refs.\n@7\u201318 #!. 
Analytical results were derived for sandpile models\n@14,15 #, and it was shown that the existence of a conserva-\ntion law is a necessary prerequisite to obtain SOC @16\u201318 #.\nA second class of models inspired by earthquake dynam-\nics employs continuous driving and nonconservative interac-tion between the elements of the system @4,19#. In the Olami-\nFeder-Christensen ~OFC!model @19#, where the amount of\ndissipation is controlled by a parameter\na, power-law behav-\nior of avalanches occurs for a wide range of avalues. Sub-\nsequent investigations emphasized the importance of bound-ary conditions and tied the existence of the observed scalingbehavior to synchronization phenomena induced by spatialinhomogeneities @20\u201324 #. More speci\ufb01cally, Lise and Jensen\n@25#introduced a random-neighbor interaction in the OFC\nmodel to avoid the buildup of spatial correlations. Furtheranalysis indeed revealed that the random-neighbor OFCmodel does not display SOC in the dissipative regime @26\u2013\n28#.\nIn these avalanche models with nonconservative interac-\ntion, analytical results have been obtained only for system\nsizeN!\u2018so far @26,29 #. Here we introduce a model that\nnot only circumvents the problem of system boundaries,", "doc_id": "b3e21c43-f122-4228-800c-1f3b373b3960", "embedding": null, "doc_hash": "d098bb5f2200807de284af0693d8ca5e48f9c980b95b8a1b438583a8dc241e95", "extra_info": null, "node_info": {"start": 0, "end": 3410}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "3": "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90"}}, "__type__": "1"}, "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90": {"__data__": {"text": "a wide range of avalues. Sub-\nsequent investigations emphasized the importance of bound-ary conditions and tied the existence of the observed scalingbehavior to synchronization phenomena induced by spatialinhomogeneities @20\u201324 #. 
More speci\ufb01cally, Lise and Jensen\n@25#introduced a random-neighbor interaction in the OFC\nmodel to avoid the buildup of spatial correlations. Furtheranalysis indeed revealed that the random-neighbor OFCmodel does not display SOC in the dissipative regime @26\u2013\n28#.\nIn these avalanche models with nonconservative interac-\ntion, analytical results have been obtained only for system\nsizeN!\u2018so far @26,29 #. Here we introduce a model that\nnot only circumvents the problem of system boundaries, butyields an analytical access also for \ufb01nite system sizes N. The\nelements are globally connected, which makes the system amean-\ufb01eld model. Randomness is not introduced throughrandom neighbors but by providing a random external input.During an avalanche, the elements become unstable and re-lax in a \ufb01xed order determined by the state of the systemimmediately prior to the avalanche. Therefore, the system is\nstrictly Abelian for dissipation parameters\nasmaller than a\nthreshold value, which can be readily worked out. In thiscase, a geometrical approach in the N-dimensional con\ufb01gu-\nration space yields an exact equation for the distribution ofavalanche sizes.\nIn Sec. II, the model is speci\ufb01ed and compared with other\ndissipative avalanche models, in particular, with the random-neighbor OFC model. In Sec. III, avalanche properties arepresented both numerically and analytically, whereby detailsof the analytical calculation of the avalanche size distribu-tions can be found in Appendixes A\u2013C. Extensions and ap-plications of the model are formulated in the terminology ofneural networks: The model allows for an interpretation interms of a fully connected neural network of nonleakyintegrate-and-\ufb01re neurons. Implications of this view such asthe synchronization behavior of local, densely connectedpopulations of cortical neurons will be discussed in Sec. IV.The paper concludes with a brief summary and discussion.\nII. 
THE AVALANCHE MODEL\nA. De\ufb01nition\nIn the model, time is measured in discrete steps, t\n50,1,2,....Consider a set of Nidentical threshold ele- *Electronic address: eurich@physik.uni-bremen.dePHYSICAL REVIEW E 66, 066137 ~2002!\n1063-651X/2002/66 ~6!/066137 ~15!/$20.00 \u00a92002 The American Physical Society 66066137-1\nments characterized by a state variable u>0, which will\nhenceforth be called energy. The system is initialized with\narbitrary values uiP@0,U)(i51 ,...,N), where Uis the\nthreshold above which elements become unstable and relax.\nDepending on the state of the system at time t, theith ele-\nment receives external input Iiext(t) or internal input Iiint(t)\nfrom other elements, resulting in an activation u\u02dcat timet\n11,\nu\u02dci~t11!5ui~t!1Iiext~t!1Iiint~t!. ~1!\nFrom the activation u\u02dci(t11), the energy of the ith ele-\nment at time t11 is computed as\nui~t11!5Hu\u02dci~t11!ifu\u02dci~t11!,U,\ne~u\u02dci~t11!2U!ifu\u02dci~t11!>U,~2!\ni.e., if the activation exceeds the threshold U, it is reset but\nretains a fraction e(0<e<1) of the suprathreshold portion\nu\u02dci(t11)2Uof the energy.\nFor the external input Iiext(t), one element is randomly\nchosen from a uniform distribution at each time step, and", "doc_id": "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90", "embedding": null, "doc_hash": "12abe7258d39bcc8c697ce5683d58295831d4bc34ed4bd1ffab9ac698f98e390", "extra_info": null, "node_info": {"start": 2782, "end": 6105}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "b3e21c43-f122-4228-800c-1f3b373b3960", "3": "b2006ef0-2c96-48f2-b5a4-9f5ecebab917"}}, "__type__": "1"}, "b2006ef0-2c96-48f2-b5a4-9f5ecebab917": {"__data__": {"text": "timet\n11,\nu\u02dci~t11!5ui~t!1Iiext~t!1Iiint~t!. 
~1!\nFrom the activation u\u02dci(t11), the energy of the ith ele-\nment at time t11 is computed as\nui~t11!5Hu\u02dci~t11!ifu\u02dci~t11!,U,\ne~u\u02dci~t11!2U!ifu\u02dci~t11!>U,~2!\ni.e., if the activation exceeds the threshold U, it is reset but\nretains a fraction e(0<e<1) of the suprathreshold portion\nu\u02dci(t11)2Uof the energy.\nFor the external input Iiext(t), one element is randomly\nchosen from a uniform distribution at each time step, and a\nconstant amount of energy DUP(0,U#is added to the el-\nement\u2019s energy. The external input is considered to be deliv-ered slowly compared to the internal relaxation dynamics,i.e., it occurs only if no element has exceeded the thresholdin the previous time step. This corresponds to an in\ufb01niteseparation of the time scales of external driving and ava-lanche dynamics discussed in the SOC literature @11\u201313 #.\nThe external input can formally be written as I\niext(t)\n5dr(t),idM(t21),0DU.r(t) is an integer random variable\ndrawn at time step tfrom a uniform distribution between 1\nandN, indicating the chosen element, M(t21) is the num-\nber of suprathreshold elements in the previous time step, and\ndi,jis the Kronecker delta.\nThe internal input Iiint(t) is given by Iiint(t)5M(t\n21)aU/N, where aU/Nis the coupling strength between\nthe elements. We assume connections to be excitatory, that\nis,a.0.\nAt some time t0an avalanche starts, M(t0)51, provided\nthe element receiving external input becomes unstable. Thesystem is globally coupled, such that during an avalanche allelements receive internal input, including the unstable ele-\nments themselves. The avalanche duration D>0 is de\ufb01ned\nto be the smallest integer for which the stopping condition\nM(t\n01D)50 is satis\ufb01ed. The avalanche size Lis given by\nL5(k50D21M(t01k). The model allows the calculation of the\nprobability P(L,N,a) of an avalanche of size L>0 in the\nregime 0 <L<Nof a system consisting of Nelements with\ncoupling parameter a. 
Avalanche size distributions can alter-\nnatively be described by a function p(L,N,a) forL>1,\nwhich is related to P(L,N,a) via\np~L,N,a![P~L,N,a!\n12P~0,N,a!. ~3!\nAvalanche duration distributions will be denoted by\npd(D,N,a)(D>1).Due to the global coupling of the elements, there are no\nboundary conditions to be speci\ufb01ed in the model.\nB. The case e\u02dc1\nBoth the coupling parameter aand the reset parameter e\ncontrol the amount of dissipation in the system.An analytical\napproach will be possible for e51, that is, if all suprathresh-\nold elements are reset such that they lose an identical amountUof energy @cf. Eq. ~2!#. We will therefore restrict further\nanalysis to this case and only brie\ufb02y return to the generalsituation in Sec. IV.\nFor\ne51, the value a51 corresponds to the conservative\ncase with respect to the internal dynamics: upon resetting ofa suprathreshold element, the energy it loses is completely\ndistributed in the network. For\na>1, an in\ufb01nite avalanche\nmay eventually occur and we will therefore restrict ourselves\nto the case", "doc_id": "b2006ef0-2c96-48f2-b5a4-9f5ecebab917", "embedding": null, "doc_hash": "a440d7b733705c8d893d807288782fab81aef68d9447049c7d078521883697bb", "extra_info": null, "node_info": {"start": 6328, "end": 9320}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90", "3": "4d4caa9d-39d9-4341-8719-5c55510fc86f"}}, "__type__": "1"}, "4d4caa9d-39d9-4341-8719-5c55510fc86f": {"__data__": {"text": "the coupling parameter aand the reset parameter e\ncontrol the amount of dissipation in the system.An analytical\napproach will be possible for e51, that is, if all suprathresh-\nold elements are reset such that they lose an identical amountUof energy @cf. Eq. ~2!#. We will therefore restrict further\nanalysis to this case and only brie\ufb02y return to the generalsituation in Sec. 
IV.\nFor\ne51, the value a51 corresponds to the conservative\ncase with respect to the internal dynamics: upon resetting ofa suprathreshold element, the energy it loses is completely\ndistributed in the network. For\na>1, an in\ufb01nite avalanche\nmay eventually occur and we will therefore restrict ourselves\nto the case a,1. In order to avoid side effects resulting\nfrom the null set of rational values of a,U,o rDU,w e\nassume one of the fractions a/UorDU/Uto be irrational.\nAs will be shown below, a variation of aleads to qualita-\ntively different avalanche size distributions.\nC. Comparison with other avalanche models\nAclass of models discussed in the SOC literature employs\na parameter controlling the amount of dissipation @4,19\u201328 #.\nThe numerically observed power-law behavior in such sys-tems, however, could be ascribed to spatial inhomogeneitiesand the employed boundary conditions ~e.g., @21\u201324 #!.I n\norder to study avalanches of activity in the presence of dis-sipation independent of spatial correlations among elements,Lise and Jensen @25#introduced a random-neighbor version\nof the Olami-Feder-Christensen model described in Ref.@19#. In this model, threshold elements receive a constant,\nuniform input and have random nearest neighbors to whichthey are connected during an avalanche. The temporal vari-ability of the network connectivity avoids the buildup of spa-tial correlations, thus ruling out boundary effects in shapingavalanche distributions. Subsequent studies, however, dem-onstrated that the random-neighbor OFC model does nothave scaling behavior in the dissipative regime @26\u201328 #.\nBro\u00a8ker and Grassberger @26#, in their analytical consider-\nations of the random-neighbor OFC model, applied thetheory of branching processes to yield avalanche size distri-butions. 
For this purpose it was necessary to consider the\nlimitsd!\u2018~wheredis the dimension of the lattice !and\nN!\u2018in order to make the model effectively Abelian and\navoid correlations among elements @26#. This prevents ava-\nlanches from visiting elements more than once and allowssubavalanches to spread independently of each other suchthat each suprathreshold element has a distinctive predeces-sor which triggered it.\nOur model poses an alternative of the random-neighbor\nOFC model: the global coupling of elements prevents spatialcorrelations and the putative dependence of the system be-havior on boundary conditions. Randomness is introducedthrough the external input rather than the random assignmentof nearest neighbors. This approach has the advantage of not\nrequiring the limit N!\u2018: For\ne51, the system is AbelianEURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-2\nfor an arbitrary system size Nbecause at each time step t\nduring an avalanche, all elements receive the same input de-\npending only on the number M(t21) of suprathreshold ele-\nments at time t21.\nThe random-neighbor OFC model and the globally\ncoupled model are complementary in the following sense: inthe random-neighbor OFC model, randomness is introducedthrough the random choice of neighbors during the ava-lanche activity, while the interavalanche dynamics is a\nsimple shift of the energy distribution\nr(u) on theuaxis due\nto the uniform input. 
In our globally coupled", "doc_id": "4d4caa9d-39d9-4341-8719-5c55510fc86f", "embedding": null, "doc_hash": "4997f4928a0d7b04b9bdca09e7d2d2d84735a83b981d4e994af437f4c780374d", "extra_info": null, "node_info": {"start": 9139, "end": 12760}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "b2006ef0-2c96-48f2-b5a4-9f5ecebab917", "3": "4753dd74-9764-40be-8baa-81348163e0bd"}}, "__type__": "1"}, "4753dd74-9764-40be-8baa-81348163e0bd": {"__data__": {"text": "the system is AbelianEURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-2\nfor an arbitrary system size Nbecause at each time step t\nduring an avalanche, all elements receive the same input de-\npending only on the number M(t21) of suprathreshold ele-\nments at time t21.\nThe random-neighbor OFC model and the globally\ncoupled model are complementary in the following sense: inthe random-neighbor OFC model, randomness is introducedthrough the random choice of neighbors during the ava-lanche activity, while the interavalanche dynamics is a\nsimple shift of the energy distribution\nr(u) on theuaxis due\nto the uniform input. In our globally coupled model, thestochasticity is due to the random external input betweenavalanches, whereas the avalanche activity corresponds to a\nrotation of\nr(u) on a circle @0,U) with periodic boundary\nconditions. The latter property is due to ~i!the fact that all\nelements\u2014including the unstable ones\u2014receive the same in-\nputIiint(t) at each time step, and ~ii!the update rule ~2!which\nreinjects unstable elements according to the suprathreshold\nportionu\u02dci(t11)2Uof their energy. Therefore, the elements\nbecome unstable in a \ufb01xed order depending on the actual\ndistribution r(u). Below it will be shown that for coupling\ncoef\ufb01cients a,max$12DU/U,N/(N11)%, avalanche sizes\nmay not exceed N, which means that each element can be\nactivated only once. 
In this regime, avalanche distributionsturn out to be very similar for the random-neighbor OFCmodel and the current model, demonstrating that the differ-ences between the models barely change the statistical prop-\nerties of the avalanches. However, in the globally coupledmodel, this regime can be described by a closed expression\nfor avalanche distributions, p(L,N,\na).\nIII. AVALANCHE PROPERTIES\nA. Avalanche sizes\nFigure 1 shows avalanche size distributions for different\nvalues of a.N510000 was chosen as the system size, but\nthe curves look very similar for any other choice of N.\nFour qualitatively different regimes can be distinguished\nwhich will be termed subcritical, critical, supracritical, and\nmultipeaked. For small values of a, subcritical avalanche\nsize distributions exist, which can be approximated by thegeneral expression\np\n~L,N,a!\u2019p\u02c6~L,N,a!5Lgexp~2L/l!, ~4!\nwhere gis an exponent independent of Nto be characterized\nbelow, and l5l(N,a) describes the range of avalanche\nsizes over which power-law behavior is observed @Fig. 1 ~a!#.\nFor \ufb01xed N,l(N,a) is a monotonically increasing function\nofaas long as a,acwhich we refer to as the \u2018\u2018critical\ncase\u2019\u2019 ~Fig. 2 !. For ac, the system has avalanche distribu-\ntions with an approximate power-law behavior with expo-\nnent 23/2 from L51 almost up to the size of the system,\nwhere the usual exponential cutoff is observed @49#@Fig.\n1~b!#. For \ufb01nite N,acis in the dissipative regime.Above the\ncritical value ac, avalanche size distributions become non-\nmonotonic @Fig. 1 ~c!#. Such supracritical curves have a mini-\nmum at some intermediate avalanche size.\nIn order to \ufb01nd the critical coupling coef\ufb01cient acas a\nfunction of system size N, we computed a conveniently de-\n\ufb01ned distance K(a) between the distribution p(L,N,a) and\nFIG. 1. 
Probability distributions of avalanche sizes, p(x,N,a),\nand avalanche durations,", "doc_id": "4753dd74-9764-40be-8baa-81348163e0bd", "embedding": null, "doc_hash": "dc1d91cfe2474537aca56dea190192ed6d6119b9b20370fc8a9c529d2e6c9aeb", "extra_info": null, "node_info": {"start": 12777, "end": 16049}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "4d4caa9d-39d9-4341-8719-5c55510fc86f", "3": "27891564-af8b-4c1b-b331-bae9cd5f0d38"}}, "__type__": "1"}, "27891564-af8b-4c1b-b331-bae9cd5f0d38": {"__data__": {"text": "from L51 almost up to the size of the system,\nwhere the usual exponential cutoff is observed @49#@Fig.\n1~b!#. For \ufb01nite N,acis in the dissipative regime.Above the\ncritical value ac, avalanche size distributions become non-\nmonotonic @Fig. 1 ~c!#. Such supracritical curves have a mini-\nmum at some intermediate avalanche size.\nIn order to \ufb01nd the critical coupling coef\ufb01cient acas a\nfunction of system size N, we computed a conveniently de-\n\ufb01ned distance K(a) between the distribution p(L,N,a) and\nFIG. 1. Probability distributions of avalanche sizes, p(x,N,a),\nand avalanche durations, pd(x,N,a), in the subcritical @~a!,a\n50.8], critical @~b!,a50.99], supracritical @~c!,a50.999], and\nmultipeaked @~d!,a50.99997] regime. ~a!\u2013~c!Solid lines and\nsymbols denote the analytical and the numerical results for the ava-lanche size distributions, respectively. In ~d!, the solid line shows\nthe numerically calculated avalanche size distribution. The dashedlines in ~a!\u2013~d!show the numerically evaluated avalanche duration\ndistributions. In all cases, the presented curves are temporal aver-ages over 10\n7avalanches with N510000, DU50.022, and U\n51.\nFIG.2. Range l(N,a) ofavalanchesizesoverwhichpower-law\nbehavior is observed in the subcritical regime. 
l(N,a) has been\nplotted for four different system sizes, namely, for N5102~solid\nline!,N5103~dashed line !,N5104~dashed-dotted line !, andN\n5105~dotted line !.To obtain l,p\u02c6(L,N,a) as de\ufb01ned in Eq. ~4!has\nbeen \ufb01tted to the analytically calculated avalanche size distribution\np(L,N,a) by maximizing the symmetric version of the Kullback-\nLeibler distance K(l) as de\ufb01ned by K(l)5(L(p2p\u02c6)@ln(p)\n2ln(p\u02c6)#.FINITE-SIZE EFFECTS OF AVALANCHE DYNAMICS PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-3\nan \u2018\u2018ideal\u2019\u2019 power-law distribution p\u02dc(L,N)5L23/2/(LL23/2.\nThenK(a) was numerically minimized to yield the param-\neteracfor which the distribution is closest to a power law.\nWe chose the symmetric version of the Kullback-Leibler dis-\ntance as de\ufb01ned by K(a)5(L(p2p\u02dc)@ln(p)2ln(p\u02dc)#, which\nrevealed a critical coupling constant\nac~N!\u201912N2mwith m50.560.01 ~5!\n~obtained for system sizes ranging from N5102up toN\n5107). An alternative approach to obtain the exponent mis\nto compute the slope of the avalanche size distribution\np(L,N,a) for avalanche sizes L5N/2 using the analytical\nexpression to be derived below. The result is m50.5, in\nagreement with the numerics.\nAbove the supracritical case, a fourth regime exists for\nvalues of aclose to 1, where the distributions show multiple\npeaks located at L5N,2N11,3N11 ,....These peaks arise\nfrom the high coupling strength because elements can be-come suprathreshold more than only once during an ava-lanche. This is not possible in the subcritical, critical, andsupracritical regimes. 
Figure 1 ~d!shows an example with\nthree peaks ~note that the last maximum is not referred to as\na peak !.\nConditions for the occurrence of", "doc_id": "27891564-af8b-4c1b-b331-bae9cd5f0d38", "embedding": null, "doc_hash": "3defb0441bbacc24d7af3eec133fae96aad0894cf8f2d61aa5d4b36ea8f539e9", "extra_info": null, "node_info": {"start": 16116, "end": 19019}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "4753dd74-9764-40be-8baa-81348163e0bd", "3": "67231d57-e066-4cda-84fe-b3df6c8e09a8"}}, "__type__": "1"}, "67231d57-e066-4cda-84fe-b3df6c8e09a8": {"__data__": {"text": "compute the slope of the avalanche size distribution\np(L,N,a) for avalanche sizes L5N/2 using the analytical\nexpression to be derived below. The result is m50.5, in\nagreement with the numerics.\nAbove the supracritical case, a fourth regime exists for\nvalues of aclose to 1, where the distributions show multiple\npeaks located at L5N,2N11,3N11 ,....These peaks arise\nfrom the high coupling strength because elements can be-come suprathreshold more than only once during an ava-lanche. This is not possible in the subcritical, critical, andsupracritical regimes. Figure 1 ~d!shows an example with\nthree peaks ~note that the last maximum is not referred to as\na peak !.\nConditions for the occurrence of kpeaks in the avalanche\nsize distributions can be readily worked out. Consider the\ncasek51 corresponding to the situation that neurons may\n\ufb01re twice at most during an avalanche. First, an avalanche\nsizeL5N11 must be possible. Since all elements receive\nthe same internal input and \ufb01re in a \ufb01xed order as describedabove, this is equivalent to the condition that the elementwhich originally triggered the avalanche may \ufb01re twice. Af-terN\ufb01ring events, this element has received the total input\nDU1\naU. A second \ufb01ring can thus occur if this input ex-\nceeds the threshold, or a.12DU/U. 
Second, after N11\n\ufb01ring events, the total internal input to each element must\nexceed the threshold to allow for further \ufb01ring, ( N\n11)aU/N.Uora.N/(N11). Similar arguments hold\nfor the general case of kpeaks. The above conditions must\nthen be replaced by\na.amin~k!5maxH12DU\nkU,kN\nkN11J. ~6!\namin(k),a<amin(k11) then gives the range of coupling con-\nstants for which kpeaks can be observed.\nExamples for avalanche distributions in the multipeaked\nregime are shown in Fig. 3. The distribution functions be-\ntween two peaks at L5kNandL5(k11)Nare always non-\nmonotonic. This can be seen as follows: In an avalanche of\nsize larger than kN, the energies uimust have been in an\nappropriate order to allow for this size. Because the interava-lanche dynamics corresponds to a simple shift of\nr~U!on the\ncircle ~0,U!, the ordering after kNevents is nearly similar to\nthe ordering prior to the start of the avalanche, except for theelement which received external input. This element hasbeen responsible for triggering the avalanche, and only thiselementhaschangeditspositionrelativetotheothers.There-fore, it is highly probable that again all Nelements will takepart in the continuing avalanche, which explains the increase\nof the distribution towards L5(k11)N. As can be seen\nfrom Fig. 3, all distributions have minima at avalanche sizes\nL5N/2,3N/ 2 ,...,(k11/2)N,....\nB. Avalanche durations\nIn comparison to the avalanche size distributions de-\nscribed before, Fig. 1 also shows examples of avalanche du-\nrationdistributions in the four different regimes. Qualita-\ntively, the duration distributions have similar shapes. In thesubcritical regime, the distributions are described by mono-tonically decreasing functions as in Eq. 
~4!, and above the\ncritical regime, the functions show one or more maxima as\nthe coupling\naincreases, going from the supracritical to the\nmultipeaked regime.\nThe critical case occurs for the same value acfor which\nthe size distribution is also critical @Fig. 1 ~b!#, and the critical\nexponent is the same. This holds for all system sizes Nwe\nhave tested ~data not shown !. That is, the dependence of the\ncritical aon the system size Nis given by the", "doc_id": "67231d57-e066-4cda-84fe-b3df6c8e09a8", "embedding": null, "doc_hash": "f6774b2ad52f55383766520d0c887c8a752e8f1d48e31584245ff8738fdecb79", "extra_info": null, "node_info": {"start": 18926, "end": 22375}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "27891564-af8b-4c1b-b331-bae9cd5f0d38", "3": "84db127e-7780-4291-987b-f77a9b7f9dc4"}}, "__type__": "1"}, "84db127e-7780-4291-987b-f77a9b7f9dc4": {"__data__": {"text": "before, Fig. 1 also shows examples of avalanche du-\nrationdistributions in the four different regimes. Qualita-\ntively, the duration distributions have similar shapes. In thesubcritical regime, the distributions are described by mono-tonically decreasing functions as in Eq. ~4!, and above the\ncritical regime, the functions show one or more maxima as\nthe coupling\naincreases, going from the supracritical to the\nmultipeaked regime.\nThe critical case occurs for the same value acfor which\nthe size distribution is also critical @Fig. 1 ~b!#, and the critical\nexponent is the same. This holds for all system sizes Nwe\nhave tested ~data not shown !. That is, the dependence of the\ncritical aon the system size Nis given by the same expres-\nsion~5!for the avalanche sizes and the avalanche durations.\nThe main difference to size distributions lies in the fact\nthat duration distributions start to differ from an \u2018\u2018ideal\u2019\u2019power-law distribution at lower values of L. This behavior\ncan be explained by an intuitive argument. 
For avalanche\nsizes ofL5N, it is unimportant how many elements are\ntriggered in each step of the avalanche as long as the totalnumber of toppling elements is N. For an avalanche duration\nofN, it is not only required that the avalanche composed of\nNelements is being triggered, but it is also necessary that in\neach step of the avalanche, exactly oneelement is triggered.\nHence large avalanche durations have a far lower probabilitythan large avalanche sizes.\nC. Analytical considerations\nWe use combinatorial arguments in the system\u2019s\nN-dimensional con\ufb01guration space to derive expressions for\navalanche size distributions in the subcritical, critical, and\nsupracritical regimes. The con\ufb01guration space PN(0,U)~or\nsimplyPiN) is de\ufb01ned to be the Cartesian product\nFIG. 3. Different avalanche size distributions p(L,N,a) with\na50.996 for N550~thin solid line, four peaks !,N5100~thin\ndashed line, two peaks !,N5200~thick solid line, one peak !, and\nN5250~thick dashed line !.The curves show maxima at L5kNand\nminima at L5(k10.5)N~both marked with dotted lines !.I na l l\ncases, the presented curves are temporal averages over 2 3108ava-\nlanches with DU50.022 and U51.EURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-4\nPN(0,U)5@0,U)Nwith periodic boundary conditions, i.e., it\nhas the topology of an N-torus ~see Appendix A !.\n1. An example with two elements\nThe case N52 demonstrates the basic mechanisms for\nevaluating the avalanche dynamics. The avalanche distribu-tion is calculated by determining the equilibrium density of\nstates in P\n2,r(u1,u2), and subsequently considering the\nregions which lead to avalanches of sizes 0,1, and 2. Figure4 shows the con\ufb01guration space P\n2and the shifts resulting\nfrom external input DU, internal input aU/2, and ava-\nlanches of size L51. In the latter case, the system leaves P2\nand is reinjected on the opposite side. We consider r(u1,u2)\nonly at times between avalanches. 
Then, the total internalinput distributed during an avalanche leads to a shift vectorwhich guarantees that systems will never be reinjected into\nthe region denoted by L\n2(a,U), i.e., r(u1,u2)50 for\n(u1,u2)PL2. The density in P2\\L2is solely determined by\nthe randomly distributed external input. This input can be\ndecomposed into deterministic shifts of size DU/A2 along\nthe diagonal u15u2and a random walk", "doc_id": "84db127e-7780-4291-987b-f77a9b7f9dc4", "embedding": null, "doc_hash": "ae075fdcd61b5d6e17b02fd331837cffda11840c53f9317ca4325f5e01ca3d66", "extra_info": null, "node_info": {"start": 22357, "end": 25717}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "67231d57-e066-4cda-84fe-b3df6c8e09a8", "3": "7e380a6d-4fc5-4186-b584-08ef2cf17f80"}}, "__type__": "1"}, "7e380a6d-4fc5-4186-b584-08ef2cf17f80": {"__data__": {"text": "the shifts resulting\nfrom external input DU, internal input aU/2, and ava-\nlanches of size L51. In the latter case, the system leaves P2\nand is reinjected on the opposite side. We consider r(u1,u2)\nonly at times between avalanches. Then, the total internalinput distributed during an avalanche leads to a shift vectorwhich guarantees that systems will never be reinjected into\nthe region denoted by L\n2(a,U), i.e., r(u1,u2)50 for\n(u1,u2)PL2. The density in P2\\L2is solely determined by\nthe randomly distributed external input. This input can be\ndecomposed into deterministic shifts of size DU/A2 along\nthe diagonal u15u2and a random walk orthogonally to u1\n5u2. As a consequence of this stochasticity in combination\nwith the reinjection after avalanches, the density in P2\\L2\nbecomes constant for large times t, and a normalization\nyields the value r(u1,u2)5@U2(12a)#21. Figure 4 also\nidenti\ufb01es those regions in P2which lead to avalanches of\nsizes 0 (B), 1 (C), and 2 ~D!following external input to\nelement 1. Avalanche probabilities P(L,2,a) are obtained by\nintegrating r(u1,u2) over the respective region. 
Using Eq.\n~3!, the result is\np~1,2,a!52~12a!\n22aandp~2,2,a!5a\n22a.~7!2. The general case of Nelements\nSimilar arguments hold for the general situation of Nel-\nements. The topology of region LN\u2014the region which is not\ninhabited between avalanches after transients havedecayed\u2014and the regions leading to avalanches of certainsizes, however, are more complicated. We will outline thederivation of the distribution functions in the following; thedetailed, rather tedious calculations can be found in Appen-dixesA\u2013C.The \ufb01rst step is to obtain a general expression for\nthe volume of region L\nN,V\u0084LN(a,U)\u0085,i nPN. For this\npurpose, a rule can be derived showing how LNis composed\nof direct products of N-dimensional and lower-dimensional\nhypercubes of varying side lengths; V(LN) is then given by\nthe sum of the products of the volumes of these hypercubes.\nAs a result, the particularly simple expression V\u0084LN(a,U)\u0085\n5aUNis obtained for arbitrary N~seeAppendixesAand B !.\nFor the regions in PN(0,U) leading to different avalanche\nsizes, we suppose without loss of generality that the external\ninput DUis given to element 1. Upon receiving input, ele-\nment 1 \ufb01res if u1.U2DU. In the second step, the corre-\nsponding phase space region, whose volume is given by\nUN21DU, has to be partitioned into regions where L21\n50,1,2,...,N21 further elements will \ufb01re in the respective\navalanche. The volumes of these regions will be denoted as\nZ(L,N,a). The regions and their volumes are constructed\niteratively as shown in Appendix C. In the last step, ava-\nlanche probabilities p(L,N,a) are obtained by subtracting\nthe volumes of the intersections of the regions Z(L,N,a)\nwith region LN, and subsequently normalizing by the vol-\nume of PN\\LN~see Appendix C !. Using Eq. ~3!, the ava-\nlanche distributions become independent of DUandU,\np~L,N,a!5LL22SN21\nL21DSa\nNDL21S12La\nNDN2L21\n3N~12a!\nN2~N21!afor 1 <L<N. ~8!\nAs an example, Figs. 
1", "doc_id": "7e380a6d-4fc5-4186-b584-08ef2cf17f80", "embedding": null, "doc_hash": "183c0390ce98320f1efc3a25d3ed26af27b203a053cc2c6360864631b90c8cb3", "extra_info": null, "node_info": {"start": 25790, "end": 28811}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "84db127e-7780-4291-987b-f77a9b7f9dc4", "3": "e282d256-9f11-4515-b813-b90e589f831e"}}, "__type__": "1"}, "e282d256-9f11-4515-b813-b90e589f831e": {"__data__": {"text": "denoted as\nZ(L,N,a). The regions and their volumes are constructed\niteratively as shown in Appendix C. In the last step, ava-\nlanche probabilities p(L,N,a) are obtained by subtracting\nthe volumes of the intersections of the regions Z(L,N,a)\nwith region LN, and subsequently normalizing by the vol-\nume of PN\\LN~see Appendix C !. Using Eq. ~3!, the ava-\nlanche distributions become independent of DUandU,\np~L,N,a!5LL22SN21\nL21DSa\nNDL21S12La\nNDN2L21\n3N~12a!\nN2~N21!afor 1 <L<N. ~8!\nAs an example, Figs. 1 ~a!\u20131~c!demonstrate the perfect\nagreement between the analytical result ~8!and the numeri-\ncal avalanche size distributions for N5104.\nEquation ~8!resembles the avalanche size distribution\nwhich Bro \u00a8ker and Grassberger @@26#, Eq. ~36!#have found\nfor the random-neighbor OFC model using branching theory.The results differ, in that the result in Ref. @26#yields an\nexpression for avalanche sizes in systems of an arbitrary sizeN, but is valid only in the in\ufb01nite-size limit where simulta-\nneous avalanches are nonoverlapping. In contrast, Eq. ~8!\nholds for arbitrary system sizes Nin our model. Formally,\nEq.~8!contains a correction factor which is calculated by\nconsidering the region L\nNwhere the density of states even-\ntually vanishes, instead of assuming a uniform density over\nthe whole con\ufb01guration space PN(0,U) divided into regions\nleading to different avalanche sizes.\n3. 
The thermodynamic limit\nAvalanche behavior in the thermodynamic limit N!\u2018\ncan directly be assessed from Eq. ~8!. Numerical results and\nFIG. 4. The dynamics in the con\ufb01guration space for N52 ele-\nments. Effects of an external input to element 1 ~line marked as\nDU/U) followed by an avalanche of size 1 resulting in an input to\nboth elements ~arrow pointing along the diagonal !.L2denotes the\nregion where the density of states eventually vanishes; A,B, andC\ndenote regions leading to avalanches of size L50,1,2, respectively,\nif triggered by an external input to element 1. The hatched arealeads to an avalanche of size 1 but lies within L\n2.FINITE-SIZE EFFECTS OF AVALANCHE DYNAMICS PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-5\nanalytical considerations @26#suggest a critical coupling pa-\nrameter ac51 forN!\u2018. Indeed, an evaluation of Eq. ~8!\nshows that the local exponent\ng~L!5lim\na!1lim\nN!\u2018lnp~L,N,a!\np~L11,N,a!YlnL\nL11~9!\nbecomes constant for L!\u2018: limL!\u2018g(L)523/2. Thus, in\nthe conservative system, power-law behavior with an expo-\nnent of 23/2 is reached in the regime of large avalanche\nsizes.The critical exponent is identical to that of the random-neighbor OFC model @26#and, for example, for mean-\ufb01eld\npercolation @30#.\nFor \ufb01nite L, the distribution is actually very close to\npower-law behavior. Since the critical case corresponds to\nthe conservative system\na51, the supracritical regime be-\ncomes smaller and smaller as N!\u2018: the occurrence of non-\nmonotonic avalanches is a \ufb01nite-size effect.\n4. 
Avalanche durations\nFor avalanche durations pd(D,N,a), an iterative equation\nfor the corresponding regions and their volumes in the", "doc_id": "e282d256-9f11-4515-b813-b90e589f831e", "embedding": null, "doc_hash": "52a88528898827b36ca9fd1337b9b45eeb03c04dc56e9942b8786cdb13a86d86", "extra_info": null, "node_info": {"start": 28920, "end": 31943}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "7e380a6d-4fc5-4186-b584-08ef2cf17f80", "3": "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb"}}, "__type__": "1"}, "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb": {"__data__": {"text": "system, power-law behavior with an expo-\nnent of 23/2 is reached in the regime of large avalanche\nsizes.The critical exponent is identical to that of the random-neighbor OFC model @26#and, for example, for mean-\ufb01eld\npercolation @30#.\nFor \ufb01nite L, the distribution is actually very close to\npower-law behavior. Since the critical case corresponds to\nthe conservative system\na51, the supracritical regime be-\ncomes smaller and smaller as N!\u2018: the occurrence of non-\nmonotonic avalanches is a \ufb01nite-size effect.\n4. Avalanche durations\nFor avalanche durations pd(D,N,a), an iterative equation\nfor the corresponding regions and their volumes in the con-\ufb01guration space can be derived; cf. Eq. ~C23!. A closed ex-\npression corresponding to the avalanche size distribution ~8!,\nhowever, is not available.\nIV. EXTENSIONS AND APPLICATIONS OF THE MODEL\nIN THE CONTEXT OF NEURAL NETWORKS\nModels of SOC can usually be interpreted in terms of\nneural networks ~e.g., Refs. @23,31\u201334 #!. Single elements are\nidenti\ufb01ed with model neurons that receive both external andinternal input. The energy variable corresponds to some in-ternal state of a neuron, usually interpreted as its excitationor membrane potential. Upon reaching a threshold, the neu-ron is reset and subsequently sends an input to other neuronsin the network. 
In the following, we will study extensionsand applications of the avalanche model using neural net-work terminology.\nA. The case e\u00b81\nThe results described in Sec. III are valid for the Abelian\ncasee51. In terms of neural networks, this corresponds to a\nfast neural relaxation such that the excess energy u\u02dci2Uis\naccumulated afterthe reset. For e,1 in Eq. ~2!, the reset of\na neuron is slower, such that a fraction 1 2eof the excess\nenergy is lost @34#.\nWe show examples of avalanche size distributions in Fig.\n5~a!, and examples of duration distributions in Fig. 5 ~b!, for\ne50.1.\nA conspicuous feature is the appearance of additional\npeaks also in the regime where avalanches are restricted to\nsizesL<N. The distributions thus deviate from a power law\nwith a single exponent as in the conservative case e51.\nWhen some neurons cross the threshold, the differences be-\ntween their membrane potentials before the avalanche, ui\n2uj, will become smaller after the avalanche stopped,\ne(ui2uj). Therefore e,1 induces peaks in r(u), whichintroduce length scales in the distributions pandpd, when\nr(u) is rotated in uduring an avalanche. The differences\nbetween distributions for e51 and e,1 are most pro-\nnounced above a5ac, as can easily be seen in Fig. 5. Small\necan also prevent avalanches larger than Nin the multi-\npeaked regime\u2014the dissipation during the reset of the mem-brane potentials eats up the excess energy which otherwisewould make the same neuron \ufb01re twice during an avalanche.\nSimilar avalanche size distributions were described by\nCorralet al. @23#for locally connected networks of integrate-\nand-\ufb01re neurons receiving uniform input to which somenoise was added. As in our model, the dissipation of energywas responsible for the occurrence of the peaks whereas inthe conservative case, approximate power-law behavior wasobserved.\nB. 
Avalanches in networks of leaky integrate-and-\ufb01re neurons\nIn the context of biologically motivated neural networks,\nadditional parameters such as time delays of interaction or\nFIG. 5. Distributions of ~a!avalanche sizes and ~b!avalanche\ndurations for a subcritical coupling strength a50.8~dashed line", "doc_id": "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb", "embedding": null, "doc_hash": "d312cb2862d10d41648bdbba37d3d7fb89d3d51ea236838c0f8b9a846385c5aa", "extra_info": null, "node_info": {"start": 31825, "end": 35278}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "e282d256-9f11-4515-b813-b90e589f831e", "3": "851bfea7-7ea6-48e4-a4ae-3ed590d606c2"}}, "__type__": "1"}, "851bfea7-7ea6-48e4-a4ae-3ed590d606c2": {"__data__": {"text": "same neuron \ufb01re twice during an avalanche.\nSimilar avalanche size distributions were described by\nCorralet al. @23#for locally connected networks of integrate-\nand-\ufb01re neurons receiving uniform input to which somenoise was added. As in our model, the dissipation of energywas responsible for the occurrence of the peaks whereas inthe conservative case, approximate power-law behavior wasobserved.\nB. Avalanches in networks of leaky integrate-and-\ufb01re neurons\nIn the context of biologically motivated neural networks,\nadditional parameters such as time delays of interaction or\nFIG. 5. Distributions of ~a!avalanche sizes and ~b!avalanche\ndurations for a subcritical coupling strength a50.8~dashed line !\nfor a critical coupling a50.99~solid line !, a supracritical coupling\na50.999 ~dashed-dotted line !, and a coupling strength of a\n50.9998 ~dotted line !. Compare also the distributions shown in Fig.\n1 using identical a\u2019s. In all cases, the presented curves are temporal\naverages over 106avalanches with N510000, DU50.022, e\n50.1, andU51. 
For comparison, the thick solid line in ~a!shows\nthe critical size distribution for e51.EURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-6\ndecay time constants for the elements\u2019 dynamical variable u\nare usually employed ~see, e.g., Refs. @35\u201341 #!. Here we\nbrie\ufb02y show how the avalanche statistics changes by the in-troduction of a leak term into the dynamical equation ~1!.\nWithout input to element i, this leak term yields an exponen-\ntial decay of u\nito zero with time constant t. For our simu-\nlations with leaky threshold neurons, we used a discretizedversion of the continuous dynamical system\ntu\u02d9i~t!52ui~t!1Iiext~t!1Iiint~t!~i51 ,...,N!,~10!\nwith external input Iiext(t)5d(t2kDt)dr(k),iDU,kPZ.W e\nde\ufb01ne DUto be\nDU5u0@12exp~2Dt/t!N#, ~11!\nwhere 1/ Dtis the rate of the external input and u0the\nasymptotic energy to which an uncoupled neuron would bedrivenintheabsenceofa\ufb01ringthreshold.Ifneuron ireaches\nits threshold U, the energy is reset to u\ni50.\nIn the previous, Abelian case we had only one parameter\nDUcontrolling the input, which had apparently no in\ufb02uence\non the shape of the avalanche distributions @see Eq. ~8!#.\nNow, there are two parameters controlling the neuron\u2019s\ninput-output characteristics, u0andDt. In the following, we\ndemonstrate the phenomena resulting from varying these in-put parameters.\nIn Fig. 6, we choose the critical case\na5acin a system of\nN51000 neurons, while varying u0. The respective time in-\nterval Dtis chosen such that the input DUis constant. Ef-\nfectively, the case u05\u2018@Fig. 6 ~a!#corresponds to neurons\nwithout leakage, and decreasing u0yields the network be-\nhavior for increasing leakage. Such a decrease imposes twochanges: \ufb01rst, large avalanches get more and more improb-able, and second, oscillations are induced into the size dis-tributions. Both effects can be understood by observing the\nenergy densities\nr(u)~small insets in Fig. 6 !. 
While in the\nnonleaky case @Fig. 6 ~a!#,ris nearly uniform, a leaky inte-\ngration causes more neurons to have energies", "doc_id": "851bfea7-7ea6-48e4-a4ae-3ed590d606c2", "embedding": null, "doc_hash": "141dab415abe67e410cc6f369155ff1a1a62e80b09276d348e64705d9eda762b", "extra_info": null, "node_info": {"start": 35225, "end": 38299}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb", "3": "0631eda8-3675-4993-bc6f-915690b5017e"}}, "__type__": "1"}, "0631eda8-3675-4993-bc6f-915690b5017e": {"__data__": {"text": "u0. The respective time in-\nterval Dtis chosen such that the input DUis constant. Ef-\nfectively, the case u05\u2018@Fig. 6 ~a!#corresponds to neurons\nwithout leakage, and decreasing u0yields the network be-\nhavior for increasing leakage. Such a decrease imposes twochanges: \ufb01rst, large avalanches get more and more improb-able, and second, oscillations are induced into the size dis-tributions. Both effects can be understood by observing the\nenergy densities\nr(u)~small insets in Fig. 6 !. While in the\nnonleaky case @Fig. 6 ~a!#,ris nearly uniform, a leaky inte-\ngration causes more neurons to have energies near the \ufb01ringthreshold Uthan energies near the resting potential, \ufb01nally\nintroducing oscillations and peaks in\nr@Figs. 6 ~b\u2013d!#. These\ndensity oscillations lead to the observed oscillations in thesize distributions due to the deterministic readout mechanismof the avalanches: during an avalanche, the neural energiesare uniformly shifted on the uaxis. We observe that the\nnumber of oscillatory peaks decreases as u\n0decreases, while\nthe oscillation amplitude increases.\nIn a second numerical experiment, we held the leakiness\nconstant, while we varied both the rate 1/ Dtat which exter-\nnal input DUwas delivered, and the coupling constant a\nsuch that a transition from subcritical to supracritical oc-\ncurred. Our results are summarized in Fig. 7. 
With highlyvariable external driving, and subcritical coupling ~upper left\nplot in Fig. 7 !, the neurons do not show any sign of synchro-\nnization. When the external driving gets more frequent~lower left plot in Fig. 7 !, even a small coupling leads to\nsynchronization, accompanied by a strong oscillation.Thingsdo not change signi\ufb01cantly when the coupling gets stronger~lower right plot in Fig. 7 !, only the oscillation period getsshorter and the noise appears to have a stronger in\ufb02uence on\nthe dynamics. When both the variability of the external inputand the coupling is high, the system synchronizes without\noscillating. Here, one element is likely to trigger a large por-tion of the other elements in the network ~synchronization !,\nbut the input variability ensures that the membrane potentialsof the elements get desynchronized before another avalancheis triggered, preventing an oscillatory component to build upin the cross-correlation functions.\nV. SUMMARYAND CONCLUSION\nIn summary, we presented an avalanche model involving\nrandom input and global coupling between its elements. Ava-lanche size distributions can be calculated exactly for an ar-bitrary system size through combinatorial arguments in thesystem\u2019s con\ufb01guration space. The model therefore accountsfor phenomena in \ufb01nite systems and elucidates the transitionto the thermodynamic limit.\nThe model belongs to the same universality class as the\nrandom-neighbor OFC model, showing similar distributionsin the subcritical and critical regimes, and the same critical\nexponent 23/2 in the conservative case\na51a sN!\u2018.\nThe analytical access to avalanche size and duration dis-\ntributions in \ufb01nite systems is especially important when mod-\neling systems that in reality have some 100 to 10000 ele-\nments. For example, cortical columns are examples neural\nnetworks with an order of 1000 to 10000 elements which are\nFIG. 6. 
Distributions of avalanche sizes, p(L,N,a), of a fully\nconnected network of leaky threshold elements receiving randominput for different leakiness constants u\n0, namely, for ~a!u05\u2018,\n~b!u055,~c!u051.5, and ~d!u051.01. The insets show the cor-\nresponding mean energy", "doc_id": "0631eda8-3675-4993-bc6f-915690b5017e", "embedding": null, "doc_hash": "ca73dab4289caa0018ad9b5d032ad89f64c179e14e4cd3cf82c49afefb7c3e74", "extra_info": null, "node_info": {"start": 38393, "end": 41900}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "851bfea7-7ea6-48e4-a4ae-3ed590d606c2", "3": "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c"}}, "__type__": "1"}, "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c": {"__data__": {"text": "and the same critical\nexponent 23/2 in the conservative case\na51a sN!\u2018.\nThe analytical access to avalanche size and duration dis-\ntributions in \ufb01nite systems is especially important when mod-\neling systems that in reality have some 100 to 10000 ele-\nments. For example, cortical columns are examples neural\nnetworks with an order of 1000 to 10000 elements which are\nFIG. 6. Distributions of avalanche sizes, p(L,N,a), of a fully\nconnected network of leaky threshold elements receiving randominput for different leakiness constants u\n0, namely, for ~a!u05\u2018,\n~b!u055,~c!u051.5, and ~d!u051.01. The insets show the cor-\nresponding mean energy densities r(u). In all cases, the presented\ncurves are temporal averages over 106avalanches with N51000,\nU51,DU50.17, and t51. With these parameters, the discretiza-\ntion time step Dtwas chosen to satisfy Eq. ~11!.FINITE-SIZE EFFECTS OF AVALANCHE DYNAMICS PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-7\ndensely connected to each other, but sparsely connected to\nother columns. Our approach may help to understand thesynchronization properties of these local networks receivingapparently stochastic input. 
Even when the analytically solv-able Abelian model may abstract from a real neuron, theproperties of the avalanche distributions are stable with re-spect to changes in the underlying model itself\u2014we alreadypointed out its similarity with the distributions seen in therandom-neighbor OFC model. In general, it is not easy tomotivate the random-neighbor OFC model, because it em-ploys a coupling changing randomly in each step of an ava-\nlanche. In the neuronal context, however, the model may bean example of a constantly driven, densely connected net-work of elements subjected to synaptic failures that occurrelatively often in reality.\nAmong other dynamical properties, we also observe syn-\nchronization without oscillations. While this phenomenonhas already been observed in biology @42#and modeling\nstudies ~see, e.g., Refs. @43,44 #!, we link its occurrence to the\ntransition from the critical to the supracritical regime. Thefact that the latter disappears for large networks goes to-gether with the synchronized dephasing due to \ufb01nite size \ufb01rstmentioned in Ref. @45#~cf. also Ref. @43#!.\nWith the advance of experimental technologies such as\ne.g., stable long-time multielectrode recordings, the questionof whether one can \ufb01nd similar phenomena in our \u2018\u2018toy\u2019\u2019model as well as in reality arises\u2014our analysis could then\nprovide a tool to understand the mechanisms behind the dy-namics. While there are hints that in some cases, power lawscan be found in the brain\u2019s dynamics @46\u201348 #, it remains to\nelucidate which functional advantage a critical state mayhave for the information processing going on in the brain.\nACKNOWLEDGMENTS\nWe would like to thank Professor Theo Geisel for most\nfruitful discussions at the Max-Planck-Institute for Fluid Dy-namics in Go \u00a8ttingen. This work has been supported by the\nDFG, Sonderforschungsbereich 517 \u2018\u2018Neurokognition\u2019\u2019 ~U.E.\nand C.W.E. 
!, and by theVolkswagen Foundation, Project No.\n5425 ~U.E.!.\nAPPENDIX A: PRELIMINARIES\nIn the appendixes, we derive the exact avalanche distribu-\ntionsp(L,N,a) for arbitrary system sizes N. Appendix A\nwill introduce a suitable notation for partitioning the con-\n\ufb01guration space Pinto products of lower-dimensional sub-\nsets. InAppendix B, we calculate the volume", "doc_id": "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c", "embedding": null, "doc_hash": "19efe4c97b68584f1dea866c5bb0f2c6baf0688a80caed2dfd3369de55d770a8", "extra_info": null, "node_info": {"start": 41867, "end": 45252}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "0631eda8-3675-4993-bc6f-915690b5017e", "3": "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd"}}, "__type__": "1"}, "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd": {"__data__": {"text": "for most\nfruitful discussions at the Max-Planck-Institute for Fluid Dy-namics in Go \u00a8ttingen. This work has been supported by the\nDFG, Sonderforschungsbereich 517 \u2018\u2018Neurokognition\u2019\u2019 ~U.E.\nand C.W.E. !, and by theVolkswagen Foundation, Project No.\n5425 ~U.E.!.\nAPPENDIX A: PRELIMINARIES\nIn the appendixes, we derive the exact avalanche distribu-\ntionsp(L,N,a) for arbitrary system sizes N. Appendix A\nwill introduce a suitable notation for partitioning the con-\n\ufb01guration space Pinto products of lower-dimensional sub-\nsets. InAppendix B, we calculate the volume of the region L\nin phase space which is not inhabited between avalanches,by using a partitioning of the con\ufb01guration space leading toa recursion formula for subregions. In Appendix C, a recur-\nFIG. 7. Raster plots showing\nthe \ufb01ring dynamics of a networkofN5100 neurons. Each spike is\ndrawn as a small black tick in de-pendence of the time t, and the\nnumber of the neuron which emit-ted that spike. 
The coupling pa-rameter\nawas chosen to be a\n50.6~top left !, 0.92 ~top right !,\n0.6~bottom left !, and 0.92 ~bot-\ntom right !, while the time interval\nbetween two external inputs wasgiven by Dt520/N,20/N,1/Nand\n1/Nm, respectively. The insets\nshow the mean over the cross-correlation functions from 300pairs selected out of the N5100\nneurons. The cross-correlationfunctions have been scaled arbi-trarily, but identically for all fourinsets.u\n0was chosen to be u0\n51.05 and t532.84 ms, yielding\nan output rate of 10 Hz for an un-coupled neuron. The critical cou-pling strength for N5100 neurons\nis\nacrit50.9.EURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-8\nsion formula for regions leading to avalanches of a speci\ufb01c\nsizeLwill be derived, and subsequently be modi\ufb01ed by sub-\ntracting the noninhabited regions. This modi\ufb01cation \ufb01nally\nleads to the exact avalanche distributions p(L,N,a).\nBefore starting the analysis, we will shortly summarize\nthe terminology used in the appendix: x,y,zPR, andx,y,z\nPRm;i,j,k,l,m,n,p,q,rPNdenote indices; I,J,Kdenote\nsets of indices. I,J,Kdenote second-order sets of indices;\nPm,Vm,Gm,Qm,Fm#Rmdenote subsets in Rm;D,S,Vde-\nnote volumes of subsets. Overlined symbols will denote re-gions and volumes excluding the subset of the non-inhabitedvolume in con\ufb01guration space.\n1. Subsets and sets of indices\nLetIk,mdenote an arbitrary k-element subset of Im\n\u201c$1 ,...,m%. The superset of all different Ik,mis denoted by\nIk,m.Ik,mcontains thus (km)k-element subsets of Imas its\nelements.\nFor the following analysis, it is convenient to de\ufb01ne\nm-dimensional subsets Pm,\nPm~xmin,xmax!\u201c$xP@xmin,xmax!m#Rm%. 
~A1!\nThe con\ufb01guration space of Nunits can then be denoted by\nPN(0,U).\nWe also de\ufb01ne subsets Gkm(l,u,Ik,m) for 0 ,k<mand", "doc_id": "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd", "embedding": null, "doc_hash": "3d40cbd3dfcc5b054dea5fb7a0f02dab5dc95e4b06bed2cabce48d634840ab3c", "extra_info": null, "node_info": {"start": 45314, "end": 48042}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c", "3": "701d0aff-6665-4b22-98b5-4bd41470f1db"}}, "__type__": "1"}, "701d0aff-6665-4b22-98b5-4bd41470f1db": {"__data__": {"text": "in con\ufb01guration space.\n1. Subsets and sets of indices\nLetIk,mdenote an arbitrary k-element subset of Im\n\u201c$1 ,...,m%. The superset of all different Ik,mis denoted by\nIk,m.Ik,mcontains thus (km)k-element subsets of Imas its\nelements.\nFor the following analysis, it is convenient to de\ufb01ne\nm-dimensional subsets Pm,\nPm~xmin,xmax!\u201c$xP@xmin,xmax!m#Rm%. ~A1!\nThe con\ufb01guration space of Nunits can then be denoted by\nPN(0,U).\nWe also de\ufb01ne subsets Gkm(l,u,Ik,m) for 0 ,k<mand 0\n,l<1,\nGkm~l,u,Ik,m!\u201cHxPPm~0,u!Uxi,lk\nmu,iPIk,mJ.\n~A2!\nLetLmdenote a union of Gkm\u2019s,\nLm~l,u!\u201c\u0142\nk51m\n\u0142\nIk,mPIk,mGkm~l,u,Ik,m!. ~A3!\nFor the special case of l51,Gmm(1,u)5Pm(0,u) and, there-\nfore,\nLm~1,u!5Pm~0,u!. ~A4!\nIn order to be able to combine lower-dimensional subsets,\nwe \ufb01nally de\ufb01ne the direct product Qk^Fm2kuIk,mbetween\ntwo subsets. Ik,mdetermines the indices of the components\nof the elements yin the resulting volume assigned to com-\nponents belonging to elements xinQk,\nQk\u201c$xPA#Rk%,\nFm2k\u201c$xPB#Rm2k%,\nQk^Fm2kuIk,m5$yPRmu$yi%iPIk,mPA,$yi%iPImnIk,mPB%.\n~A5!\nNote that this de\ufb01nition is well de\ufb01ned only for sets Abeing\ninvariant under a permutation of the components of xPA.\nThe operator ^is assumed to have higher precedence than\n\u0142,\u00f8, and \\.2. 
Lemmas\nThe following three lemmas will help to shorten the deri-\nvation of the recursion formulas in the following sections.\nLemma 1. ;k,l<m;;u.0;;l:0,l<1,\nA\u201cPl~0,lu!^Pm2l~lu,u!uJl,m\u00f8Gkm~l,u,Ik,m!\n\u00deB,Ik,m#Jl,m. ~A6!\nProof.Let us choose a suitable disjoint decomposition of\nthe index set Imas\nIm5~Ik,m\u00f8Jl,m!\n\u0142~Ik,m\\Jl,m!\n\u0142~Jl,m\\Ik,m!\n\u0142~Im\\\u0084Ik,m\u0142Jl,m!\u0085. ~A7!\nUsing Eqs. ~A1!,~A3!, and ~A5!, we can then explicitly\nwriteAas\nA5$xPRmu0<xi,luk/m:iPIk,m\u00f8Jl,m,\nlu<xi,luk/m:iPIk,m\\Jl,m,\n0<xi,lu:iPJl,m\\Ik,m,\nlu<xi,u:iPIm\\~Ik,m\u0142Jl,m!%. ~A8!\nBecause of lu>luk/m,Ais nonempty if and only if\nIk,m\\Jl,m5B; and this implies that Ik,m#Jl,m,A\u00deB. Note\nthat ifk.l, condition Ik,m#Jl,mis never ful\ufb01lled.\nLemma 2.", "doc_id": "701d0aff-6665-4b22-98b5-4bd41470f1db", "embedding": null, "doc_hash": "5664f702c503e98901ae466b2f5b76b027003ec81c92883f03bce55f71e01d3c", "extra_info": null, "node_info": {"start": 48125, "end": 50039}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd", "3": "f5f3457d-3d38-45f9-bea0-60efa8549999"}}, "__type__": "1"}, "f5f3457d-3d38-45f9-bea0-60efa8549999": {"__data__": {"text": "~A1!,~A3!, and ~A5!, we can then explicitly\nwriteAas\nA5$xPRmu0<xi,luk/m:iPIk,m\u00f8Jl,m,\nlu<xi,luk/m:iPIk,m\\Jl,m,\n0<xi,lu:iPJl,m\\Ik,m,\nlu<xi,u:iPIm\\~Ik,m\u0142Jl,m!%. ~A8!\nBecause of lu>luk/m,Ais nonempty if and only if\nIk,m\\Jl,m5B; and this implies that Ik,m#Jl,m,A\u00deB. Note\nthat ifk.l, condition Ik,m#Jl,mis never ful\ufb01lled.\nLemma 2. ;l<m;;u.0;;l:0,l<1,\n\u0142\nk51l\n\u0142\nIk,m#Jl,mGkm~l,u,Ik,m!\n5Ll~ll/m,u!^Pm2l~0,u!uJl,m. ~A9!\nProof.Inserting de\ufb01nition ~A2!into the innermost union\nin Eq. ~A9!yields\n\u0142\nIk,m#Jl,mGkm~l,u,Ik,m!\n5\u0142\nIk,m#Jl,mHxPPm~0,u!Uxj,lk\nmu,jPIk,mJ.\nIn this union, exactly m2lcomponents of xcover the whole\ninterval @0,u). 
By separating these components forming a-FINITE-SIZE EFFECTS OF AVALANCHE DYNAMICS PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-9\nsubset Pm2l(0,u), the union can be written as a direct prod-\nuct of Pm2lwith a union of dimension l, using suitably\nchosen index sets Kk,l;\nS\u0142\nKk,lPKk,lHxPPl~0,u!UxjPKk,l,ll\nmk\nluJD\n^Pm2l~0,u!uJl,m\n5S\u0142\nKk,lPKl,kGkl~ll/m,u!D^Pm2l~0,u!U\nJl,m.\n~A10!\nThen Eq. ~A9!follows immediately, using the de\ufb01nition\n~A3!forLl.\nLemma 3. ;z<y,\nLm~x,y!\u00f8Pm~0,z!5Lm~xy/z,z!. ~A11!\nProof.This can be achieved by rescaling the parameters u\nandlin de\ufb01nition ~A2!to the smaller subset Pm(0,z), and\ninserting the rescaled de\ufb01nition into Eq. ~A3!.\nAPPENDIX B: CALCULATION OF THE NONINHABITED\nVOLUME\nIn a con\ufb01guration space of dimension Nand volume UN,\nthe volume not inhabited between avalanches mediated by a\ncoupling of strength aU/Nis denoted by LN(a,U). The\npurpose of this section will be to calculate its volume V,\nwhich is done iteratively. The reason for using this strategycan be illustrated by comparing the phase spaces and their\npartitionings for N52~Fig. 4 !andN53~Fig. 8 !. The par-\ntitioning for N52 is similar to the partitioning of the u\n1-u2\nplane in Fig. 8, except for a change in the side lengths of the\nvolumes. This \u2018\u2018self-similiarity\u2019\u2019 continues when proceedingto higher Nand enables the iterative calculation of the vol-\numes L\nN. Note that already L3has a relatively complex\nstructure.\nTheorem. ;l,uand;m.0,V\u0084Lm(l,u)\u0085is given by\nthe particularly simple expression\nV\u0084Lm~l,u!\u00855lum. 
~B1!\nThe proof will", "doc_id": "f5f3457d-3d38-45f9-bea0-60efa8549999", "embedding": null, "doc_hash": "d07dbc2768ea7cad29c7adc66e3ea6d99466633df038de89036ff4f836541741", "extra_info": null, "node_info": {"start": 50153, "end": 52301}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "701d0aff-6665-4b22-98b5-4bd41470f1db", "3": "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e"}}, "__type__": "1"}, "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e": {"__data__": {"text": "using this strategycan be illustrated by comparing the phase spaces and their\npartitionings for N52~Fig. 4 !andN53~Fig. 8 !. The par-\ntitioning for N52 is similar to the partitioning of the u\n1-u2\nplane in Fig. 8, except for a change in the side lengths of the\nvolumes. This \u2018\u2018self-similiarity\u2019\u2019 continues when proceedingto higher Nand enables the iterative calculation of the vol-\numes L\nN. Note that already L3has a relatively complex\nstructure.\nTheorem. ;l,uand;m.0,V\u0084Lm(l,u)\u0085is given by\nthe particularly simple expression\nV\u0084Lm~l,u!\u00855lum. ~B1!\nThe proof will be given by induction over m.\nBasis.From de\ufb01nitions ~A3!and~A2!it is obvious that\nform51,\nV\u0084L1~l,u!\u00855V\u0084G11~l,u,I1,1!\u00855lu. ~B2!\nInduction. For the induction we assume that Eq. ~B2!has\nbeen proven for m5n21. Thus we have to prove that Eq.\n~B2!holds also for m5n.The phase space Pn(0,u) can be expressed as a union of\ndisjoint subsets,\nPn~0,u!5\u0142\nl50n\n\u0142\nJl,nPIl,nPl~0,lu!^Pn2l~lu,u!uJl,n,\n~B3!\nwhose volumes are related to a binomial expansion of\nV(Pn),\nV\u0084Pn~0,u!\u00855un@l1~12l!#n5un(\nl50nSn\nlDll~12l!n2l.\n~B4!\nUsing the de\ufb01nitions ~A1!~A3!, it is clear that\nLn(l,u)#Pn(0,u)nPn(lu,u). Inserting Eqs. 
~A3!~B3!\ninto this expression,\nLn~l,u!5@Pn~0,u!nPn~lu,u!#\u00f8Ln~l,u!\n5S\u0142\nl51n\n\u0142\nJl,nPIl,nPl~0,lu!^Pn2l~lu,u!uJl,nD\n\u00f8S\u0142\nk51n\n\u0142\nIk,nPIk,nGkn~l,u,Ik,n!D.\nWe subsequently use Lemmas ~A6!,~A9!, and ~A11!and\nobtain\nFIG. 8. Example of the con\ufb01guration space P3and its partition-\ning. The noninhabited volume L3is highlighted in shades of gray.\nThe volumes V\u00af(L,3,a) leading to avalanches of sizes L50,L\n51,L52, andL53 are outlined with thick black lines at their\nedges.EURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137", "doc_id": "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e", "embedding": null, "doc_hash": "b9fbec865dfee7f2dfc4365eeb7997e9eaa5f50a5c7d4a80c484714866c129d1", "extra_info": null, "node_info": {"start": 52112, "end": 53782}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "f5f3457d-3d38-45f9-bea0-60efa8549999", "3": "e8595aab-231a-4568-b65b-36a42d7ef43d"}}, "__type__": "1"}, "e8595aab-231a-4568-b65b-36a42d7ef43d": {"__data__": {"text": "subsequently use Lemmas ~A6!,~A9!, and ~A11!and\nobtain\nFIG. 8. Example of the con\ufb01guration space P3and its partition-\ning. The noninhabited volume L3is highlighted in shades of gray.\nThe volumes V\u00af(L,3,a) leading to avalanches of sizes L50,L\n51,L52, andL53 are outlined with thick black lines at their\nedges.EURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-10\n\u0142\nl51n\n\u0142\nJl,n\nPIl,nSPl~0,lu!^Pn2l~lu,u!uJl,n\u00f8\u0142\nk51l\n\u0142\nIk,n#Jl,nGkn~l,u,Ik,n!D@Eq.~A6!#\n5\u0142\nl51n\n\u0142\nJl,n\nPIl,nSPl~0,lu!^Pn2l~lu,u!uJl,n\u00f8Ll~ll/n,u!^Pn2l~0,u!uJl,nD@Eq.~A9!#\n5\u0142\nl51n\n\u0142\nJl,nPIl,n\u0084Pl~0,lu!\u00f8Ll~ll/n,u!\u0085^\u0084Pn2l~lu,u!\u00f8Pn2l~0,u!\u0085uJl,n,\n5\u0142\nl51n\n\u0142\nJl,n\nPIl,nLlSl\nn,luD^Pn2l~lu,u!U\nJl,n@Eq.~A11!#. ~B5!\nBy construction @see Eq. ~B3!#, the subsets are disjoint and\nthe volume Vof their union can be written as a sum over the\nsubvolumes. 
In addition, volumes of subsets for different in-\ndex sets Jl,nfor \ufb01xed nandlare identical. Thus we can\ninsert Eq. ~B1!forl,n, and Eq. ~A4!forl5n. Through this\nprocedure we close the induction\nV\u0084Ln~l,u!\u00855(\nl51nSn\nlDl\nn~lu!lun2l~12l!n2l\n5lun(\nl51nSn21\nl21Dll21~12l!n2l\n5lun(\nk50n21Sn21\nkDlk~12l!(n21)2k5lun.j\n~B6!\nBy choosing n5N,u5U, and l5a, we obtain the vol-\numeVfor the noninhabited region as\nV\u0084LN~a,U!\u00855aUN. ~B7!\nAPPENDIX C: AVALANCHE DISTRIBUTIONS\nIn this section, we will prove the following theorem for\nthe avalanche probabilities p(L,N,a).\nTheorem.\np~L,N,a!5LL22SN21\nL21DSa\nNDL21\n3S12La\nNDN2L21N~12a!\nN2~N21!a.~C1!\nProof.It is convenient to divide the proof into three steps.\nThe \ufb01rst step will be to identify the regions in con\ufb01gurationspace leading to avalanches of a certain size L. The second\nstep will be to subtract the noninhabited subset L\nNfrom\nthese regions. By calculating their volume, one \ufb01nally ob-\ntains the correct avalanche probabilities p(L,N,a). As in thepreceding appendix, we will use an iterative procedure, as\nsuggested by comparing Figs. 4 and 8.\n1. Regions representing different avalanche sizes\nTo convey the idea behind the analysis, we \ufb01rst recall the\ndynamics during one event in an avalanche. 
Typically, m\nunits have still not been active yet, lunits are just \ufb01ring, k\nelements have already \ufb01red and will not be activated", "doc_id": "e8595aab-231a-4568-b65b-36a42d7ef43d", "embedding": null, "doc_hash": "3f8147e4834fbd7098232bc749c91ec7a5472bc45df3a09e03da7fe0b1d40601", "extra_info": null, "node_info": {"start": 53955, "end": 56133}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e", "3": "d2ba30fc-0e5f-4891-895d-6d148e261c7d"}}, "__type__": "1"}, "d2ba30fc-0e5f-4891-895d-6d148e261c7d": {"__data__": {"text": "step will be to identify the regions in con\ufb01gurationspace leading to avalanches of a certain size L. The second\nstep will be to subtract the noninhabited subset L\nNfrom\nthese regions. By calculating their volume, one \ufb01nally ob-\ntains the correct avalanche probabilities p(L,N,a). As in thepreceding appendix, we will use an iterative procedure, as\nsuggested by comparing Figs. 4 and 8.\n1. Regions representing different avalanche sizes\nTo convey the idea behind the analysis, we \ufb01rst recall the\ndynamics during one event in an avalanche. Typically, m\nunits have still not been active yet, lunits are just \ufb01ring, k\nelements have already \ufb01red and will not be activated again,andjof themremaining units will be activated until the\navalanche stops. If the coupling strength is\nb5aU/N,n o\nstate variable uof the remaining munits could have been\ninitially larger than U2kb. We will denote the\nm-dimensional subsets of the con\ufb01guration space, which will\nevolve into the situation described above, with Vk,lm(j). The\nfollowing considerations will lead to a recursion formula for\nVk,lm(j) over the variable j.\nLet us start with the subspace Pm(0,U2kb), which can\nbe written as a union over all V\u2019s with \ufb01xed k,l, andm,\nPm~0,U2kb!5\u0142\nj50m\nVk,lm~j!. ~C2!\nIn other words, Eq. 
~C2!expresses that an m-dimensional\ncon\ufb01guration space of side length U2kb, onto which an\ninput oflbis given, can be decomposed into subsets where\njunits will \ufb01re. It is obvious that for the case j50 in which\nan avalanche stops, the subset Vk,lm(0) is given by\nVk,lm~0!5Pm\u00840,U2~k1l!b\u0085. ~C3!\nWhile decomposition ~C2!partitions Pmconsidering the\nwhole remaining part of an avalanche with junits \ufb01ring, one\ncan equally well partition Pmconsidering only the next step\nin an avalanche, where the input of lbcan trigger up to m\nunits to \ufb01re immediately. With idenoting the number of\nthese units, the disjoint decomposition then readsFINITE-SIZE EFFECTS OF AVALANCHE DYNAMICS PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-11\nPm~0,U2kb!5\u0142\ni50m\n\u0142\nIi,m\nPIi,mPm2i\u00840,U2~k1l!b\u0085\n^Pi\u0084U2~k1l!b,U2kb\u0085U\nIm2i,m.\n~C4!\nUsing an appropriately scaled ~C2!as a decomposition of\nPm2i, the common input ibdue to the iunits \ufb01ring will\nsubsequently trigger j82ielements until the avalanche\nstops,\nPm2i\u00840,U2~k1l!b\u00855\u0142\nj85im\nVk1l,im2i~j82i!. ~C5!\nInserting Eq. ~C5!into Eq. ~C4!, and comparing Eqs. ~C2!\n~C4!, one obtains after changing the precedence of the\nunions over", "doc_id": "d2ba30fc-0e5f-4891-895d-6d148e261c7d", "embedding": null, "doc_hash": "80b40250f066146c9829a508255ba8e80a84b970f65564c0377d4d3d3e05c06b", "extra_info": null, "node_info": {"start": 55882, "end": 58281}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "e8595aab-231a-4568-b65b-36a42d7ef43d", "3": "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e"}}, "__type__": "1"}, "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e": {"__data__": {"text": "an appropriately scaled ~C2!as a decomposition of\nPm2i, the common input ibdue to the iunits \ufb01ring will\nsubsequently trigger j82ielements until the avalanche\nstops,\nPm2i\u00840,U2~k1l!b\u00855\u0142\nj85im\nVk1l,im2i~j82i!. ~C5!\nInserting Eq. ~C5!into Eq. ~C4!, and comparing Eqs. 
~C2!\n~C4!, one obtains after changing the precedence of the\nunions over iandj8,\n\u0142\nj51m\nVk,lm~j!5\u0142\ni51m\n\u0142\nIi,mPIi,mS\u0142\nj85im\nVk1l,im2i~j82i!D\n^Pi\u0084U2~k1l!b,U2kb\u0085U\nIm2i,m\n5\u0142\nj851m\n\u0142\ni51j8\n\u0142\nIi,mPIi,mVk1l,im2i~j82i!\n^Pi\u0084U2~k1l!b,U2kb\u0085U\nIm2i,m.~C6!In Eq. ~C6!, we excluded subsets where the input lbtriggers\nnone of the units, because we already know the result fromEq.~C3!.\nIf we require Eq. ~C6!to represent a recursive description\nof the avalanche dynamics, then one speci\ufb01c V\nk,lm(j) should\nbe composed of terms with j8satisfying ( j82i)1i5j,\nVk,lm~j!5\u0142\ni51j\n\u0142Ii,mPIi,mVk1l,im2i~j2i!\n^Pi\u0084U2~k1l!^b,U2kb\u0085uIm2i,m.~C7!\nThis expression is the required recursion formula.\n2. Subtraction of the noninhabited region\nFor the following considerations, we introduce the abbre-\nviation Fn\u201cPn\u0084U2(k1l)b,U2kb\u0085.\nWe de\ufb01ne V\u00afby subtracting LNfrom V,\nV\u00afk,lm~j!^FN2muIm,N\u201cVk,lm~j!^FN2muIm,NnLN~a,U!.\n~C8!\nIfk1l<N, using Eqs. ~A2!and ~A3!reveals that\nFn\u00f8LN(a,U)5B. Through this property, Eq. ~C7!re-\nmains valid if one replaces the V\u2019s by the V\u00af\u2019s.\nThus it suf\ufb01ces to explicitly compute V\u00afk,lm(j) forj50.\nInserting Eq. ~A3!into Eq. ~C8!, and using Lemmas ~A6!,\n~A9!, and ~A11!yields\nV\u00afk,lm~0!^FN2muIm,N\n5Vk,lm~0!^FN2muIm,Nn\u0142\ni51m\n\u0142\nJi,N#Im,NGiN~a,U,Ji,N!@Eq.~A6!#\n5Vk,lm~0!^FN2muIm,NnLm~am/N,U!^PN2m~0,U!uIm,N@Eq.~A9!#\n5FVk,lm~0!nLmSmaU/N\nU2~k1l!b,U2~k1l!bDG^FN2muIm,N@Eq.~A11!#. ~C9!\nFrom this expression, V\u00afk,lm(0) can be extracted as\nV\u00afk,lm~0!5Vk,lm~0!nLmSmaU/N\nU2~k1l!b,U2~k1l!bD.\n~C10!3. 
Calculation of the volumes of the", "doc_id": "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e", "embedding": null, "doc_hash": "20f220fa1f7ae1983261aba8c2144f6155bd484dd423820af04d97c7a3a353d7", "extra_info": null, "node_info": {"start": 58553, "end": 60321}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "d2ba30fc-0e5f-4891-895d-6d148e261c7d", "3": "6995bb4c-d4f0-412b-8b28-2350734610c8"}}, "__type__": "1"}, "6995bb4c-d4f0-412b-8b28-2350734610c8": {"__data__": {"text": "~C9!\nFrom this expression, V\u00afk,lm(0) can be extracted as\nV\u00afk,lm~0!5Vk,lm~0!nLmSmaU/N\nU2~k1l!b,U2~k1l!bD.\n~C10!3. Calculation of the volumes of the regions\nWithSk,lm(j)\u201cV\u0084Vk,lm(j)\u0085andS\u00afk,lm(j)\u201cV\u0084V\u00afk,lm(j)\u0085, Eqs.\n~C3!and~C7!de\ufb01ne recursions for con\ufb01guration space vol-\numesEURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-12\nSk,lm~j!5H\u0084U2~k1l!b\u0085m, j50\n(\ni51jSm\niD~lb!iSk1l,im2i~j2i!,j<m,~C11!\nS\u00afk,lm~j!5H\u0084U2~k1l!b\u0085m21\u0084U2~k1l1m!b\u0085,j50\n(\ni51jSm\niD~lb!iS\u00afk1l,im2i~j2i!, j<m,\n~C12!\nwhereS\u00afk,lm(0) was possible to calculate from Sk,lm(0) by sim-\nply subtracting the volume of Lm, because its size in Eq.\n~C10!has been scaled not to extend over Vk,lm(0),\nS\u00afk,lm~0!5Sk,lm~0!2aUm\nN\u0084U2~k1l!b\u0085m21.~C13!\nUsing similar arguments, one also obtains a recursion for\nthe volumes Dk,lm(j) corresponding to regions of avalanche\ndurations j,\nDk,lm~j!5(\ni51m2j11Sm\niD~lb!iDk1l,im2i~j21!, ~C14!\nfor 0 ,j<NandDk,lm(0)5Sk,lm(0).Correcting for the nonin-\nhabited volume leads to the same recursion for the volumes\nD\u00afk,lm(j) for 0 ,j<NwithD\u00afk,lm(0)5S\u00afk,lm(0).To obtain a closed expression for the volumes SandS\u00af,w e\nwill now prove the following proposition.\nProposition. ForU.0,k1l1j,N, andj<m,\nSk,lm~j!5Sm\njDbjl~j1l!j21\u0084U2~k1l1j!b\u0085m2j,\nS\u00afk,lm~j!5Sm\njDbjl~j1l!j21\u0084U2~k1l1j!b\u0085m2j21\n3\u0084U2~m1k1l!b\u0085. 
~C15!\nThe proof is possible by induction over n, and it is very\nsimilar for SandS\u00af. We will therefore only give the proof for\nS\u00afin order to shorten this appendix.\nBasis.Form51,jcan either be 0 or 1, and using Eq.\n~C13!leads to\nS\u00afk,l1~0!5\u0084U2~11k1l!b\u00851, ~C16!\nS\u00afk,l1~1!5~lb!1. ~C17!\nInduction. For the induction we assume that Eq. ~C15!has\nbeen proven for m<n21. Thus we have to prove that Eq.\n~C15!holds also for", "doc_id": "6995bb4c-d4f0-412b-8b28-2350734610c8", "embedding": null, "doc_hash": "5c53c1cb8eebb1b4489fea60eea22a3af5e047a1b2a07df3fa50f7f8f11cc1c6", "extra_info": null, "node_info": {"start": 60486, "end": 62188}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e", "3": "cb1939e2-a3e0-4684-9c87-7cbf0c6f5314"}}, "__type__": "1"}, "cb1939e2-a3e0-4684-9c87-7cbf0c6f5314": {"__data__": {"text": "~C15!\nThe proof is possible by induction over n, and it is very\nsimilar for SandS\u00af. We will therefore only give the proof for\nS\u00afin order to shorten this appendix.\nBasis.Form51,jcan either be 0 or 1, and using Eq.\n~C13!leads to\nS\u00afk,l1~0!5\u0084U2~11k1l!b\u00851, ~C16!\nS\u00afk,l1~1!5~lb!1. ~C17!\nInduction. For the induction we assume that Eq. ~C15!has\nbeen proven for m<n21. Thus we have to prove that Eq.\n~C15!holds also for m5n,\nS\u00afk,lm~j!5(\ni51jSm\niD~lb!iS\u00afk1l,im2i~j2i!\n5(\ni51jSm\niD~lb!iSm2i\nj2iDbj2i\u0084U2~m1k1l!b\u0085\u0084U2~k1l1j!b\u0085m2j21i~j2i1i!j2i21\n5Sm\njDbj\u0084U2~k1l1j!b\u0085m2j21\u0084U2~m1k1l!b\u0085H(\ni51j\nliSj\niDijj2i21J\n5Sm\njDbj\u0084U2~k1l1j!b\u0085m2j21\u0084U2~m1k1l!b\u0085Hl(\ni850j21Sj21\ni8Dli8j(j21)2i8J\n5Sm\njDbjl~l1j!r21\u0084U2~k1l1j!b\u0085m2j21\u0084U2~m1k1l!b\u0085. j\n~C18!\nWith this closed expression, it will be possible to \ufb01nally\ncalculate an expression of the avalanche probabilities.\n4. 
Avalanche probabilities\nAn avalanche starts if one unit is triggered by an input of\nstrength DUto \ufb01re. Thus the phase space volumes\nV\u00af(L,N,a) andV(L,N,a) for avalanches of size L.0 areobtained by multiplying DUwithS\u00af0,1N21(L21) andS0,1N21(L\n21), respectively. These speci\ufb01c S\u2019s are the volumes of the\nsubsets of dimension N21 containing states for which j\n5L21 neurons will subsequently \ufb01re, triggered by an input\noflbwithl51.V\u00af(0,N,a) andV(0,N,a) can be compute-\ndas the remaining part of the whole phase space, and weobtainFINITE-SIZE EFFECTS OF AVALANCHE DYNAMICS PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-13\nV~L,N,a!5HUN21~U2DU!,L50\nDUS0,1N21~L21!,L.0,\n5HUN21~U2DU!, L50\nDU\nLUN21SN21\nL21DSLa\nNDL21S12La\nNDN2L\n,L.0~C19!\nand\nV\u00af~L,N,a!5HUN21~12a!FU2DUS12a2a\nNDG, L50\nDU\nLUN21SN21\nL21DSLa\nNDL21S12La\nNDN2L21\n~12a!,L.0.~C20!\nThe probability of an avalanche P(L,N,a) is then given\nbyP(L,N,a)5V\u00af(L,N,a)/V\u0084PN(0,a)nLN(0,a)\u0085. With Eq.\n~B1!,V\u0084PN(0,a)nLN(0,a)\u00855UN(12a); then using Eq. ~3!\nleads to the \ufb01nal result @see also Eq.", "doc_id": "cb1939e2-a3e0-4684-9c87-7cbf0c6f5314", "embedding": null, "doc_hash": "c6f55f67f8b25ae149d4795dc1605505565e16fb62fb9efcea3732b0b8bd1e45", "extra_info": null, "node_info": {"start": 61968, "end": 63825}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "6995bb4c-d4f0-412b-8b28-2350734610c8", "3": "12fb5d28-5df9-418b-95ed-2632fdfc8513"}}, "__type__": "1"}, "12fb5d28-5df9-418b-95ed-2632fdfc8513": {"__data__": {"text": "L50\nDU\nLUN21SN21\nL21DSLa\nNDL21S12La\nNDN2L21\n~12a!,L.0.~C20!\nThe probability of an avalanche P(L,N,a) is then given\nbyP(L,N,a)5V\u00af(L,N,a)/V\u0084PN(0,a)nLN(0,a)\u0085. With Eq.\n~B1!,V\u0084PN(0,a)nLN(0,a)\u00855UN(12a); then using Eq. ~3!\nleads to the \ufb01nal result @see also Eq. 
~8!#\np~L,N,a!5V\u00af~L,N,a!\nUN~12a!2V\u00af~0,N,a!51\nLSN21\nL21D\n3SLa\nNDL21S12La\nNDN2L21N~12a!\nN2~N21!a.j\n~C21!\nIfp(L,N,a) had been calculated without subtracting thenoninhabited region, the \ufb01nal expression would have read\n@50#\np~L,N,a!51\nLSN21\nL21DSLa\nNDL21S12La\nNDN2L\n.\n~C22!\nWith similar arguments, the expression for the avalanche\ndurations pd(L,N,a) becomes\npd~L,N,a!5DUD\u00af0,1N21~L21!\nUN~12a!F12U1DUS12a2a\nNDG.\n~C23!\n@1#V. Frette, K. Christensen, A.M. Malthe-So \u0094renssen, J. Feder, T.\nJo\u0094ssang, and P. Meakin, Nature ~London !397,4 9~1996!.\n@2#B. Gutenberg and C.F. Richter,Ann. Geophys. ~C.N.R.S. !9,1\n~1956!.\n@3#P. Bak, K. Chen, and M. Creutz, Nature ~London !342, 780\n~1989!.\n@4#H.J.S. Feder and J. Feder, Phys. Rev. Lett. 66, 2669 ~1991!.\n@5#A.M. Alencar, S.V. Buldyrev, A. Majumdar, H.E. Stanley, and\nB. Suki, Phys. Rev. Lett. 87, 088101 ~2001!.\n@6#P. Bak, C. Tang, and K. Wiesenfeld, Phys. Rev. Lett. 59, 381\n~1987!.\n@7#P. Bak, C. Tang, and K. Wiesenfeld, Phys. Rev. A 38, 364\n~1988!.\n@8#L.P. Kadanoff, S.R. Nagel, L. Wu, and S. Zhou, Phys. Rev. A\n39, 6524 ~1989!.\n@9#G. Grinstein, D.-H. Lee, and S. Sachdev, Phys. Rev. Lett. 64,\n1927 ~1990!.\n@10#L. Pietronero, A. Vespignani, and S. Zapperi, Phys. Rev. Lett.\n72, 1690 ~1994!.\n@11#D. Sornette, A. Johansen, and I. Dornic, J. Phys. I 5, 325\n~1995!.\n@12#A. Vespignani, S. Zapperi, and V. Loreto, Phys. Rev. Lett. 77,\n4560 ~1996!.@13#A. Vespignani and S. Zapperi, Phys. Rev. Lett. 78, 4793\n~1997!.\n@14#D. Dhar and R. Ramaswamy, Phys. Rev. Lett. 63, 1659 ~1989!.\n@15#D. Dhar, Phys. Rev. Lett. 64, 1613 ~1990!.\n@16#T. Hwa and M. Kardar, Phys. Rev. Lett. 62, 1813 ~1989!.\n@17#S.S. Manna, L.B. Kiss, and J. Kerte \u00b4sz, J. Stat. 
Phys.", "doc_id": "12fb5d28-5df9-418b-95ed-2632fdfc8513", "embedding": null, "doc_hash": "be20b20392adf17fe5fa38e0b8d56cb92e923180c3e8d30b0465804034c4b995", "extra_info": null, "node_info": {"start": 63946, "end": 65877}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "cb1939e2-a3e0-4684-9c87-7cbf0c6f5314", "3": "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d"}}, "__type__": "1"}, "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d": {"__data__": {"text": "J. Phys. I 5, 325\n~1995!.\n@12#A. Vespignani, S. Zapperi, and V. Loreto, Phys. Rev. Lett. 77,\n4560 ~1996!.@13#A. Vespignani and S. Zapperi, Phys. Rev. Lett. 78, 4793\n~1997!.\n@14#D. Dhar and R. Ramaswamy, Phys. Rev. Lett. 63, 1659 ~1989!.\n@15#D. Dhar, Phys. Rev. Lett. 64, 1613 ~1990!.\n@16#T. Hwa and M. Kardar, Phys. Rev. Lett. 62, 1813 ~1989!.\n@17#S.S. Manna, L.B. Kiss, and J. Kerte \u00b4sz, J. Stat. Phys. 61, 923\n~1990!.\n@18#T. Tsuchiya and M. Katori, Phys. Rev. E 61, 1183 ~2000!.\n@19#Z. Olami, H.J.S. Feder, and K. Christensen, Phys. Rev. Lett.\n68, 1244 ~1992!.\n@20#K. Christensen and Z. Olami, Phys. Rev. A 46, 1829 ~1992!.\n@21#J.E.S. Socolar, G. Grinstein, and C. Jayaprakash, Phys. Rev. E\n47, 2366 ~1993!.\n@22#P. Grassberger, Phys. Rev. E 49, 2436 ~1994!.\n@23#A\u00b4. Corral, C.J. Pe \u00b4rez, A. D\u0131 \u00b4az-Guilera, and A. Arenas, Phys.\nRev. Lett. 74,1 1 8 ~1995!.\n@24#A.A. Middleton and C. Tang, Phys. Rev. Lett. 74, 742 ~1995!.\n@25#S. Lise and H.J. Jensen, Phys. Rev. Lett. 76, 2326 ~1996!.\n@26#H.-M. Bro \u00a8ker and P. Grassberger, Phys. Rev. E 56, 3944\n~1997!.\n@27#M.-L.ChabanolandV.Hakim,Phys.Rev.E 56,R2343 ~1997!.\n@28#O. Kinouchi, S.T.R. Pinho, and C.P.C. Prado, Phys. Rev. E 58,\n3997 ~1998!.EURICH, HERRMANN, AND ERNST PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-14\n@29#H.-M. Bro \u00a8ker and P. Grassberger, Europhys. Lett. 30, 319\n~1995!.\n@30#D. Stauffer, Introduction to Percolation Theory ~Taylor &\nFrancis, London, 1985 !.\n@31#A. Corral, C.J. Pe \u00b4rez, A. 
D\u0131 \u00b4az-Guilera, and A. Arenas, Phys.\nRev. Lett. 75, 3697 ~1995!.\n@32#M. Usher, M. Stemmler, and Z. Olami, Phys. Rev. Lett. 74,\n326~1995!.\n@33#S. Bottani, Phys. Rev. Lett. 74, 4189 ~1995!.\n@34#A.V.M. Herz and J.J. Hop\ufb01eld, Phys. Rev. Lett. 75, 1222\n~1995!.\n@35#U. Ernst, K. Pawelzik, and T. Geisel, Phys. Rev. Lett. 74, 1570\n~1995!.\n@36#J.J. Hop\ufb01eld, Nature ~London !376,3 3~1995!.\n@37#J.J. Hop\ufb01eld and A.V.M. Herz, Proc. Natl. Acad. Sci. U.S.A.\n92, 6655 ~1995!.\n@38#W. Gerstner, Phys. Rev. Lett. 76, 1755 ~1996!.\n@39#J. Foss, F. Moss, and J.G. Milton, Phys. Rev. E 55, 4536\n~1997!.\n@40#B. Ruf and M. Schmitt, IEEE Trans. Neural Netw. 9,", "doc_id": "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d", "embedding": null, "doc_hash": "d1c082619393529521241debd6fc3f1e29a721ce617ac543c5e604285a679ee1", "extra_info": null, "node_info": {"start": 65770, "end": 67859}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "12fb5d28-5df9-418b-95ed-2632fdfc8513", "3": "569a331a-329c-4d7b-9508-a7b361c3cac0"}}, "__type__": "1"}, "569a331a-329c-4d7b-9508-a7b361c3cac0": {"__data__": {"text": "Ernst, K. Pawelzik, and T. Geisel, Phys. Rev. Lett. 74, 1570\n~1995!.\n@36#J.J. Hop\ufb01eld, Nature ~London !376,3 3~1995!.\n@37#J.J. Hop\ufb01eld and A.V.M. Herz, Proc. Natl. Acad. Sci. U.S.A.\n92, 6655 ~1995!.\n@38#W. Gerstner, Phys. Rev. Lett. 76, 1755 ~1996!.\n@39#J. Foss, F. Moss, and J.G. Milton, Phys. Rev. E 55, 4536\n~1997!.\n@40#B. Ruf and M. Schmitt, IEEE Trans. Neural Netw. 9, 575\n~1998!.@41#C.W. Eurich, K. Pawelzik, U. Ernst, J.D. Cowan, and J.G.\nMilton, Phys. Rev. Lett. 82, 1594 ~1999!.\n@42#W.A. Phillips and W. Singer, Behav. Brain Sci. 20, 657 ~1997!.\n@43#C. Koch and H. Schuster, Neural Comput. 4,2 1 1 ~1992!.\n@44#J. Lin, K. Pawelzik, U. Ernst, and T. Sejnowski, Network 9,\n333~1998!.\n@45#H. Sompolinsky, D. Golomb, and D. Kleinfeld, Proc. Natl.\nAcad. Sci. U.S.A. 87, 7200 ~1990!.\n@46#M.C. Teich, IEEE Trans. Biomed. Eng. 
36, 150 ~1989!.\n@47#F. Gru\u00a8neis, M. Nakao, M. Yamamoto, T. Musha, and H. Naka-\nhama, Biol. Cybern. 60, 161 ~1989!.\n@48#F. Gru\u00a8neis, M. Nakao, and M. Yamamoto, Biol. Cybern. 62,\n407~1990!.\n@49#The term \u2018\u2018critical\u2019\u2019 is applied to \ufb01nite systems here meaning\nan approximate power-law behavior; true criticality requiresthe thermodynamic limit N!\u2018, which will be discussed be-\nlow.\n@50#By mapping the variable names L\u00b0s,L\na/N\u00b0p, and (N\n21)/L\u00b0n, expression ~C22!becomes identical to Eq. ~36!in\nRef.@26#.FINITE-SIZE EFFECTS OF AVALANCHE DYNAMICS PHYSICAL REVIEW E 66, 066137 ~2002!\n066137-15", "doc_id": "569a331a-329c-4d7b-9508-a7b361c3cac0", "embedding": null, "doc_hash": "0c112ca8347fc8ad43aad6378eb6d93c4f34244486345e35b6a4ef6d31955097", "extra_info": null, "node_info": {"start": 67828, "end": 69239}, "relationships": {"1": "950acfd7-35b6-40e0-bf25-1f85e4b3951d", "2": "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d"}}, "__type__": "1"}}}
|
storage1/index_store.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"index_store/data": {"fef9aeda-bbb7-4579-ada5-0c428f2bb0a7": {"__type__": "vector_store", "__data__": {"index_id": "fef9aeda-bbb7-4579-ada5-0c428f2bb0a7", "summary": null, "nodes_dict": {"b3e21c43-f122-4228-800c-1f3b373b3960": "b3e21c43-f122-4228-800c-1f3b373b3960", "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90": "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90", "b2006ef0-2c96-48f2-b5a4-9f5ecebab917": "b2006ef0-2c96-48f2-b5a4-9f5ecebab917", "4d4caa9d-39d9-4341-8719-5c55510fc86f": "4d4caa9d-39d9-4341-8719-5c55510fc86f", "4753dd74-9764-40be-8baa-81348163e0bd": "4753dd74-9764-40be-8baa-81348163e0bd", "27891564-af8b-4c1b-b331-bae9cd5f0d38": "27891564-af8b-4c1b-b331-bae9cd5f0d38", "67231d57-e066-4cda-84fe-b3df6c8e09a8": "67231d57-e066-4cda-84fe-b3df6c8e09a8", "84db127e-7780-4291-987b-f77a9b7f9dc4": "84db127e-7780-4291-987b-f77a9b7f9dc4", "7e380a6d-4fc5-4186-b584-08ef2cf17f80": "7e380a6d-4fc5-4186-b584-08ef2cf17f80", "e282d256-9f11-4515-b813-b90e589f831e": "e282d256-9f11-4515-b813-b90e589f831e", "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb": "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb", "851bfea7-7ea6-48e4-a4ae-3ed590d606c2": "851bfea7-7ea6-48e4-a4ae-3ed590d606c2", "0631eda8-3675-4993-bc6f-915690b5017e": "0631eda8-3675-4993-bc6f-915690b5017e", "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c": "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c", "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd": "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd", "701d0aff-6665-4b22-98b5-4bd41470f1db": "701d0aff-6665-4b22-98b5-4bd41470f1db", "f5f3457d-3d38-45f9-bea0-60efa8549999": "f5f3457d-3d38-45f9-bea0-60efa8549999", "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e": "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e", "e8595aab-231a-4568-b65b-36a42d7ef43d": "e8595aab-231a-4568-b65b-36a42d7ef43d", "d2ba30fc-0e5f-4891-895d-6d148e261c7d": "d2ba30fc-0e5f-4891-895d-6d148e261c7d", "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e": "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e", "6995bb4c-d4f0-412b-8b28-2350734610c8": "6995bb4c-d4f0-412b-8b28-2350734610c8", "cb1939e2-a3e0-4684-9c87-7cbf0c6f5314": 
"cb1939e2-a3e0-4684-9c87-7cbf0c6f5314", "12fb5d28-5df9-418b-95ed-2632fdfc8513": "12fb5d28-5df9-418b-95ed-2632fdfc8513", "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d": "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d", "569a331a-329c-4d7b-9508-a7b361c3cac0": "569a331a-329c-4d7b-9508-a7b361c3cac0"}, "doc_id_dict": {"950acfd7-35b6-40e0-bf25-1f85e4b3951d": ["b3e21c43-f122-4228-800c-1f3b373b3960", "ecd2b3d1-32cd-40e9-a2cb-2a1318531b90", "b2006ef0-2c96-48f2-b5a4-9f5ecebab917", "4d4caa9d-39d9-4341-8719-5c55510fc86f", "4753dd74-9764-40be-8baa-81348163e0bd", "27891564-af8b-4c1b-b331-bae9cd5f0d38", "67231d57-e066-4cda-84fe-b3df6c8e09a8", "84db127e-7780-4291-987b-f77a9b7f9dc4", "7e380a6d-4fc5-4186-b584-08ef2cf17f80", "e282d256-9f11-4515-b813-b90e589f831e", "bba7a515-7ef3-4ba6-9572-9438b8bc5ebb", "851bfea7-7ea6-48e4-a4ae-3ed590d606c2", "0631eda8-3675-4993-bc6f-915690b5017e", "3487ebc9-ffaa-48a7-aa8e-872ddaa89c8c", "30fa272d-b0ff-4b7f-b7cb-17f3e098b8bd", "701d0aff-6665-4b22-98b5-4bd41470f1db", "f5f3457d-3d38-45f9-bea0-60efa8549999", "4ab5ea03-d5fa-4de3-9a71-cd0118f66b5e", "e8595aab-231a-4568-b65b-36a42d7ef43d", "d2ba30fc-0e5f-4891-895d-6d148e261c7d", "d1be3b9a-914d-42cd-92c5-efe7ebb6f33e", "6995bb4c-d4f0-412b-8b28-2350734610c8", "cb1939e2-a3e0-4684-9c87-7cbf0c6f5314", "12fb5d28-5df9-418b-95ed-2632fdfc8513", "9a1f2bb5-a1d8-42b2-bcf0-4af29dd3142d", "569a331a-329c-4d7b-9508-a7b361c3cac0"]}, "embeddings_dict": {}}}}}
|
storage1/vector_store.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|