Commit 37c7a25f by haoyifan

haoyifan add

parent 1c2e5ba1
......@@ -2,535 +2,30 @@
\bibstyle{aaai21}
\citation{kottur-etal-2017-natural,bogin2018emergence,lazaridou2018emergence,choi2018compositional,jaques2019social,mul2019mastering,kharitonov2019egg,labash2020perspective,chaabouni2020compositionality}
\citation{andreas2018measuring,chaabouni2020compositionality}
\citation{kirby2015compression}
\citation{kottur-etal-2017-natural}
\citation{choi2018compositional}
\citation{lazaridou2018emergence}
\citation{evtimova2018emergent}
\citation{li2019ease}
\citation{chaabouni-etal-2019-word}
\citation{chaabouni2020compositionality}
\citation{mordatch2017emergence}
\citation{kharitonov2019egg}
\citation{lazaridou2018emergence,evtimova2018emergent}
\citation{kottur-etal-2017-natural,li2019ease}
\citation{li2019ease}
\citation{david1969convention}
\@LN@col{1}
\@LN{0}{0}
\@LN{1}{0}
\@LN{2}{0}
\@LN{3}{0}
\@LN{4}{0}
\@LN{5}{0}
\@LN{6}{0}
\@LN{7}{0}
\@LN{8}{0}
\@LN{9}{0}
\@LN{10}{0}
\@LN{11}{0}
\@LN{12}{0}
\@LN{13}{0}
\@LN{14}{0}
\@LN{15}{0}
\@LN{16}{0}
\@LN{17}{0}
\@LN{18}{0}
\@LN{19}{0}
\@LN{20}{0}
\@LN{21}{0}
\@LN{22}{0}
\@LN{23}{0}
\newlabel{sec:introduction}{{}{1}}
\@LN{24}{0}
\@LN{25}{0}
\@LN{26}{0}
\@LN{27}{0}
\@LN{28}{0}
\@LN{29}{0}
\@LN{30}{0}
\@LN{31}{0}
\@LN{32}{0}
\@LN{33}{0}
\@LN{34}{0}
\@LN{35}{0}
\@LN{36}{0}
\@LN{37}{0}
\@LN{38}{0}
\@LN{39}{0}
\@LN@col{2}
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
\newlabel{fig:induction}{{1}{1}}
\@LN{40}{0}
\@LN{41}{0}
\@LN{42}{0}
\@LN{43}{0}
\@LN{44}{0}
\@LN{45}{0}
\@LN{46}{0}
\@LN{47}{0}
\@LN{48}{0}
\@LN{49}{0}
\@LN{50}{0}
\@LN{51}{0}
\@LN{52}{0}
\@LN{53}{0}
\@LN{54}{0}
\@LN{55}{0}
\@LN{56}{0}
\@LN{57}{0}
\@LN{58}{0}
\@LN{59}{0}
\@LN{60}{0}
\citation{kirby2015compression}
\citation{kottur-etal-2017-natural}
\citation{lazaridou2018emergence}
\citation{li2019ease}
\citation{evtimova2018emergent}
\citation{kottur-etal-2017-natural,choi2018compositional,lazaridou2018emergence,evtimova2018emergent,chaabouni2020compositionality}
\citation{choi2018compositional}
\citation{kottur-etal-2017-natural}
\citation{lazaridou2018emergence}
\citation{chaabouni2020compositionality}
\citation{choi2018compositional}
\newlabel{tab:rel}{{1}{2}}
\@LN@col{1}
\@LN{61}{1}
\@LN{62}{1}
\@LN{63}{1}
\@LN{64}{1}
\@LN{65}{1}
\@LN{66}{1}
\@LN{67}{1}
\@LN{68}{1}
\@LN{69}{1}
\@LN{70}{1}
\@LN{71}{1}
\@LN{72}{1}
\@LN{73}{1}
\@LN{74}{1}
\@LN{75}{1}
\@LN{76}{1}
\@LN{77}{1}
\@LN{78}{1}
\@LN{79}{1}
\@LN{80}{1}
\@LN{81}{1}
\@LN{82}{1}
\@LN{83}{1}
\@LN{84}{1}
\@LN{85}{1}
\@LN{86}{1}
\@LN{87}{1}
\@LN{88}{1}
\@LN{89}{1}
\@LN{90}{1}
\@LN{91}{1}
\@LN{92}{1}
\@LN{93}{1}
\@LN{94}{1}
\@LN{95}{1}
\@LN{96}{1}
\@LN{97}{1}
\@LN{98}{1}
\@LN{99}{1}
\@LN{100}{1}
\@LN{101}{1}
\@LN{102}{1}
\@LN{103}{1}
\@LN{104}{1}
\@LN@col{2}
\@LN{105}{1}
\@LN{106}{1}
\@LN{107}{1}
\@LN{108}{1}
\@LN{109}{1}
\@LN{110}{1}
\@LN{111}{1}
\@LN{112}{1}
\@LN{113}{1}
\@LN{114}{1}
\@LN{115}{1}
\@LN{116}{1}
\newlabel{sec:relatedwork}{{}{2}}
\@LN{117}{1}
\@LN{118}{1}
\@LN{119}{1}
\@LN{120}{1}
\@LN{121}{1}
\@LN{122}{1}
\@LN{123}{1}
\@LN{124}{1}
\@LN{125}{1}
\@LN{126}{1}
\@LN{127}{1}
\@LN{128}{1}
\@LN{129}{1}
\@LN{130}{1}
\@LN{131}{1}
\@LN{132}{1}
\@LN{133}{1}
\@LN{134}{1}
\@LN{135}{1}
\@LN{136}{1}
\@LN{137}{1}
\@LN{138}{1}
\@LN{139}{1}
\@LN{140}{1}
\@LN{141}{1}
\@LN{142}{1}
\@LN{143}{1}
\@LN{144}{1}
\@LN{145}{1}
\@LN{146}{1}
\@LN{147}{1}
\@LN{148}{1}
\@LN@col{1}
\newlabel{fig:game}{{2}{3}}
\@LN{149}{2}
\@LN{150}{2}
\@LN{151}{2}
\@LN{152}{2}
\@LN{153}{2}
\@LN{154}{2}
\@LN{155}{2}
\@LN{156}{2}
\@LN{157}{2}
\@LN{158}{2}
\@LN{159}{2}
\@LN{160}{2}
\@LN{161}{2}
\@LN{162}{2}
\@LN{163}{2}
\@LN{164}{2}
\@LN{165}{2}
\@LN{166}{2}
\@LN{167}{2}
\@LN{168}{2}
\@LN{169}{2}
\@LN{170}{2}
\@LN{171}{2}
\newlabel{sec:thory}{{}{3}}
\@LN{172}{2}
\@LN{173}{2}
\@LN{174}{2}
\@LN{175}{2}
\newlabel{ssec:env}{{}{3}}
\@LN{176}{2}
\@LN{177}{2}
\@LN{178}{2}
\@LN{179}{2}
\@LN{180}{2}
\@LN{181}{2}
\@LN{182}{2}
\@LN{183}{2}
\@LN{184}{2}
\@LN{185}{2}
\@LN{186}{2}
\@LN{187}{2}
\@LN{188}{2}
\@LN{189}{2}
\@LN{190}{2}
\@LN@col{2}
\newlabel{al:learning}{{1}{3}}
\@LN{191}{2}
\@LN{192}{2}
\@LN{193}{2}
\@LN{194}{2}
\@LN{195}{2}
\@LN{196}{2}
\@LN{197}{2}
\@LN{198}{2}
\@LN{199}{2}
\@LN{200}{2}
\@LN{201}{2}
\@LN{202}{2}
\@LN{203}{2}
\@LN{204}{2}
\newlabel{ssec:agent}{{}{3}}
\@LN{205}{2}
\@LN{206}{2}
\@LN{207}{2}
\@LN{208}{2}
\@LN{209}{2}
\@LN{210}{2}
\@LN{211}{2}
\@LN{212}{2}
\@LN{213}{2}
\@LN{214}{2}
\@LN{215}{2}
\@LN{216}{2}
\newlabel{fig:agents}{{3}{4}}
\@LN@col{1}
\@LN{217}{3}
\@LN{218}{3}
\@LN{219}{3}
\@LN{220}{3}
\@LN{221}{3}
\@LN{222}{3}
\newlabel{ssec:training}{{}{4}}
\@LN{223}{3}
\@LN{224}{3}
\@LN{225}{3}
\@LN{226}{3}
\@LN{227}{3}
\@LN{228}{3}
\@LN{229}{3}
\@LN{230}{3}
\@LN{231}{3}
\@LN{232}{3}
\@LN{233}{3}
\@LN{234}{3}
\@LN{235}{3}
\@LN{236}{3}
\@LN{237}{3}
\@LN{238}{3}
\@LN{239}{3}
\newlabel{sec:mis}{{}{4}}
\@LN{240}{3}
\@LN{241}{3}
\@LN{242}{3}
\@LN{243}{3}
\@LN{244}{3}
\@LN{245}{3}
\@LN{246}{3}
\@LN@col{2}
\newlabel{fig:modeling}{{4}{4}}
\newlabel{fig:unilateral}{{5}{4}}
\@LN{247}{3}
\@LN{248}{3}
\@LN{249}{3}
\@LN{250}{3}
\@LN{251}{3}
\@LN{252}{3}
\@LN{253}{3}
\@LN{254}{3}
\@LN{255}{3}
\@LN{256}{3}
\@LN{257}{3}
\@LN{258}{3}
\@LN{259}{3}
\@LN{260}{3}
\@LN{261}{3}
\citation{}
\citation{}
\@LN@col{1}
\newlabel{fig:exp1}{{6}{5}}
\@LN{262}{4}
\@LN{263}{4}
\@LN{264}{4}
\newlabel{eq:cmi}{{3}{5}}
\@LN{265}{4}
\@LN{266}{4}
\@LN{267}{4}
\@LN{268}{4}
\newlabel{eq:ri}{{4}{5}}
\@LN{269}{4}
\@LN{270}{4}
\@LN{271}{4}
\newlabel{eq:mri}{{5}{5}}
\@LN{272}{4}
\@LN{273}{4}
\@LN{274}{4}
\@LN{275}{4}
\@LN{276}{4}
\@LN{277}{4}
\@LN{278}{4}
\@LN{279}{4}
\@LN{280}{4}
\@LN{281}{4}
\@LN{282}{4}
\newlabel{eq:mis2}{{6}{5}}
\@LN{283}{4}
\@LN@col{2}
\newlabel{fig:exp2}{{7}{5}}
\@LN{284}{4}
\newlabel{eq:mis2}{{7}{5}}
\@LN{285}{4}
\@LN{286}{4}
\@LN{287}{4}
\@LN{288}{4}
\@LN{289}{4}
\@LN{290}{4}
\@LN{291}{4}
\@LN{292}{4}
\@LN{293}{4}
\@LN{294}{4}
\@LN{295}{4}
\@LN{296}{4}
\newlabel{sec:exp}{{}{5}}
\@LN{297}{4}
\@LN{298}{4}
\newlabel{tab:exp10}{{2}{5}}
\newlabel{fig:exp3}{{8}{6}}
\@LN@col{1}
\@LN{299}{5}
\@LN{300}{5}
\@LN{301}{5}
\@LN{302}{5}
\@LN{303}{5}
\@LN{304}{5}
\@LN{305}{5}
\@LN{306}{5}
\@LN{307}{5}
\@LN{308}{5}
\@LN{309}{5}
\@LN{310}{5}
\@LN{311}{5}
\@LN{312}{5}
\@LN{313}{5}
\@LN{314}{5}
\@LN{315}{5}
\@LN{316}{5}
\@LN{317}{5}
\@LN{318}{5}
\@LN{319}{5}
\@LN{320}{5}
\@LN{321}{5}
\@LN{322}{5}
\@LN{323}{5}
\@LN{324}{5}
\@LN@col{2}
\@LN{325}{5}
\@LN{326}{5}
\@LN{327}{5}
\@LN{328}{5}
\@LN{329}{5}
\@LN{330}{5}
\@LN{331}{5}
\@LN{332}{5}
\@LN{333}{5}
\@LN{334}{5}
\@LN{335}{5}
\@LN{336}{5}
\@LN{337}{5}
\@LN{338}{5}
\@LN{339}{5}
\@LN{340}{5}
\@LN{341}{5}
\@LN{342}{5}
\@LN{343}{5}
\@LN{344}{5}
\@LN{345}{5}
\@LN{346}{5}
\@LN{347}{5}
\@LN{348}{5}
\@LN{349}{5}
\@LN{350}{5}
\bibdata{ref.bib}
\bibcite{andreas2018measuring}{{1}{2018}{{Andreas}}{{}}}
\bibcite{bogin2018emergence}{{2}{2018}{{Bogin, Geva, and Berant}}{{}}}
\bibcite{chaabouni2020compositionality}{{3}{2020}{{Chaabouni et~al.}}{{Chaabouni, Kharitonov, Bouchacourt, Dupoux, and Baroni}}}
\bibcite{chaabouni-etal-2019-word}{{4}{2019}{{Chaabouni et~al.}}{{Chaabouni, Kharitonov, Lazaric, Dupoux, and Baroni}}}
\bibcite{choi2018compositional}{{5}{2018}{{Choi, Lazaridou, and de~Freitas}}{{}}}
\bibcite{david1969convention}{{6}{1969}{{David}}{{}}}
\bibcite{evtimova2018emergent}{{7}{2018}{{Evtimova et~al.}}{{Evtimova, Drozdov, Kiela, and Cho}}}
\bibcite{jaques2019social}{{8}{2019}{{Jaques et~al.}}{{Jaques, Lazaridou, Hughes, Gulcehre, Ortega, Strouse, Leibo, and De~Freitas}}}
\bibcite{kharitonov2019egg}{{9}{2019}{{Kharitonov et~al.}}{{Kharitonov, Chaabouni, Bouchacourt, and Baroni}}}
\bibcite{kirby2015compression}{{10}{2015}{{Kirby et~al.}}{{Kirby, Tamariz, Cornish, and Smith}}}
\bibcite{kottur-etal-2017-natural}{{11}{2017}{{Kottur et~al.}}{{Kottur, Moura, Lee, and Batra}}}
\bibcite{labash2020perspective}{{12}{2020}{{Labash et~al.}}{{Labash, Aru, Matiisen, Tampuu, and Vicente}}}
\bibcite{lazaridou2018emergence}{{13}{2018}{{Lazaridou et~al.}}{{Lazaridou, Hermann, Tuyls, and Clark}}}
\bibcite{li2019ease}{{14}{2019}{{Li and Bowling}}{{}}}
\bibcite{mordatch2017emergence}{{15}{2017}{{Mordatch and Abbeel}}{{}}}
\@LN@col{1}
\newlabel{fig:bench}{{9}{7}}
\@LN{351}{6}
\@LN{352}{6}
\@LN{353}{6}
\@LN{354}{6}
\@LN{355}{6}
\@LN{356}{6}
\@LN{357}{6}
\@LN{358}{6}
\@LN{359}{6}
\@LN{360}{6}
\@LN{361}{6}
\@LN{362}{6}
\@LN{363}{6}
\@LN{364}{6}
\@LN{365}{6}
\@LN{366}{6}
\@LN{367}{6}
\@LN{368}{6}
\newlabel{sec:con}{{}{7}}
\@LN{369}{6}
\@LN{370}{6}
\@LN{371}{6}
\@LN{372}{6}
\@LN{373}{6}
\@LN{374}{6}
\@LN{375}{6}
\@LN{376}{6}
\@LN{377}{6}
\@LN{378}{6}
\@LN{379}{6}
\@LN{380}{6}
\@LN{381}{6}
\@LN{382}{6}
\@LN{383}{6}
\@LN@col{2}
\@LN{384}{6}
\@LN{385}{6}
\@LN{386}{6}
\@LN{387}{6}
\@LN{388}{6}
\@LN{389}{6}
\@LN{390}{6}
\@LN{391}{6}
\@LN{392}{6}
\@LN{393}{6}
\@LN{394}{6}
\@LN{395}{6}
\@LN{396}{6}
\@LN{397}{6}
\@LN{398}{6}
\@LN{399}{6}
\@LN{400}{6}
\@LN{401}{6}
\@LN{402}{6}
\@LN{403}{6}
\@LN{404}{6}
\@LN{405}{6}
\@LN{406}{6}
\@LN{407}{6}
\@LN{408}{6}
\@LN{409}{6}
\@LN{410}{6}
\@LN{411}{6}
\@LN{412}{6}
\@LN{413}{6}
\@LN{414}{6}
\@LN{415}{6}
\@LN{416}{6}
\@LN{417}{6}
\@LN{418}{6}
\@LN{419}{6}
\@LN{420}{6}
\@LN{421}{6}
\@LN{422}{6}
\@LN{423}{6}
\@LN{424}{6}
\@LN{425}{6}
\@LN{426}{6}
\@LN{427}{6}
\@LN{428}{6}
\@LN{429}{6}
\@LN{430}{6}
\@LN{431}{6}
\@LN{432}{6}
\@LN{433}{6}
\@LN{434}{6}
\@LN{435}{6}
\@LN{436}{6}
\@LN{437}{6}
\@LN{438}{6}
\bibcite{mul2019mastering}{{16}{2019}{{Mul, Bouchacourt, and Bruni}}{{}}}
\@LN@col{1}
\@LN{439}{7}
\@LN{440}{7}
\@LN{441}{7}
\@LN{442}{7}
\@LN{443}{7}
\@LN{444}{7}
\@LN@col{2}
\bibcite{choi2018compositional}{{4}{2018}{{Choi, Lazaridou, and de~Freitas}}{{}}}
\bibcite{david1969convention}{{5}{1969}{{David}}{{}}}
\bibcite{evtimova2018emergent}{{6}{2018}{{Evtimova et~al.}}{{Evtimova, Drozdov, Kiela, and Cho}}}
\bibcite{jaques2019social}{{7}{2019}{{Jaques et~al.}}{{Jaques, Lazaridou, Hughes, Gulcehre, Ortega, Strouse, Leibo, and De~Freitas}}}
\bibcite{kharitonov2019egg}{{8}{2019}{{Kharitonov et~al.}}{{Kharitonov, Chaabouni, Bouchacourt, and Baroni}}}
\bibcite{kottur-etal-2017-natural}{{9}{2017}{{Kottur et~al.}}{{Kottur, Moura, Lee, and Batra}}}
\bibcite{labash2020perspective}{{10}{2020}{{Labash et~al.}}{{Labash, Aru, Matiisen, Tampuu, and Vicente}}}
\bibcite{lazaridou2018emergence}{{11}{2018}{{Lazaridou et~al.}}{{Lazaridou, Hermann, Tuyls, and Clark}}}
\bibcite{li2019ease}{{12}{2019}{{Li and Bowling}}{{}}}
\bibcite{mordatch2017emergence}{{13}{2017}{{Mordatch and Abbeel}}{{}}}
\bibcite{mul2019mastering}{{14}{2019}{{Mul, Bouchacourt, and Bruni}}{{}}}
\newlabel{fig:exp1}{{1}{3}}
\newlabel{sec:exp}{{}{3}}
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
\newlabel{tab:exp10}{{1}{3}}
\newlabel{fig:exp2}{{2}{4}}
\begin{thebibliography}{16}
\begin{thebibliography}{14}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\providecommand{\urlprefix}{URL }
......@@ -25,16 +25,6 @@ Chaabouni, R.; Kharitonov, E.; Bouchacourt, D.; Dupoux, E.; and Baroni, M.
\newblock Compositionality and generalization in emergent languages.
\newblock \emph{arXiv preprint arXiv:2004.09124} .
\bibitem[{Chaabouni et~al.(2019)Chaabouni, Kharitonov, Lazaric, Dupoux, and
Baroni}]{chaabouni-etal-2019-word}
Chaabouni, R.; Kharitonov, E.; Lazaric, A.; Dupoux, E.; and Baroni, M. 2019.
\newblock Word-order Biases in Deep-agent Emergent Communication.
\newblock In \emph{Proceedings of the 57th Annual Meeting of the Association
for Computational Linguistics}, 5166--5175. Florence, Italy: Association for
Computational Linguistics.
\newblock \doi{10.18653/v1/P19-1509}.
\newblock \urlprefix\url{https://www.aclweb.org/anthology/P19-1509}.
\bibitem[{Choi, Lazaridou, and de~Freitas(2018)}]{choi2018compositional}
Choi, E.; Lazaridou, A.; and de~Freitas, N. 2018.
\newblock Compositional Obverter Communication Learning from Raw Visual Input.
......@@ -67,13 +57,6 @@ Kharitonov, E.; Chaabouni, R.; Bouchacourt, D.; and Baroni, M. 2019.
Natural Language Processing and the 9th International Joint Conference on
Natural Language Processing (EMNLP-IJCNLP): System Demonstrations}, 55--60.
\bibitem[{Kirby et~al.(2015)Kirby, Tamariz, Cornish, and
Smith}]{kirby2015compression}
Kirby, S.; Tamariz, M.; Cornish, H.; and Smith, K. 2015.
\newblock Compression and communication in the cultural evolution of linguistic
structure.
\newblock \emph{Cognition} 141: 87--102.
\bibitem[{Kottur et~al.(2017)Kottur, Moura, Lee, and
Batra}]{kottur-etal-2017-natural}
Kottur, S.; Moura, J.; Lee, S.; and Batra, D. 2017.
......
......@@ -3,46 +3,44 @@ Capacity: max_strings=35307, hash_size=35307, hash_prime=30011
The top-level auxiliary file: AAAI.aux
The style file: aaai21.bst
Database file #1: ref.bib.bib
Warning--I didn't find a database entry for ""
You've used 16 entries,
You've used 14 entries,
2951 wiz_defined-function locations,
681 strings with 8239 characters,
and the built_in function-call counts, 11790 in all, are:
= -- 939
> -- 615
665 strings with 7687 characters,
and the built_in function-call counts, 10036 in all, are:
= -- 793
> -- 525
< -- 0
+ -- 223
- -- 208
* -- 784
:= -- 1853
add.period$ -- 70
call.type$ -- 16
change.case$ -- 140
chr.to.int$ -- 17
cite$ -- 16
duplicate$ -- 853
empty$ -- 835
format.name$ -- 249
if$ -- 2356
+ -- 190
- -- 177
* -- 658
:= -- 1586
add.period$ -- 59
call.type$ -- 14
change.case$ -- 121
chr.to.int$ -- 15
cite$ -- 14
duplicate$ -- 731
empty$ -- 713
format.name$ -- 212
if$ -- 2001
int.to.chr$ -- 1
int.to.str$ -- 1
missing$ -- 174
newline$ -- 92
num.names$ -- 64
pop$ -- 477
missing$ -- 147
newline$ -- 80
num.names$ -- 56
pop$ -- 416
preamble$ -- 1
purify$ -- 122
purify$ -- 105
quote$ -- 0
skip$ -- 404
skip$ -- 351
stack$ -- 0
substring$ -- 435
swap$ -- 400
substring$ -- 351
swap$ -- 335
text.length$ -- 0
text.prefix$ -- 0
top$ -- 0
type$ -- 144
type$ -- 126
warning$ -- 0
while$ -- 84
while$ -- 71
width$ -- 0
write$ -- 217
(There was 1 warning)
write$ -- 186
This is pdfTeX, Version 3.1415926-2.5-1.40.14 (TeX Live 2013) (format=pdflatex 2019.7.31) 17 SEP 2020 00:22
This is pdfTeX, Version 3.1415926-2.5-1.40.14 (TeX Live 2013) (format=pdflatex 2019.7.31) 17 SEP 2020 01:50
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.
......@@ -240,11 +240,7 @@ Package: algorithmic 2009/08/24 v0.1 Document Style `algorithmic'
\ALC@tlm=\skip50
\algorithmicindent=\skip51
)
(./AAAI.aux
LaTeX Warning: Label `eq:mis2' multiply defined.
)
(./AAAI.aux)
\openout1 = `AAAI.aux'.
LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 172.
......@@ -328,28 +324,9 @@ e
Package caption Info: Begin \AtBeginDocument code.
Package caption Info: float package is loaded.
Package caption Info: End \AtBeginDocument code.
\titlearea=\box33
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <14.4> not available
(Font) Font shape `OT1/ptm/b/n' tried instead on input line 175.
LaTeX Font Info: Try loading font information for U+msa on input line 175.
(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsa.fd
File: umsa.fd 2013/01/14 v3.01 AMS symbols A
)
LaTeX Font Info: Try loading font information for U+msb on input line 175.
(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsb.fd
File: umsb.fd 2013/01/14 v3.01 AMS symbols B
)
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <12> not available
(Font) Font shape `OT1/ptm/b/n' tried instead on input line 175.
\actualheight=\skip52
Underfull \vbox (badness 10000) detected at line 175
[]
LaTeX Font Info: Try loading font information for OMS+ptm on input line 175.
(/usr/share/texlive/texmf-dist/tex/latex/psnfss/omsptm.fd
File: omsptm.fd
)
......@@ -357,188 +334,90 @@ LaTeX Font Info: Font shape `OMS/ptm/m/n' in size <9> not available
(Font) Font shape `OMS/cmsy/m/n' tried instead on input line 175.
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <10> not available
(Font) Font shape `OT1/ptm/b/n' tried instead on input line 177.
LaTeX Font Info: Try loading font information for OT1+phv on input line 186.
(./tex/introduction.tex
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <12> not available
(Font) Font shape `OT1/ptm/b/n' tried instead on input line 1.
LaTeX Warning: Reference `fig:induction' on page 1 undefined on input line 80.
(/usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1phv.fd
File: ot1phv.fd 2001/06/04 scalable font definitions for OT1/phv.
LaTeX Font Info: Try loading font information for U+msa on input line 85.
(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsa.fd
File: umsa.fd 2013/01/14 v3.01 AMS symbols A
)
(./tex/introduction.tex
LaTeX Font Info: Try loading font information for U+msb on input line 85.
pdfTeX warning: pdflatex (file ./fig/Figure1_motivation.pdf): PDF inclusion: fo
und PDF version <1.7>, but at most version <1.5> allowed
<fig/Figure1_motivation.pdf, id=1, 490.85182pt x 310.29929pt>
File: fig/Figure1_motivation.pdf Graphic file (type pdf)
(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsb.fd
File: umsb.fd 2013/01/14 v3.01 AMS symbols B
)
LaTeX Font Info: Font shape `OMS/ptm/m/n' in size <10> not available
(Font) Font shape `OMS/cmsy/m/n' tried instead on input line 212.
<use fig/Figure1_motivation.pdf>
Package pdftex.def Info: fig/Figure1_motivation.pdf used on input line 42.
(pdftex.def) Requested size: 239.39438pt x 151.33775pt.
Underfull \hbox (badness 1237) in paragraph at lines 47--47
[]\OT1/ptm/m/n/10 Figure 1: |The dis-tri-bu-tion of com-po-si-tion-al-ity for 1
00
[]
LaTeX Warning: Reference `sec:relatedwork' on page 1 undefined on input line 22
8.
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <9> not available
(Font) Font shape `OT1/ptm/b/n' tried instead on input line 69.
Underfull \hbox (badness 1728) in paragraph at lines 80--92
[]\OT1/ptm/m/n/10 Figure 1[] re-ports the com-po-si-tion-al-ity when train-ing
[]
LaTeX Warning: Reference `sec:thory' on page 1 undefined on input line 229.
Underfull \vbox (badness 1796) has occurred while \output is active []
LaTeX Warning: Reference `sec:mis' on page 1 undefined on input line 230.
[1{/usr/share/texlive/texmf-var/fonts/map/pdftex/updmap/pdftex.map}
LaTeX Warning: Reference `sec:con' on page 1 undefined on input line 234.
) [1
<./fig/Figure1_motivation.pdf>]
LaTeX Font Info: Font shape `OMS/ptm/m/n' in size <10> not available
(Font) Font shape `OMS/cmsy/m/n' tried instead on input line 212.
) (./tex/relatedwork.tex [2]) (./tex/theory.tex
<fig/Figure2_The_referential_game_environment.pdf, id=44, 870.65277pt x 413.635
33pt>
File: fig/Figure2_The_referential_game_environment.pdf Graphic file (type pdf)
<use fig/Figure2_The_referential_game_environment.pdf>
Package pdftex.def Info: fig/Figure2_The_referential_game_environment.pdf used
on input line 6.
(pdftex.def) Requested size: 239.39438pt x 113.73428pt.
pdfTeX warning: pdflatex (file ./fig/Figure3_The_architecture_of_agents.pdf): P
DF inclusion: found PDF version <1.7>, but at most version <1.5> allowed
<fig/Figure3_The_architecture_of_agents.pdf, id=45, 785.10117pt x 253.55226pt>
File: fig/Figure3_The_architecture_of_agents.pdf Graphic file (type pdf)
<use fig/Figure3_The_architecture_of_agents.pdf>
Package pdftex.def Info: fig/Figure3_The_architecture_of_agents.pdf used on inp
ut line 13.
(pdftex.def) Requested size: 430.9106pt x 139.164pt.
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <10.95> not available
(Font) Font shape `OT1/ptm/b/n' tried instead on input line 61.
[3 <./fig/Figure2_The_referential_game_environment.pdf>]) (./tex/theory2.tex
<fig/Figure4_The_information_channel.pdf, id=84, 362.42403pt x 53.24893pt>
File: fig/Figure4_The_information_channel.pdf Graphic file (type pdf)
<use fig/Figure4_The_information_channel.pdf>
Package pdftex.def Info: fig/Figure4_The_information_channel.pdf used on input
line 8.
(pdftex.def) Requested size: 239.39438pt x 35.17284pt.
<fig/Figure5_An_emergent_language.pdf, id=85, 265.05022pt x 202.96829pt>
File: fig/Figure5_An_emergent_language.pdf Graphic file (type pdf)
<use fig/Figure5_An_emergent_language.pdf>
Package pdftex.def Info: fig/Figure5_An_emergent_language.pdf used on input lin
e 16.
(pdftex.def) Requested size: 191.51622pt x 146.65735pt.
[4 <./fig/Figure3_The_architecture_of_agents.pdf> <./fig/Figure4_The_informati
on_channel.pdf
pdfTeX warning: pdflatex (file ./fig/Figure4_The_information_channel.pdf): PDF
inclusion: multiple pdfs with page group included in a single page
> <./fig/Figure5_An_emergent_language.pdf
pdfTeX warning: pdflatex (file ./fig/Figure5_An_emergent_language.pdf): PDF inc
lusion: multiple pdfs with page group included in a single page
>]
<fig/Figure6_Compostionality_of_symbolic_language.pdf, id=195, 1142.2675pt x 66
4.4825pt>
File: fig/Figure6_Compostionality_of_symbolic_language.pdf Graphic file (type p
df)
<use fig/Figure6_Compostionality_of_symbolic_language.pdf>
Package pdftex.def Info: fig/Figure6_Compostionality_of_symbolic_language.pdf u
sed on input line 44.
(pdftex.def) Requested size: 237.00174pt x 137.86234pt.
)
(./tex/experiments.tex
<fig/Figure7_The_ratio_of_high_compositional_language.pdf, id=196, 1161.33875pt
x 614.295pt>
File: fig/Figure7_The_ratio_of_high_compositional_language.pdf Graphic file (ty
pe pdf)
<use fig/Figure7_The_ratio_of_high_compositional_language.pdf>
Package pdftex.def Info: fig/Figure7_The_ratio_of_high_compositional_language.p
df used on input line 8.
(pdftex.def) Requested size: 237.00174pt x 125.35951pt.
pdfTeX warning: pdflatex (file ./fig/Figure9.pdf): PDF inclusion: found PDF ver
sion <1.7>, but at most version <1.5> allowed
<fig/Figure9.pdf, id=197, 1680.31058pt x 979.6891pt>
File: fig/Figure9.pdf Graphic file (type pdf)
<use fig/Figure9.pdf>
Package pdftex.def Info: fig/Figure9.pdf used on input line 16.
(pdftex.def) Requested size: 505.89pt x 294.95541pt.
pdfTeX warning: pdflatex (file ./fig/Figure8_Three_artificial_languages_with_di
fferent_MIS.pdf): PDF inclusion: found PDF version <1.7>, but at most version <
1.5> allowed
<fig/Figure8_Three_artificial_languages_with_different_MIS.pdf, id=198, 155.715
76pt x 145.69933pt>
File: fig/Figure8_Three_artificial_languages_with_different_MIS.pdf Graphic fil
e (type pdf)
<use fig/Figure8_Three_artificial_languages_with_different_MIS.pdf>
Package pdftex.def Info: fig/Figure8_Three_artificial_languages_with_different_
MIS.pdf used on input line 64.
(pdftex.def) Requested size: 191.51622pt x 179.20909pt.
[5 <./fig/Figure6_Compostionality_of_symbolic_language.pdf> <./fig/Figure7_The
_ratio_of_high_compositional_language.pdf
pdfTeX warning: pdflatex (file ./fig/Figure7_The_ratio_of_high_compositional_la
nguage.pdf): PDF inclusion: multiple pdfs with page group included in a single
page
>] [6 <./fig/Figure9.pdf>]) (./tex/last.tex) (./AAAI.bbl
Underfull \hbox (badness 5726) in paragraph at lines 23--27
\OT1/ptm/m/n/10 er-al-iza-tion in emer-gent lan-guages. \OT1/ptm/m/it/10 arXiv
preprint
[]
[7 <./fig/Figure8_Three_artificial_languages_with_different_MIS.pdf>]) [8
]
(./AAAI.aux)
{/usr/share/texlive/texmf-var/fonts/map/pdftex/updmap/pdftex.map}]
(./AAAI.bbl) [2] (./tex/appendix.tex
<fig/Appendix_Figure1_MIS.pdf, id=16, 1336.995pt x 758.835pt>
File: fig/Appendix_Figure1_MIS.pdf Graphic file (type pdf)
<use fig/Appendix_Figure1_MIS.pdf>
Package pdftex.def Info: fig/Appendix_Figure1_MIS.pdf used on input line 41.
(pdftex.def) Requested size: 500.83388pt x 284.26138pt.
<fig/Appendix_Figure2_Ratio.pdf, id=17, 1336.995pt x 758.835pt>
File: fig/Appendix_Figure2_Ratio.pdf Graphic file (type pdf)
<use fig/Appendix_Figure2_Ratio.pdf>
Package pdftex.def Info: fig/Appendix_Figure2_Ratio.pdf used on input line 51.
(pdftex.def) Requested size: 500.83388pt x 284.26138pt.
[3 <./fig/Appendix_Figure1_MIS.pdf>])
[4 <./fig/Appendix_Figure2_Ratio.pdf>] (./AAAI.aux)
LaTeX Warning: There were multiply-defined labels.
LaTeX Warning: There were undefined references.
)
Here is how much of TeX's memory you used:
4792 strings out of 495063
71689 string characters out of 3182201
147051 words of memory out of 3000000
7842 multiletter control sequences out of 15000+200000
28635 words of font info for 63 fonts, out of 3000000 for 9000
4603 strings out of 495063
68172 string characters out of 3182201
135258 words of memory out of 3000000
7681 multiletter control sequences out of 15000+200000
20476 words of font info for 52 fonts, out of 3000000 for 9000
14 hyphenation exceptions out of 8191
38i,14n,38p,808b,637s stack positions out of 5000i,500n,10000p,200000b,50000s
38i,11n,38p,367b,287s stack positions out of 5000i,500n,10000p,200000b,50000s
{/usr/share/texlive/texmf-dist/fonts/enc/dvips/base/8r.enc}</usr/share/texliv
e/texmf-dist/fonts/type1/public/amsfonts/cm/cmex10.pfb></usr/share/texlive/texm
f-dist/fonts/type1/public/amsfonts/cm/cmmi10.pfb></usr/share/texlive/texmf-dist
/fonts/type1/public/amsfonts/cm/cmmi5.pfb></usr/share/texlive/texmf-dist/fonts/
type1/public/amsfonts/cm/cmmi6.pfb></usr/share/texlive/texmf-dist/fonts/type1/p
ublic/amsfonts/cm/cmmi7.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/a
msfonts/cm/cmmi9.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts
/cm/cmr10.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr
6.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr7.pfb></
usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr9.pfb></usr/shar
e/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmsy10.pfb></usr/share/texl
ive/texmf-dist/fonts/type1/public/amsfonts/cm/cmsy6.pfb></usr/share/texlive/tex
mf-dist/fonts/type1/public/amsfonts/cm/cmsy7.pfb></usr/share/texlive/texmf-dist
/fonts/type1/public/amsfonts/cm/cmsy9.pfb></usr/share/texlive/texmf-dist/fonts/
type1/public/amsfonts/cm/cmti10.pfb></usr/share/texlive/texmf-dist/fonts/type1/
public/amsfonts/cm/cmti7.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/
amsfonts/cm/cmti9.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfont
s/symbols/msbm10.pfb></usr/share/texlive/texmf-dist/fonts/type1/urw/helvetic/uh
vr8a.pfb></usr/share/texlive/texmf-dist/fonts/type1/urw/times/utmb8a.pfb></usr/
share/texlive/texmf-dist/fonts/type1/urw/times/utmr8a.pfb></usr/share/texlive/t
exmf-dist/fonts/type1/urw/times/utmri8a.pfb>
Output written on AAAI.pdf (8 pages, 715409 bytes).
e/texmf-dist/fonts/type1/public/amsfonts/cm/cmmi10.pfb></usr/share/texlive/texm
f-dist/fonts/type1/public/amsfonts/cm/cmmi7.pfb></usr/share/texlive/texmf-dist/
fonts/type1/public/amsfonts/cm/cmmi9.pfb></usr/share/texlive/texmf-dist/fonts/t
ype1/public/amsfonts/cm/cmr10.pfb></usr/share/texlive/texmf-dist/fonts/type1/pu
blic/amsfonts/cm/cmr6.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/ams
fonts/cm/cmr7.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm
/cmr9.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmsy10.
pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmsy6.pfb></u
sr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmsy9.pfb></usr/shar
e/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti10.pfb></usr/share/texl
ive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti7.pfb></usr/share/texlive/tex
mf-dist/fonts/type1/public/amsfonts/cm/cmti9.pfb></usr/share/texlive/texmf-dist
/fonts/type1/urw/times/utmb8a.pfb></usr/share/texlive/texmf-dist/fonts/type1/ur
w/times/utmr8a.pfb></usr/share/texlive/texmf-dist/fonts/type1/urw/times/utmri8a
.pfb>
Output written on AAAI.pdf (4 pages, 184806 bytes).
PDF statistics:
395 PDF objects out of 1000 (max. 8388607)
224 compressed objects within 3 object streams
166 PDF objects out of 1000 (max. 8388607)
79 compressed objects within 1 object stream
0 named destinations out of 1000 (max. 500000)
46 words of extra memory for PDF output out of 10000 (max. 10000000)
11 words of extra memory for PDF output out of 10000 (max. 10000000)
No preview for this file type
......@@ -27,7 +27,8 @@
\setlength\headheight{0pt} \setlength\headsep{0pt}
%\setlength\footheight{0pt} \setlength\footskip{0pt}
\thispagestyle{empty} \pagestyle{empty}
\flushbottom \twocolumn \sloppy
%\flushbottom \twocolumn \sloppy
\flushbottom \sloppy
% We're never going to need a table of contents, so just flush it to
% save space --- suggested by drstrip@sandia-2
\def\addcontentsline#1#2#3{}
......@@ -47,7 +48,8 @@ All rights reserved.}
\def\thefootnote{\fnsymbol{footnote}}
% gf: Don't see why we'd want the footnotemark to be 0pt wide
%\def\@makefnmark{\hbox to 0pt{$^{\@thefnmark}$\hss}}
\twocolumn[\@maketitle] \@thanks
%\twocolumn[\@maketitle] \@thanks
\@thanks
\endgroup
% gf: Insert copyright slug unless turned off
\if T\copyright@on\insert\footins{\noindent\footnotesize\copyright@text}\fi
......
......@@ -170,7 +170,7 @@
\DeclareMathOperator*{\argmax}{arg\,max}
\begin{document}
\linenumbers
%\linenumbers
\maketitle
......@@ -229,17 +229,17 @@ compositional language with a higher probability.}
\end{abstract}
\input{tex/introduction.tex}
\input{tex/relatedwork.tex}
\input{tex/theory.tex}
\input{tex/theory2.tex}
\input{tex/experiments.tex}
\input{tex/last.tex}
%\input{tex/relatedwork.tex}
%\input{tex/theory.tex}
%\input{tex/theory2.tex}
%\input{tex/experiments.tex}
%\input{tex/last.tex}
%\clearpage
%\newpage
\bibliography{ref.bib}
%\newpage
%\input{tex/appendix.tex}
\newpage
\input{tex/appendix.tex}
\end{document}
......@@ -37,39 +37,39 @@ vocabulary can express almost infinite concepts.}
%extract information from a single symbol.
%
%
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{fig/Figure1_motivation.pdf}
\caption{The distribution of compositionality for 100 emerged symbolic
languages without
any induction. It can be observed that high compositional symbolic language
seldom emerged (e.g., $<5\%$ for compositionality $>0.99$). Moreover, varying
the vocabulary size does not affect the compositionality notably.}
\label{fig:induction}
\end{figure}
\begin{table*}[t]
\centering
\small
\caption{Handcrafted inductions in related works.}
\label{tab:rel}
\begin{tabular}{llllll}
\toprule
Works & Handcrafted induction & Compositionality\\
\midrule
\cite{kirby2015compression}&Expressivity and compressibility&Not quantitative, Speaker\\
\cite{kottur-etal-2017-natural}&Listener's memory&Not quantitative, Speaker\\
\cite{choi2018compositional}&Maximum message length&Not quantitative, Speaker+Listener\\
\cite{lazaridou2018emergence}&Structure of input data&Quantitative, Speaker\\
\cite{evtimova2018emergent}&Multi-modal scenarios&Quantitative, Speaker\\
\cite{li2019ease}&Population size, resetting all listeners&Quantitative, Speaker\\
\cite{chaabouni-etal-2019-word}&Word-order constraints&Not quantitative, Speaker\\
\cite{chaabouni2020compositionality}&Easier to decode&Quantitative, Speaker\\
\textbf{Ours} & \textbf{None} & \textbf{Quantitative, Speaker+Listener} \\
\bottomrule
\end{tabular}
\end{table*}
%\begin{figure}[t]
% \centering
% \includegraphics[width=\columnwidth]{fig/Figure1_motivation.pdf}
% \caption{The distribution of compositionality for 100 emerged symbolic
% languages without
% any induction. It can be observed that high compositional symbolic language
% seldom emerged (e.g., $<5\%$ for compositionality $>0.99$). Moreover, varying
% the vocabulary size does not affect the compositionality notably.}
% \label{fig:induction}
% \end{figure}
%\begin{table*}[t]
% \centering
% \small
% \caption{Handcrafted inductions in related works.}
% \label{tab:rel}
% \begin{tabular}{llllll}
% \toprule
% Works & Handcrafted induction & Compositionality\\
% \midrule
% \cite{kirby2015compression}&Expressivity and compressibility&Not quantitative, Speaker\\
% \cite{kottur-etal-2017-natural}&Listener's memory&Not quantitative, Speaker\\
% \cite{choi2018compositional}&Maximum message length&Not quantitative, Speaker+Listener\\
% \cite{lazaridou2018emergence}&Structure of input data&Quantitative, Speaker\\
% \cite{evtimova2018emergent}&Multi-modal scenarios&Quantitative, Speaker\\
% \cite{li2019ease}&Population size, resetting all listeners&Quantitative, Speaker\\
% \cite{chaabouni-etal-2019-word}&Word-order constraints&Not quantitative, Speaker\\
% \cite{chaabouni2020compositionality}&Easier to decode&Quantitative, Speaker\\
% \textbf{Ours} & \textbf{None} & \textbf{Quantitative, Speaker+Listener} \\
% \bottomrule
% \end{tabular}
% \end{table*}
Prior studies focus on achieving high compositional symbolic language
through \emph{deliberately handcrafted} inductions, e.g., additional rewards~\cite{mordatch2017emergence},
......
%%%% ijcai21-multiauthor.tex
\typeout{IJCAI--21 Multiple authors example}
% These are the instructions for authors for IJCAI-21.
\documentclass{article}
\pdfpagewidth=8.5in
\pdfpageheight=11in
% The file ijcai21.sty is NOT the same as previous years'
\usepackage{ijcai21}
% Use the postscript times font!
\usepackage{times}
\renewcommand*\ttdefault{txtt}
\usepackage{soul}
\usepackage{url}
\usepackage[hidelinks]{hyperref}
\usepackage[utf8]{inputenc}
\usepackage[small]{caption}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{booktabs}
\urlstyle{same}
% the following package is optional:
%\usepackage{latexsym}
% Following comment is from ijcai97-submit.tex:
% The preparation of these files was supported by Schlumberger Palo Alto
% Research, AT\&T Bell Laboratories, and Morgan Kaufmann Publishers.
% Shirley Jowell, of Morgan Kaufmann Publishers, and Peter F.
% Patel-Schneider, of AT\&T Bell Laboratories collaborated on their
% preparation.
% These instructions can be modified and used in other conferences as long
% as credit to the authors and supporting agencies is retained, this notice
% is not changed, and further modification or reuse is not restricted.
% Neither Shirley Jowell nor Peter F. Patel-Schneider can be listed as
% contacts for providing assistance without their prior permission.
% To use for other conferences, change references to files and the
% conference appropriate and use other authors, contacts, publishers, and
% organizations.
% Also change the deadline and address for returning papers and the length and
% page charge instructions.
% Put where the files are available in the appropriate places.
\title{IJCAI--21 Example on typesetting multiple authors}
\author{
First Author$^1$\footnote{Contact Author}\and
Second Author$^2$\and
Third Author$^{2,3}$\And
Fourth Author$^4$\\
\affiliations
$^1$First Affiliation\\
$^2$Second Affiliation\\
$^3$Third Affiliation\\
$^4$Fourth Affiliation\\
\emails
\{first, second\}@example.com,
third@other.example.com,
fourth@example.com
}
\begin{document}
\maketitle
\begin{abstract}
This short example shows a contrived example on how to format the authors' information for {\it IJCAI--21 Proceedings} using \LaTeX{}.
\end{abstract}
\section{Introduction}
This short example shows a contrived example on how to format the authors' information for {\it IJCAI--21 Proceedings}.
\section{Author names}
Each author name must be followed by:
\begin{itemize}
\item A newline {\tt \textbackslash{}\textbackslash{}} command for the last author.
\item An {\tt \textbackslash{}And} command for the second to last author.
\item An {\tt \textbackslash{}and} command for the other authors.
\end{itemize}
\section{Affiliations}
After all authors, start the affiliations section by using the {\tt \textbackslash{}affiliations} command.
Each affiliation must be terminated by a newline {\tt \textbackslash{}\textbackslash{}} command. Make sure that you include the newline on the last affiliation too.
\section{Mapping authors to affiliations}
In some scenarios, the affiliation of each author is clear without any further indication (\emph{e.g.}, all authors share the same affiliation, all authors have a single and different affiliation). In these situations you don't need to do anything special.
In more complex scenarios you will have to clearly indicate the affiliation(s) for each author. This is done by using numeric math superscripts {\tt \$\{\^{}$i,j, \ldots$\}\$}. You must use numbers, not symbols, because those are reserved for footnotes in this section (should you need them). Check the authors definition in this example for reference.
\section{Emails}
This section is optional, and can be omitted entirely if you prefer. If you want to include e-mails, you should either include all authors' e-mails or just the contact author(s)' ones.
Start the e-mails section with the {\tt \textbackslash{}emails} command. After that, write all emails you want to include separated by a comma and a space, following the same order used for the authors (\emph{i.e.}, the first e-mail should correspond to the first author, the second e-mail to the second author and so on).
You may ``contract'' consecutive e-mails on the same domain as shown in this example (write the users' part within curly brackets, followed by the domain name). Only e-mails of the exact same domain may be contracted. For instance, you cannot contract ``person@example.com'' and ``other@test.example.com'' because the domains are different.
\end{document}
Textbook reference: Abelson, Sussman & Sussman, "Structure and Interpretation of Computer Programs" (SICP), MIT Press, 1985.
@book{ abelson-et-al:scheme,
author = "Harold Abelson and Gerald~Jay Sussman and Julie Sussman",
title = "Structure and Interpretation of Computer Programs",
publisher = "MIT Press",
address = "Cambridge, Massachusetts",
year = "1985"
}
Conference paper: Baumgartner, Gottlob & Flesca on the Lixto visual information-extraction system, VLDB 2001.
@inproceedings{ bgf:Lixto,
author = "Robert Baumgartner and Georg Gottlob and Sergio Flesca",
title = "Visual Information Extraction with {Lixto}",
booktitle = "Proceedings of the 27th International Conference on Very Large Databases",
pages = "119--128",
publisher = "Morgan Kaufmann",
address = "Rome, Italy",
month = "September",
year = "2001"
}
Journal article: Brachman & Schmolze's overview of the KL-ONE knowledge representation system, Cognitive Science 9(2), 1985.
@article{ brachman-schmolze:kl-one,
author = "Ronald~J. Brachman and James~G. Schmolze",
title = "An overview of the {KL-ONE} knowledge representation system",
journal = "Cognitive Science",
volume = "9",
number = "2",
pages = "171--216",
month = "April--June",
year = "1985"
}
Journal article: Gottlob on complexity results for nonmonotonic logics, JLC 2(3), 1992. Cited in the instructions' blind-review example.
@article{ gottlob:nonmon,
author = "Georg Gottlob",
title = "Complexity results for nonmonotonic logics",
journal = "Journal of Logic and Computation",
volume = "2",
number = "3",
pages = "397--425",
month = "June",
year = "1992"
}
Journal article: Gottlob, Leone & Scarcello on hypertree decompositions and tractable queries, JCSS 64(3), 2002.
@article{ gls:hypertrees,
author = "Georg Gottlob and Nicola Leone and Francesco Scarcello",
title = "Hypertree Decompositions and Tractable Queries",
journal = "Journal of Computer and System Sciences",
volume = "64",
number = "3",
pages = "579--627",
month = "May",
year = "2002"
}
Journal article: Levesque on the functional approach to knowledge representation, Artificial Intelligence 23(2), 1984.
@article{ levesque:functional-foundations,
author = "Hector~J. Levesque",
title = "Foundations of a functional approach to knowledge representation",
journal = "Artificial Intelligence",
volume = "23",
number = "2",
pages = "155--212",
month = "July",
year = "1984"
}
Conference paper: Levesque's logic of implicit and explicit belief, AAAI 1984.
@inproceedings{ levesque:belief,
author = "Hector~J. Levesque",
title = "A logic of implicit and explicit belief",
booktitle = "Proceedings of the Fourth National Conference on Artificial Intelligence",
publisher = "American Association for Artificial Intelligence",
pages = "198--202",
address = "Austin, Texas",
month = "August",
year = "1984"
}
Journal article: Nebel on compilability and expressive power of propositional planning formalisms, JAIR 12, 2000. (JAIR has no issue numbers, hence no "number" field.)
@article{ nebel:jair-2000,
author = "Bernhard Nebel",
title = "On the compilability and expressive power of propositional planning formalisms",
journal = "Journal of Artificial Intelligence Research",
volume = "12",
pages = "271--315",
year = "2000"
}
Web resource: IJCAI camera-ready submission info page; the doubled braces in "author" keep the corporate name from being parsed as first/last names.
@misc{proceedings,
author = {{IJCAI Proceedings}},
title = {{IJCAI} Camera Ready Submission},
howpublished = {\url{https://proceedings.ijcai.org/info}},
}
\typeout{Conference Style, version of November 2018}
% All bug reports should be directed to proceedings@ijcai.org
% The following comments are from the original ijcai97.sty
% The current two-column conference style.
% Heavily adapted from the IJCAI-89 original style.
% Fixes from various people incorporated up to the IJCAI-95 style.
% Some major changes for the IJCAI-2018 edition
% To use, place in a file called conference.sty, or whatever your conference
% is called, in the TeX search path. (Placing it in the same directory as
% the paper should also work.)
% Prepared by Peter F. Patel-Schneider,
% liberally using the ideas of
% other style hackers, including Barbara Beeton.
% This style is NOT guaranteed to work. It is provided in the hope
% that it will make the preparation of papers easier.
%
% The preparation of this file was supported by Schlumberger Palo Alto
% Research, AT\&T Bell Laboratories, AAAI, and Morgan Kaufmann Publishers.
%
% \pubnote added by J. Scott Penberthy
% These instructions can be modified and used in other conferences as long
% as credit to the authors and supporting agencies is retained, this notice
% is not changed, and further modification or reuse is not restricted.
%
% If you are organizing a conference, and want to use this file, you should
% appoint a contact person to handle any problems!
%
% If you are using this file for the preparation of papers for a
% conference that supplied you with this file, you should contact the
% organizers of the conference if you have any problems. They should have
% much more information than I have.
% There are undoubtedly bugs in this style. If you make bug fixes,
% improvements, etc. please let us know at proceedings@ijcai.org.
% NOTE: Some laser printers have a serious problem printing TeX output.
% These printing devices, commonly known as ``write-white'' laser
% printers, tend to make characters too light. To get around this
% problem, a darker set of fonts must be created for these devices.
% Physical page layout
% Two-column camera-ready layout; \flushbottom equalizes column bottoms and
% \sloppy relaxes inter-word spacing to reduce overfull lines in narrow columns.
\twocolumn \flushbottom \sloppy
% Note that TeX has built-in 1-inch top and left margins.
% The lengths below are offsets from that built-in 1in, so -0.25in yields a
% physical 0.75in margin; text block is 7in x 9in with a 0.25in column gap.
\setlength\topmargin{-0.25in}
\setlength\oddsidemargin{-0.25in}
\setlength\evensidemargin{-0.25in}
\setlength\textheight{9.0in}
\setlength\textwidth{7.0in}
\setlength\columnsep{0.25in}
% No pages numbers or other headers or footers
\setlength\headheight{0pt} \setlength\headsep{0pt}
%\setlength\footheight{0pt} \setlength\footskip{0pt}
\thispagestyle{empty} \pagestyle{empty}
% jsp added:
% \pubnote{#1}: place a publication note #1 as the running head of the first
% page (via the myheadings page style) and blank out the page number.
\def\pubnote#1{\thispagestyle{myheadings}
\markboth{#1}{#1}
\def\thepage{}
}
% Less leading in most fonts (due to the narrow columns)
% The choices were between 1-pt and 1.5-pt leading
% \def\@normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
% \def\small{\@setsize\small{10pt}\ixpt\@ixpt} % 9 point on 10
% \def\footnotesize{\@setsize\footnotesize{10pt}\ixpt\@ixpt} % 9 point on 10
% \def\scriptsize{\@setsize\scriptsize{8pt}\viipt\@viipt} % 7 point on 8
% \def\tiny{\@setsize\tiny{7pt}\vipt\@vipt} % 6 point on 7
% \def\large{\@setsize\large{12pt}\xipt\@xipt} % 11 point on 12
% \def\Large{\@setsize\Large{14pt}\xiipt\@xiipt} % 12 point on 14
% \def\LARGE{\@setsize\LARGE{16pt}\xivpt\@xivpt} % 14 point on 16
% \def\huge{\@setsize\huge{20pt}\xviipt\@xviipt} % 17 point on 20
% \def\Huge{\@setsize\Huge{23pt}\xxpt\@xxpt} % 20 point on 23
% latex2e compatibility mode hack - kek@cs.brown.edu 11/10/98
% LaTeX2e compatibility redefinitions of the standard size-switching commands,
% using tighter leading (e.g. 10pt type on an 11pt baseline) because of the
% narrow two-column layout. Each trailing comment gives "size on baseline".
\def\@normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
\def\normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
\def\small{\@setsize\small{10pt}\ixpt\@ixpt} % 9 point on 10
\def\footnotesize{\@setsize\footnotesize{10pt}\ixpt\@ixpt} % 9 point on 10
\def\scriptsize{\@setsize\scriptsize{8pt}\viipt\@viipt} % 7 point on 8
\def\tiny{\@setsize\tiny{7pt}\vipt\@vipt} % 6 point on 7
\def\large{\@setsize\large{12pt}\xipt\@xipt} % 11 point on 12
\def\Large{\@setsize\Large{14pt}\xiipt\@xiipt} % 12 point on 14
\def\LARGE{\@setsize\LARGE{16pt}\xivpt\@xivpt} % 14 point on 16
\def\huge{\@setsize\huge{20pt}\xviipt\@xviipt} % 17 point on 20
\def\Huge{\@setsize\Huge{23pt}\xxpt\@xxpt} % 20 point on 23
% Paragraphs
\parindent 1em
\parskip 0pt plus 1pt
% Title stuff, taken from deproc.
% \titlebox is the minimum height reserved for the title block; \@maketitle
% below enlarges it when the measured title material is taller.
\newlength\titlepad \setlength\titlepad{0in}
\newlength\titlebox \setlength\titlebox{2.25in}
% \maketitle: typeset the title block across both columns via \twocolumn's
% optional argument. Title footnotes use symbol marks locally; afterwards the
% footnote counter is reset and \maketitle/\@maketitle are disabled so the
% title can only be produced once.
\def\maketitle{\par
\begingroup % to make the footnote style local to the title
\def\thefootnote{\fnsymbol{footnote}}
\def\@makefnmark{$^{\@thefnmark}$}
\twocolumn[\@maketitle] \@thanks
\endgroup
\setcounter{footnote}{0}
\let\maketitle\relax \let\@maketitle\relax
\gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax}%
%
% \@maketitle: two-pass title builder. Pass 1 typesets the title material into
% a savebox purely to measure its height; \titlebox is enlarged if the measured
% height exceeds the default. Pass 2 typesets the real title inside a \vbox of
% exactly \titlebox, with \thanks converted to footnote marks.
\def\@maketitle{%
\newsavebox{\titlearea}
% Measuring pass: build the title off-page; \footnote is neutralized so title
% footnotes do not affect the measured height.
\sbox{\titlearea}{
\let\footnote\thanks\relax
\vbox{
\hsize\textwidth \linewidth\hsize%
\vskip 0.5in%
\centering%
{\LARGE\bf \@title \par}%
\vskip 0.1in%
{%
\def\and{\unskip\thinspace{\rm ,}\enspace}%
\def\And{\unskip\enspace{\rm and}\enspace}%
\def\affiliations{%
\egroup\par\Large\bgroup\rm%
}%
\def\emails{%
\egroup\par\Large\bgroup\rm%
}%
\bgroup\Large\bf\@author\egroup%%
}%
\vskip 0.2in%
}
}
% Grow the reserved title box when the measured title is taller than default.
\newlength\actualheight
\settoheight{\actualheight}{\usebox{\titlearea}}
\ifdim\actualheight>\titlebox
\setlength{\titlebox}{\actualheight}
\fi
%\setlength{\titlepad}{\dimexpr\titlepad+\titlepad\relax}
\setcounter{footnote}{0}
% Real pass: \and separates authors with commas, \And inserts "and";
% \affiliations and \emails close the bold author group and start a new
% \Large roman paragraph for their respective sections.
\vbox to \titlebox {
\def\thanks##1{\footnotemark}\relax
\hsize\textwidth \linewidth\hsize%
\vskip 0.5in%
\centering%
{\LARGE\bf \@title \par}%
\vskip 0.2in plus 4fil minus 0.1in%
{%
\def\and{\unskip\thinspace{\rm ,}\enspace}%
\def\And{\unskip\enspace{\rm and}\enspace}%
\def\affiliations{
\egroup%
\vskip 0.05in minus 0.05in%
\par\bgroup\Large\rm%
}
\def\emails{
\egroup%
\vskip 0.05in minus 0.05in%
\par\bgroup\Large\rm%
}
\bgroup\Large\bf\@author\egroup%
}%
\vskip 0.3in plus 8fil minus 0.1in
}
}
% Abstract environment: centered bold "Abstract" heading above a body set in
% a quote environment (narrower than the column, per the instructions).
\renewenvironment{abstract}{\centerline{\Large\bf
Abstract}\vspace{0.5ex}\begin{quote}}{\par\end{quote}\vskip 1ex}
% Sections with less space
% Compact sectioning via \@startsection{name}{level}{indent}{beforeskip}
% {afterskip}{style}. The negative beforeskip is the standard convention that
% also suppresses the paragraph indent of the text following the heading; the
% negative afterskip on \paragraph makes it a run-in heading.
\def\section{\@startsection{section}{1}{\z@}{-10pt plus
-3pt minus -2pt}{4pt plus 2pt minus 1pt}{\Large\bf\raggedright}}
\def\subsection{\@startsection{subsection}{2}{\z@}{-8pt plus
-2pt minus -2pt}{3pt plus 2pt minus 1pt}{\large\bf\raggedright}}
\def\subsubsection{\@startsection{subsubsection}{3}{\z@}{-6pt plus
-2pt minus -1pt}{1pt plus 1pt minus 1pt}{\normalsize\bf\raggedright}}
\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}{-4pt plus
-2pt minus -1pt}{-1em}{\normalsize\bf}}
\setcounter{secnumdepth}{2} % Don't number subsubsections
% Footnotes
% \footnotesep: gap between consecutive footnotes; \skip\footins: gap between
% the body text and the footnote area; \footnoterule draws a short 5pc rule.
\footnotesep 6.65pt \skip\footins 9pt plus 4pt minus 2pt
\def\footnoterule{\kern-3pt \hrule width 5pc \kern 2.6pt }
\setcounter{footnote}{0}
% Illustrations (floats)
% Vertical separation around floats; \dbl* variants apply to two-column-wide
% (figure*/table*) floats.
\floatsep 12pt plus 2pt minus 2pt
\textfloatsep 16pt plus 2pt minus 4pt
\intextsep 12pt plus 2pt minus 2pt
\dblfloatsep 12pt plus 2pt minus 2pt
\dbltextfloatsep 18pt plus 2pt minus 4pt
% Displays
% Skips around displayed math; the "short" variants apply when the preceding
% text line is short enough not to collide with the display.
\abovedisplayskip 7pt plus2pt minus5pt%
\belowdisplayskip \abovedisplayskip
\abovedisplayshortskip 0pt plus3pt%
\belowdisplayshortskip 4pt plus3pt minus3pt%
% Lists
% Left margins for the six list nesting levels (small, to suit narrow columns).
\leftmargini 2em
\leftmarginii 2em
\leftmarginiii 1em
\leftmarginiv 0.5em
\leftmarginv 0.5em
\leftmarginvi 0.5em
\leftmargin\leftmargini
\labelsep 5pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
% Level-1 list spacing; \@listi is aliased to \@listI and executed once here
% so the top-level dimensions take effect immediately.
\def\@listI{\leftmargin\leftmargini
\parsep 2pt plus 1pt minus 0.5pt%
\topsep 4pt plus 1pt minus 2pt%
\itemsep 2pt plus 1pt minus 0.5pt%
\partopsep 1pt plus 0.5pt minus 0.5pt}
\let\@listi\@listI
\@listi
% Spacing for list nesting levels 2 and 3 (progressively tighter); levels 4-6
% only set margins and label widths.
\def\@listii{\leftmargin\leftmarginii
\labelwidth\leftmarginii\advance\labelwidth-\labelsep
\parsep 1pt plus 0.5pt minus 0.5pt
\topsep 2pt plus 1pt minus 0.5pt
\itemsep \parsep}
\def\@listiii{\leftmargin\leftmarginiii
\labelwidth\leftmarginiii\advance\labelwidth-\labelsep
\parsep 0pt plus 1pt
\partopsep 0.5pt plus 0pt minus 0.5pt
\topsep 1pt plus 0.5pt minus 0.5pt
\itemsep \topsep}
\def\@listiv{\leftmargin\leftmarginiv
\labelwidth\leftmarginiv\advance\labelwidth-\labelsep}
\def\@listv{\leftmargin\leftmarginv
\labelwidth\leftmarginv\advance\labelwidth-\labelsep}
\def\@listvi{\leftmargin\leftmarginvi
\labelwidth\leftmarginvi\advance\labelwidth-\labelsep}
% We're never going to need a table of contents, so just flush it to
% save space --- suggested by drstrip@sandia-2
%\def\addcontentsline#1#2#3{}
%%%% named.sty
\typeout{Named Citation Style, version of 30 November 1994}
% This file implements citations for the ``named'' bibliography style.
% Place it in a file called named.sty in the TeX search path. (Placing it
% in the same directory as the LaTeX document should also work.)
% Prepared by Peter F. Patel-Schneider, with the assistance of several,
% since forgotten, LaTeX hackers.
% This style is NOT guaranteed to work. It is provided in the hope
% that it will make the preparation of papers easier.
%
% There are undoubtedly bugs in this style. If you make bug fixes,
% improvements, etc. please let me know. My e-mail address is:
% pfps@research.att.com
% The preparation of this file was supported by Schlumberger Palo Alto
% Research and AT\&T Bell Laboratories.
% This file can be modified and used in other conferences as long
% as credit to the authors and supporting agencies is retained, this notice
% is not changed, and further modification or reuse is not restricted.
% The ``named'' bibliography style creates citations with labels like
% \citeauthoryear{author-info}{year}
% these labels are processed by the following commands:
% \cite{keylist}
% which produces citations with both author and year,
% enclosed in square brackets
% \shortcite{keylist}
% which produces citations with year only,
% enclosed in square brackets
% \citeauthor{key}
% which produces the author information only
% \citeyear{key}
% which produces the year information only
% Citation delimiters, raised slightly via \@up (square brackets by default).
\def\leftcite{\@up[}\def\rightcite{\@up]}
% \cite: author+year label; when consecutive cited works share the same author
% (\@lastauthor equals \@thisauthor), the repeated author name is omitted.
\def\cite{\def\citeauthoryear##1##2{\def\@thisauthor{##1}%
\ifx \@lastauthor \@thisauthor \relax \else##1, \fi ##2}\@icite}
% \shortcite: year only, bracketed; \citeauthor/\citeyear: bare author or
% year with no brackets (via \@nbcite).
\def\shortcite{\def\citeauthoryear##1##2{##2}\@icite}
\def\citeauthor{\def\citeauthoryear##1##2{##1}\@nbcite}
\def\citeyear{\def\citeauthoryear##1##2{##2}\@nbcite}
% internal macro for citations with [] and with breaks between citations
% used in \cite and \shortcite
% \@icite: bracketed citation wrapper; negative \@citeseppen encourages line
% breaks between citations. \@tempswa records whether an optional [note]
% argument was given (appended after a comma inside \@cite).
\def\@icite{\leavevmode\def\@citeseppen{-1000}%
\def\@cite##1##2{\leftcite\nobreak\hskip 0in{##1\if@tempswa , ##2\fi}\rightcite}%
\@ifnextchar [{\@tempswatrue\@citex}{\@tempswafalse\@citex[]}}
% internal macro for citations without [] and with no breaks
% used in \citeauthor and \citeyear
\def\@nbcite{\leavevmode\def\@citeseppen{1000}%
\def\@cite##1##2{{##1\if@tempswa , ##2\fi}}%
\@ifnextchar [{\@tempswatrue\@citex}{\@tempswafalse\@citex[]}}
% don't box citations, separate with ; and a space
% also, make the penalty between citations a parameter,
% it may be a good place to break
% \@citex[note]{keys}: iterate over the comma-separated key list; write each
% key to the .aux file, typeset its \citeauthoryear label (a bold "?" plus a
% warning when the key is undefined), and separate citations with ";" at the
% breakable penalty \@citeseppen. \@lastauthor tracks author repetition.
\def\@citex[#1]#2{%
\def\@lastauthor{}\def\@citea{}%
\@cite{\@for\@citeb:=#2\do
{\@citea\def\@citea{;\penalty\@citeseppen\ }%
\if@filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
\@ifundefined{b@\@citeb}{\def\@thisauthor{}{\bf ?}\@warning
{Citation `\@citeb' on page \thepage \space undefined}}%
{\csname b@\@citeb\endcsname}\let\@lastauthor\@thisauthor}}{#1}}
% raise the brackets in bibliography labels
% \@biblabel: reference-list label "[Author, Year]" with raised brackets.
% \@up: raise its argument by 0.2ex (used for all citation brackets).
\def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}\@up{[}#1\@up{]}\hfill}
\def\@up#1{\leavevmode\raise.2ex\hbox{#1}}
% Optional changes
%%%% use parentheses in the reference list and citations
%\def\leftcite{(}\def\rightcite{)}
%\def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}(#1)\hfill}
%%%% no key in the reference list
%\def\@lbibitem[#1]#2{\item\if@filesw
% { \def\protect##1{\string ##1\space}\immediate
% \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces}
%\def\thebibliography#1{\section*{References\@mkboth
% {REFERENCES}{REFERENCES}}\list
% {}{\labelwidth 0pt\leftmargin\labelwidth \itemsep 0.5ex}
% \def\newblock{\hskip .11em plus .33em minus .07em}
% \sloppy\clubpenalty4000\widowpenalty4000
% \sfcode`\.=1000\relax}
\ No newline at end of file
%%%% ijcai21.tex
\typeout{IJCAI--21 Instructions for Authors}
% These are the instructions for authors for IJCAI-21.
\documentclass{article}
\pdfpagewidth=8.5in
\pdfpageheight=11in
% The file ijcai21.sty is NOT the same as previous years'
\usepackage{ijcai21}
% Use the postscript times font!
\usepackage{times}
\usepackage{soul}
\usepackage{url}
\usepackage[hidelinks]{hyperref}
\usepackage[utf8]{inputenc}
\usepackage[small]{caption}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{booktabs}
\usepackage{algorithm}
\usepackage{algorithmic}
\urlstyle{same}
% the following package is optional:
%\usepackage{latexsym}
% See https://www.overleaf.com/learn/latex/theorems_and_proofs
% for a nice explanation of how to define new theorems, but keep
% in mind that the amsthm package is already included in this
% template and that you must *not* alter the styling.
\newtheorem{example}{Example}
\newtheorem{theorem}{Theorem}
% Following comment is from ijcai97-submit.tex:
% The preparation of these files was supported by Schlumberger Palo Alto
% Research, AT\&T Bell Laboratories, and Morgan Kaufmann Publishers.
% Shirley Jowell, of Morgan Kaufmann Publishers, and Peter F.
% Patel-Schneider, of AT\&T Bell Laboratories collaborated on their
% preparation.
% These instructions can be modified and used in other conferences as long
% as credit to the authors and supporting agencies is retained, this notice
% is not changed, and further modification or reuse is not restricted.
% Neither Shirley Jowell nor Peter F. Patel-Schneider can be listed as
% contacts for providing assistance without their prior permission.
% To use for other conferences, change references to files and the
% conference appropriate and use other authors, contacts, publishers, and
% organizations.
% Also change the deadline and address for returning papers and the length and
% page charge instructions.
% Put where the files are available in the appropriate places.
%PDF Info Is REQUIRED.
\pdfinfo{
/TemplateVersion (IJCAI.2021.0)
}
\title{IJCAI--21 Formatting Instructions}
% Single author syntax
\author{
Zhi-Hua Zhou
\affiliations
Nanjing University
\emails
pcchair@ijcai-21.org
}
% Multiple author syntax (remove the single-author syntax above and the \iffalse ... \fi here)
% Check the ijcai21-multiauthor.tex file for detailed instructions
\iffalse
\author{
First Author$^1$
\and
Second Author$^2$\and
Third Author$^{2,3}$\And
Fourth Author$^4$
\affiliations
$^1$First Affiliation\\
$^2$Second Affiliation\\
$^3$Third Affiliation\\
$^4$Fourth Affiliation
\emails
\{first, second\}@example.com,
third@other.example.com,
fourth@example.com
}
\fi
\begin{document}
\maketitle
\begin{abstract}
The {\it IJCAI--21 Proceedings} will be printed from electronic
manuscripts submitted by the authors. The electronic manuscript will
also be included in the online version of the proceedings. This paper
provides the style instructions.
\end{abstract}
\section{Introduction}
The {\it IJCAI--21 Proceedings} will be printed from electronic
manuscripts submitted by the authors. These must be PDF ({\em Portable
Document Format}) files formatted for 8-1/2$''$ $\times$ 11$''$ paper.
\subsection{Length of Papers}
All paper {\em submissions} must have a maximum of six pages, plus at most one for references. The seventh page cannot contain {\bf anything} other than references.
The length rules may change for final camera-ready versions of accepted papers and will differ between tracks. Some tracks may include only references in the last page, whereas others allow for any content in all pages. Similarly, some tracks allow you to buy a few extra pages should you want to, whereas others don't.
If your paper is accepted, please carefully read the notifications you receive, and check the proceedings submission information website\footnote{\url{https://proceedings.ijcai.org/info}} to know how many pages you can finally use. That website holds the most up-to-date information regarding paper length limits at all times. Please notice that if your track allows for a special references-only page, the {\bf references-only page(s) cannot contain anything else than references} (i.e.: do not write your acknowledgments on that page or you will be charged for it).
\subsection{Word Processing Software}
As detailed below, IJCAI has prepared and made available a set of
\LaTeX{} macros and a Microsoft Word template for use in formatting
your paper. If you are using some other word processing software, please follow the format instructions given below and ensure that your final paper looks as much like this sample as possible.
\section{Style and Format}
\LaTeX{} and Word style files that implement these instructions
can be retrieved electronically. (See Appendix~\ref{stylefiles} for
instructions on how to obtain these files.)
\subsection{Layout}
Print manuscripts two columns to a page, in the manner in which these
instructions are printed. The exact dimensions for pages are:
\begin{itemize}
\item left and right margins: .75$''$
\item column width: 3.375$''$
\item gap between columns: .25$''$
\item top margin---first page: 1.375$''$
\item top margin---other pages: .75$''$
\item bottom margin: 1.25$''$
\item column height---first page: 6.625$''$
\item column height---other pages: 9$''$
\end{itemize}
All measurements assume an 8-1/2$''$ $\times$ 11$''$ page size. For
A4-size paper, use the given top and left margins, column width,
height, and gap, and modify the bottom and right margins as necessary.
\subsection{Format of Electronic Manuscript}
For the production of the electronic manuscript, you must use Adobe's
{\em Portable Document Format} (PDF). A PDF file can be generated, for
instance, on Unix systems using {\tt ps2pdf} or on Windows systems
using Adobe's Distiller. There is also a website with free software
and conversion services: \url{http://www.ps2pdf.com}. For reasons of
uniformity, use of Adobe's {\em Times Roman} font is strongly suggested.
In \LaTeX2e{} this is accomplished by writing
\begin{quote}
\mbox{\tt $\backslash$usepackage\{times\}}
\end{quote}
in the preamble.\footnote{You may want also to use the package {\tt
latexsym}, which defines all symbols known from the old \LaTeX{}
version.}
Additionally, it is of utmost importance to specify the {\bf
letter} format (corresponding to 8-1/2$''$ $\times$ 11$''$) when
formatting the paper. When working with {\tt dvips}, for instance, one
should specify {\tt -t letter}.
\subsection{Title and Author Information}
Center the title on the entire width of the page in a 14-point bold
font. The title must be capitalized using Title Case. Below it, center author name(s) in 12-point bold font. On the following line(s) place the affiliations, each affiliation on its own line using 12-point regular font. Matching between authors and affiliations can be done using numeric superscripts. Optionally, a comma-separated list of email addresses follows the affiliation(s) line(s), using 12-point regular font.
\subsubsection{Blind Review}
In order to make blind reviewing possible, authors must omit their
names and affiliations when submitting the paper for review. In place
of names and affiliations, provide a list of content areas. When
referring to one's own work, use the third person rather than the
first person. For example, say, ``Previously,
Gottlob~\shortcite{gottlob:nonmon} has shown that\ldots'', rather
than, ``In our previous work~\cite{gottlob:nonmon}, we have shown
that\ldots'' Try to avoid including any information in the body of the
paper or references that would identify the authors or their
institutions. Such information can be added to the final camera-ready
version for publication.
\subsection{Abstract}
Place the abstract at the beginning of the first column 3$''$ from the
top of the page, unless that does not leave enough room for the title
and author information. Use a slightly smaller width than in the body
of the paper. Head the abstract with ``Abstract'' centered above the
body of the abstract in a 12-point bold font. The body of the abstract
should be in the same font as the body of the paper.
The abstract should be a concise, one-paragraph summary describing the
general thesis and conclusion of your paper. A reader should be able
to learn the purpose of the paper and the reason for its importance
from the abstract. The abstract should be no more than 200 words long.
\subsection{Text}
The main body of the text immediately follows the abstract. Use
10-point type in a clear, readable font with 1-point leading (10 on
11).
Indent when starting a new paragraph, except after major headings.
\subsection{Headings and Sections}
When necessary, headings should be used to separate major sections of
your paper. (These instructions use many headings to demonstrate their
appearance; your paper should have fewer headings.) All headings should be capitalized using Title Case.
\subsubsection{Section Headings}
Print section headings in 12-point bold type in the style shown in
these instructions. Leave a blank space of approximately 10 points
above and 4 points below section headings. Number sections with
arabic numerals.
\subsubsection{Subsection Headings}
Print subsection headings in 11-point bold type. Leave a blank space
of approximately 8 points above and 3 points below subsection
headings. Number subsections with the section number and the
subsection number (in arabic numerals) separated by a
period.
\subsubsection{Subsubsection Headings}
Print subsubsection headings in 10-point bold type. Leave a blank
space of approximately 6 points above subsubsection headings. Do not
number subsubsections.
\paragraph{Titled paragraphs.} You should use titled paragraphs if and
only if the title covers exactly one paragraph. Such paragraphs should be
separated from the preceding content by at least 3pt, and no more than
6pt. The title should be in 10pt bold font and ended with a period.
After that, a 1em horizontal space should follow the title before
the paragraph's text.
In \LaTeX{} titled paragraphs should be typeset using
\begin{quote}
{\tt \textbackslash{}paragraph\{Title.\} text} .
\end{quote}
\subsubsection{Acknowledgements}
You may include an unnumbered acknowledgments section, including
acknowledgments of help from colleagues, financial support, and
permission to publish. If present, acknowledgements must be in a dedicated,
unnumbered section appearing after all regular sections but before any
appendices or references.
Use
\begin{quote}
{\tt \textbackslash{}section*\{Acknowledgements\}}
\end{quote}
to typeset the acknowledgements section in \LaTeX{}.
\subsubsection{Appendices}
Any appendices directly follow the text and look like sections, except
that they are numbered with capital letters instead of arabic
numerals. See this document for an example.
\subsubsection{References}
The references section is headed ``References'', printed in the same
style as a section heading but without a number. A sample list of
references is given at the end of these instructions. Use a consistent
format for references. The reference list should not include publicly unavailable work.
\subsection{Citations}
Citations within the text should include the author's last name and
the year of publication, for example~\cite{gottlob:nonmon}. Append
lowercase letters to the year in cases of ambiguity. Treat multiple
authors as in the following examples:~\cite{abelson-et-al:scheme}
or~\cite{bgf:Lixto} (for more than two authors) and
\cite{brachman-schmolze:kl-one} (for two authors). If the author
portion of a citation is obvious, omit it, e.g.,
Nebel~\shortcite{nebel:jair-2000}. Collapse multiple citations as
follows:~\cite{gls:hypertrees,levesque:functional-foundations}.
\nocite{abelson-et-al:scheme}
\nocite{bgf:Lixto}
\nocite{brachman-schmolze:kl-one}
\nocite{gottlob:nonmon}
\nocite{gls:hypertrees}
\nocite{levesque:functional-foundations}
\nocite{levesque:belief}
\nocite{nebel:jair-2000}
\subsection{Footnotes}
Place footnotes at the bottom of the page in a 9-point font. Refer to
them with superscript numbers.\footnote{This is how your footnotes
should appear.} Separate them from the text by a short
line.\footnote{Note the line separating these footnotes from the
text.} Avoid footnotes as much as possible; they interrupt the flow of
the text.
\section{Illustrations}
Place all illustrations (figures, drawings, tables, and photographs)
throughout the paper at the places where they are first discussed,
rather than at the end of the paper.
They should be floated to the top (preferred) or bottom of the page,
unless they are an integral part
of your narrative flow. When placed at the bottom or top of
a page, illustrations may run across both columns, but not when they
appear inline.
Illustrations must be rendered electronically or scanned and placed
directly in your document. They should be cropped outside \LaTeX{}, otherwise portions of the image could reappear during the post-processing of your paper. All illustrations should be understandable when printed in black and
white, albeit you can use colors to enhance them. Line weights should
be 1/2-point or thicker. Avoid screens and superimposing type on
patterns, as these effects may not reproduce well.
Number illustrations sequentially. Use references of the following
form: Figure 1, Table 2, etc. Place illustration numbers and captions
under illustrations. Leave a margin of 1/4-inch around the area
covered by the illustration and caption. Use 9-point type for
captions, labels, and other text in illustrations. Captions should always appear below the illustration.
\section{Tables}
Tables are considered illustrations containing data. Therefore, they should also appear floated to the top (preferably) or bottom of the page, and with the captions below them.
\begin{table}
\centering
\begin{tabular}{lll}
\hline
Scenario & $\delta$ & Runtime \\
\hline
Paris & 0.1s & 13.65ms \\
Paris & 0.2s & 0.01ms \\
New York & 0.1s & 92.50ms \\
Singapore & 0.1s & 33.33ms \\
Singapore & 0.2s & 23.01ms \\
\hline
\end{tabular}
\caption{Latex default table}
\label{tab:plain}
\end{table}
\begin{table}
\centering
\begin{tabular}{lrr}
\toprule
Scenario & $\delta$ (s) & Runtime (ms) \\
\midrule
Paris & 0.1 & 13.65 \\
& 0.2 & 0.01 \\
New York & 0.1 & 92.50 \\
Singapore & 0.1 & 33.33 \\
& 0.2 & 23.01 \\
\bottomrule
\end{tabular}
\caption{Booktabs table}
\label{tab:booktabs}
\end{table}
If you are using \LaTeX, you should use the {\tt booktabs} package, because it produces better tables than the standard ones. Compare Tables \ref{tab:plain} and~\ref{tab:booktabs}. The latter is clearly more readable for three reasons:
\begin{enumerate}
\item The styling is better thanks to using the {\tt booktabs} rulers instead of the default ones.
\item Numeric columns are right-aligned, making it easier to compare the numbers. Make sure to also right-align the corresponding headers, and to use the same precision for all numbers.
\item We avoid unnecessary repetition, both between lines (no need to repeat the scenario name in this case) as well as in the content (units can be shown in the column header).
\end{enumerate}
\section{Formulas}
IJCAI's two-column format makes it difficult to typeset long formulas. A usual temptation is to reduce the size of the formula by using the {\tt small} or {\tt tiny} sizes. This doesn't work correctly with the current \LaTeX{} versions, breaking the line spacing of the preceding paragraphs and title, as well as the equation number sizes. The following equation demonstrates the effects (notice that this entire paragraph looks badly formatted):
%
\begin{tiny}
\begin{equation}
x = \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \prod_{i=1}^n \sum_{j=1}^n j_i
\end{equation}
\end{tiny}%
Reducing formula sizes this way is strictly forbidden. We {\bf strongly} recommend authors to split formulas in multiple lines when they don't fit in a single line. This is the easiest approach to typeset those formulas and provides the most readable output%
%
\begin{align}
x =& \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \nonumber\\
& \prod_{i=1}^n \sum_{j=1}^n j_i
\end{align}%
If a line is just slightly longer than the column width, you may use the {\tt resizebox} environment on that equation. The result looks better and doesn't interfere with the paragraph's line spacing: %
\begin{equation}
\resizebox{.91\linewidth}{!}{$
\displaystyle
x = \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \prod_{i=1}^n \sum_{j=1}^n j_i
$}
\end{equation}%
This last solution may have to be adapted if you use different equation environments, but it can generally be made to work. Please notice that in any case:
\begin{itemize}
\item Equation numbers must be in the same font and size as the main text (10pt).
\item Your formula's main symbols should not be smaller than {\small small} text (9pt).
\end{itemize}
For instance, the formula
%
\begin{equation}
\resizebox{.91\linewidth}{!}{$
\displaystyle
x = \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j + \prod_{i=1}^n \sum_{j=1}^n j_i + \prod_{i=1}^n \sum_{j=1}^n i_j
$}
\end{equation}
%
would not be acceptable because the text is too small.
\section{Examples, Definitions, Theorems and Similar}
Examples, definitions, theorems, corollaries and similar must be written in their own paragraph. The paragraph must be separated by at least 2pt and no more than 5pt from the preceding and succeeding paragraphs. They must begin with the kind of item written in 10pt bold font followed by their number (e.g.: Theorem 1), optionally followed by a title/summary between parentheses in non-bold font and ended with a period. After that the main body of the item follows, written in 10 pt italics font (see below for examples).
In \LaTeX{}, we strongly recommend that you define environments for your examples, definitions, propositions, lemmas, corollaries and similar. This can be done in your \LaTeX{} preamble using \texttt{\textbackslash{newtheorem}} -- see the source of this document for examples. Numbering for these items must be global, not per-section (e.g.: Theorem 1 instead of Theorem 6.1).
\begin{example}[How to write an example]
Examples should be written using the example environment defined in this template.
\end{example}
\begin{theorem}
This is an example of an untitled theorem.
\end{theorem}
You may also include a title or description using these environments as shown in the following theorem.
\begin{theorem}[A titled theorem]
This is an example of a titled theorem.
\end{theorem}
\section{Proofs}
Proofs must be written in their own paragraph separated by at least 2pt and no more than 5pt from the preceding and succeeding paragraphs. Proof paragraphs should start with the keyword ``Proof.'' in 10pt italics font. After that the proof follows in regular 10pt font. At the end of the proof, an unfilled square symbol (qed) marks the end of the proof.
In \LaTeX{} proofs should be typeset using the \texttt{\textbackslash{proof}} environment.
\begin{proof}
This paragraph is an example of what a proof looks like using the \texttt{\textbackslash{proof}} environment.
\end{proof}
\section{Algorithms and Listings}
Algorithms and listings are a special kind of figures. Like all illustrations, they should appear floated to the top (preferably) or bottom of the page. However, their caption should appear in the header, left-justified and enclosed between horizontal lines, as shown in Algorithm~\ref{alg:algorithm}. The algorithm body should be terminated with another horizontal line. It is up to the authors to decide whether to show line numbers or not, how to format comments, etc.
In \LaTeX{} algorithms may be typeset using the {\tt algorithm} and {\tt algorithmic} packages, but you can also use one of the many other packages for the task.
\begin{algorithm}[tb]
\caption{Example algorithm}
\label{alg:algorithm}
\textbf{Input}: Your algorithm's input\\
\textbf{Parameter}: Optional list of parameters\\
\textbf{Output}: Your algorithm's output
\begin{algorithmic}[1] %[1] enables line numbers
\STATE Let $t=0$.
\WHILE{condition}
\STATE Do some action.
\IF {conditional}
\STATE Perform task A.
\ELSE
\STATE Perform task B.
\ENDIF
\ENDWHILE
\STATE \textbf{return} solution
\end{algorithmic}
\end{algorithm}
\section*{Acknowledgments}
The preparation of these instructions and the \LaTeX{} and Bib\TeX{}
files that implement them was supported by Schlumberger Palo Alto
Research, AT\&T Bell Laboratories, and Morgan Kaufmann Publishers.
Preparation of the Microsoft Word file was supported by IJCAI. An
early version of this document was created by Shirley Jowell and Peter
F. Patel-Schneider. It was subsequently modified by Jennifer
Ballentine and Thomas Dean, Bernhard Nebel, Daniel Pagenstecher,
Kurt Steinkraus, Toby Walsh and Carles Sierra. The current version
has been prepared by Marc Pujol-Gonzalez and Francisco Cruz-Mencia.
\appendix
\section{\LaTeX{} and Word Style Files}\label{stylefiles}
The \LaTeX{} and Word style files are available on the IJCAI--21
website, \url{https://www.ijcai21.org/}.
These style files implement the formatting instructions in this
document.
The \LaTeX{} files are {\tt ijcai21.sty} and {\tt ijcai21.tex}, and
the Bib\TeX{} files are {\tt named.bst} and {\tt ijcai21.bib}. The
\LaTeX{} style file is for version 2e of \LaTeX{}, and the Bib\TeX{}
style file is for version 0.99c of Bib\TeX{} ({\em not} version
0.98i). The {\tt ijcai21.sty} style differs from the {\tt
ijcai20.sty} file used for IJCAI--PRICAI--20.
The Microsoft Word style file consists of a single file, {\tt
ijcai21.doc}. This template differs from the one used for
IJCAI--PRICAI--20.
These Microsoft Word and \LaTeX{} files contain the source of the
present document and may serve as a formatting sample.
Further information on using these styles for the preparation of
papers for IJCAI--21 can be obtained by contacting {\tt
pcchair@ijcai-21.org}.
%% The file named.bst is a bibliography style file for BibTeX 0.99c
\bibliographystyle{named}
\bibliography{ijcai21}
\end{document}
%NAME: named.bst
% BibTeX `named' style file for BibTeX version 0.99c, LaTeX version 2.09
% Place it in a file called named.bst in the BibTeX search path. (Placing it
% in the same directory as the LaTeX document should also work.)
% Support for named citations is provided by named.sty
% This version was made by modifying the master file made by
% Oren Patashnik (PATASHNIK@SCORE.STANFORD.EDU)
% Copyright (C) 1985, all rights reserved.
% Modifications Copyright 1988, Peter F. Patel-Schneider
% Copying of this file is authorized only if either
% (1) you make absolutely no changes to your copy, including name, or
% (2) if you do make changes, you name it something other than
% btxbst.doc, plain.bst, unsrt.bst, alpha.bst, and abbrv.bst.
% This restriction helps ensure that all standard styles are identical.
% There are undoubtedly bugs in this style. If you make bug fixes,
% improvements, etc. please let me know. My e-mail address is:
% pfps@research.att.com
% Citation format: [author-last-name, year]
% [author-last-name and author-last-name, year]
% [author-last-name {\em et al.}, year]
%
% Reference list ordering: alphabetical by author or whatever passes
% for author in the absence of one.
%
% This BibTeX style has support for short (year only) citations. This
% is done by having the citations actually look like
% \citeauthoryear{author-info}{year}
% The LaTeX style has to have the following (or similar)
% \let\@internalcite\cite
% \def\cite{\def\citeauthoryear##1##2{##1, ##2}\@internalcite}
% \def\shortcite{\def\citeauthoryear##1##2{##2}\@internalcite}
% \def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}[#1]\hfill}
% which makes \shortcite the macro for short citations.
ENTRY
{ address
author
booktitle
chapter
edition
editor
howpublished
institution
journal
key
month
note
number
organization
pages
publisher
school
series
title
type
volume
year
}
{}
{ label extra.label sort.label }
INTEGERS { output.state before.all mid.sentence after.sentence after.block }
FUNCTION {init.state.consts}
{ #0 'before.all :=
#1 'mid.sentence :=
#2 'after.sentence :=
#3 'after.block :=
}
STRINGS { s t }
FUNCTION {output.nonnull}
{ 's :=
output.state mid.sentence =
{ ", " * write$ }
{ output.state after.block =
{ add.period$ write$
newline$
"\newblock " write$
}
{ output.state before.all =
'write$
{ add.period$ " " * write$ }
if$
}
if$
mid.sentence 'output.state :=
}
if$
s
}
FUNCTION {output}
{ duplicate$ empty$
'pop$
'output.nonnull
if$
}
FUNCTION {output.check}
{ 't :=
duplicate$ empty$
{ pop$ "empty " t * " in " * cite$ * warning$ }
'output.nonnull
if$
}
FUNCTION {output.bibitem}
{ newline$
"\bibitem[" write$
label write$
"]{" write$
cite$ write$
"}" write$
newline$
""
before.all 'output.state :=
}
FUNCTION {fin.entry}
{ add.period$
write$
newline$
}
FUNCTION {new.block}
{ output.state before.all =
'skip$
{ after.block 'output.state := }
if$
}
FUNCTION {new.sentence}
{ output.state after.block =
'skip$
{ output.state before.all =
'skip$
{ after.sentence 'output.state := }
if$
}
if$
}
FUNCTION {not}
{ { #0 }
{ #1 }
if$
}
FUNCTION {and}
{ 'skip$
{ pop$ #0 }
if$
}
FUNCTION {or}
{ { pop$ #1 }
'skip$
if$
}
FUNCTION {new.block.checka}
{ empty$
'skip$
'new.block
if$
}
FUNCTION {new.block.checkb}
{ empty$
swap$ empty$
and
'skip$
'new.block
if$
}
FUNCTION {new.sentence.checka}
{ empty$
'skip$
'new.sentence
if$
}
FUNCTION {new.sentence.checkb}
{ empty$
swap$ empty$
and
'skip$
'new.sentence
if$
}
FUNCTION {field.or.null}
{ duplicate$ empty$
{ pop$ "" }
'skip$
if$
}
FUNCTION {emphasize}
{ duplicate$ empty$
{ pop$ "" }
{ "{\em " swap$ * "}" * }
if$
}
INTEGERS { nameptr namesleft numnames }
FUNCTION {format.names}
{ 's :=
#1 'nameptr :=
s num.names$ 'numnames :=
numnames 'namesleft :=
{ namesleft #0 > }
{ s nameptr "{ff~}{vv~}{ll}{, jj}" format.name$ 't :=
nameptr #1 >
{ namesleft #1 >
{ ", " * t * }
{ numnames #2 >
{ "," * }
'skip$
if$
t "others" =
{ " et~al." * }
{ " and " * t * }
if$
}
if$
}
't
if$
nameptr #1 + 'nameptr :=
namesleft #1 - 'namesleft :=
}
while$
}
FUNCTION {format.authors}
{ author empty$
{ "" }
{ author format.names }
if$
}
FUNCTION {format.editors}
{ editor empty$
{ "" }
{ editor format.names
editor num.names$ #1 >
{ ", editors" * }
{ ", editor" * }
if$
}
if$
}
FUNCTION {format.title}
{ title empty$
{ "" }
{ title "t" change.case$ }
if$
}
FUNCTION {n.dashify}
{ 't :=
""
{ t empty$ not }
{ t #1 #1 substring$ "-" =
{ t #1 #2 substring$ "--" = not
{ "--" *
t #2 global.max$ substring$ 't :=
}
{ { t #1 #1 substring$ "-" = }
{ "-" *
t #2 global.max$ substring$ 't :=
}
while$
}
if$
}
{ t #1 #1 substring$ *
t #2 global.max$ substring$ 't :=
}
if$
}
while$
}
FUNCTION {format.date}
{ year empty$
{ month empty$
{ "" }
{ "there's a month but no year in " cite$ * warning$
month
}
if$
}
{ month empty$
'year
{ month " " * year * }
if$
}
if$
}
FUNCTION {format.btitle}
{ title emphasize
}
FUNCTION {tie.or.space.connect}
{ duplicate$ text.length$ #3 <
{ "~" }
{ " " }
if$
swap$ * *
}
FUNCTION {either.or.check}
{ empty$
'pop$
{ "can't use both " swap$ * " fields in " * cite$ * warning$ }
if$
}
FUNCTION {format.bvolume}
{ volume empty$
{ "" }
{ "volume" volume tie.or.space.connect
series empty$
'skip$
{ " of " * series emphasize * }
if$
"volume and number" number either.or.check
}
if$
}
FUNCTION {format.number.series}
{ volume empty$
{ number empty$
{ series field.or.null }
{ output.state mid.sentence =
{ "number" }
{ "Number" }
if$
number tie.or.space.connect
series empty$
{ "there's a number but no series in " cite$ * warning$ }
{ " in " * series * }
if$
}
if$
}
{ "" }
if$
}
FUNCTION {format.edition}
{ edition empty$
{ "" }
{ output.state mid.sentence =
{ edition "l" change.case$ " edition" * }
{ edition "t" change.case$ " edition" * }
if$
}
if$
}
INTEGERS { multiresult }
FUNCTION {multi.page.check}
{ 't :=
#0 'multiresult :=
{ multiresult not
t empty$ not
and
}
{ t #1 #1 substring$
duplicate$ "-" =
swap$ duplicate$ "," =
swap$ "+" =
or or
{ #1 'multiresult := }
{ t #2 global.max$ substring$ 't := }
if$
}
while$
multiresult
}
FUNCTION {format.pages}
{ pages empty$
{ "" }
{ pages multi.page.check
{ "pages" pages n.dashify tie.or.space.connect }
{ "page" pages tie.or.space.connect }
if$
}
if$
}
FUNCTION {format.vol.num.pages}
{ volume field.or.null
number empty$
'skip$
{ "(" number * ")" * *
volume empty$
{ "there's a number but no volume in " cite$ * warning$ }
'skip$
if$
}
if$
pages empty$
'skip$
{ duplicate$ empty$
{ pop$ format.pages }
{ ":" * pages n.dashify * }
if$
}
if$
}
FUNCTION {format.chapter.pages}
{ chapter empty$
'format.pages
{ type empty$
{ "chapter" }
{ type "l" change.case$ }
if$
chapter tie.or.space.connect
pages empty$
'skip$
{ ", " * format.pages * }
if$
}
if$
}
FUNCTION {format.in.ed.booktitle}
{ booktitle empty$
{ "" }
{ editor empty$
{ "In " booktitle emphasize * }
{ "In " format.editors * ", " * booktitle emphasize * }
if$
}
if$
}
FUNCTION {empty.misc.check}
{ author empty$ title empty$ howpublished empty$
month empty$ year empty$ note empty$
and and and and and
key empty$ not and
{ "all relevant fields are empty in " cite$ * warning$ }
'skip$
if$
}
FUNCTION {format.thesis.type}
{ type empty$
'skip$
{ pop$
type "t" change.case$
}
if$
}
FUNCTION {format.tr.number}
{ type empty$
{ "Technical Report" }
'type
if$
number empty$
{ "t" change.case$ }
{ number tie.or.space.connect }
if$
}
FUNCTION {format.article.crossref}
{ key empty$
{ journal empty$
{ "need key or journal for " cite$ * " to crossref " * crossref *
warning$
""
}
{ "In {\em " journal * "\/}" * }
if$
}
{ "In " key * }
if$
" \shortcite{" * crossref * "}" *
}
FUNCTION {format.crossref.editor}
{ editor #1 "{vv~}{ll}" format.name$
editor num.names$ duplicate$
#2 >
{ pop$ " et~al." * }
{ #2 <
'skip$
{ editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
{ " et~al." * }
{ " and " * editor #2 "{vv~}{ll}" format.name$ * }
if$
}
if$
}
if$
}
FUNCTION {format.book.crossref}
{ volume empty$
{ "empty volume in " cite$ * "'s crossref of " * crossref * warning$
"In "
}
{ "Volume" volume tie.or.space.connect
" of " *
}
if$
editor empty$
editor field.or.null author field.or.null =
or
{ key empty$
{ series empty$
{ "need editor, key, or series for " cite$ * " to crossref " *
crossref * warning$
"" *
}
{ "{\em " * series * "\/}" * }
if$
}
{ key * }
if$
}
{ format.crossref.editor * }
if$
" \shortcite{" * crossref * "}" *
}
FUNCTION {format.incoll.inproc.crossref}
{ editor empty$
editor field.or.null author field.or.null =
or
{ key empty$
{ booktitle empty$
{ "need editor, key, or booktitle for " cite$ * " to crossref " *
crossref * warning$
""
}
{ "In {\em " booktitle * "\/}" * }
if$
}
{ "In " key * }
if$
}
{ "In " format.crossref.editor * }
if$
" \shortcite{" * crossref * "}" *
}
FUNCTION {article}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
crossref missing$
{ journal emphasize "journal" output.check
format.vol.num.pages output
format.date "year" output.check
}
{ format.article.crossref output.nonnull
format.pages output
}
if$
new.block
note output
fin.entry
}
FUNCTION {book}
{ output.bibitem
author empty$
{ format.editors "author and editor" output.check }
{ format.authors output.nonnull
crossref missing$
{ "author and editor" editor either.or.check }
'skip$
if$
}
if$
new.block
format.btitle "title" output.check
crossref missing$
{ format.bvolume output
new.block
format.number.series output
new.sentence
publisher "publisher" output.check
address output
}
{ new.block
format.book.crossref output.nonnull
}
if$
format.edition output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {booklet}
{ output.bibitem
format.authors output
new.block
format.title "title" output.check
howpublished address new.block.checkb
howpublished output
address output
format.date output
new.block
note output
fin.entry
}
FUNCTION {inbook}
{ output.bibitem
author empty$
{ format.editors "author and editor" output.check }
{ format.authors output.nonnull
crossref missing$
{ "author and editor" editor either.or.check }
'skip$
if$
}
if$
new.block
format.btitle "title" output.check
crossref missing$
{ format.bvolume output
format.chapter.pages "chapter and pages" output.check
new.block
format.number.series output
new.sentence
publisher "publisher" output.check
address output
}
{ format.chapter.pages "chapter and pages" output.check
new.block
format.book.crossref output.nonnull
}
if$
format.edition output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {incollection}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
crossref missing$
{ format.in.ed.booktitle "booktitle" output.check
format.bvolume output
format.number.series output
format.chapter.pages output
new.sentence
publisher "publisher" output.check
address output
format.edition output
format.date "year" output.check
}
{ format.incoll.inproc.crossref output.nonnull
format.chapter.pages output
}
if$
new.block
note output
fin.entry
}
FUNCTION {inproceedings}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
crossref missing$
{ format.in.ed.booktitle "booktitle" output.check
format.bvolume output
format.number.series output
format.pages output
address empty$
{ organization publisher new.sentence.checkb
organization output
publisher output
format.date "year" output.check
}
{ address output.nonnull
format.date "year" output.check
new.sentence
organization output
publisher output
}
if$
}
{ format.incoll.inproc.crossref output.nonnull
format.pages output
}
if$
new.block
note output
fin.entry
}
FUNCTION {conference} { inproceedings }
FUNCTION {manual}
{ output.bibitem
author empty$
{ organization empty$
'skip$
{ organization output.nonnull
address output
}
if$
}
{ format.authors output.nonnull }
if$
new.block
format.btitle "title" output.check
author empty$
{ organization empty$
{ address new.block.checka
address output
}
'skip$
if$
}
{ organization address new.block.checkb
organization output
address output
}
if$
format.edition output
format.date output
new.block
note output
fin.entry
}
FUNCTION {mastersthesis}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
"Master's thesis" format.thesis.type output.nonnull
school "school" output.check
address output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {misc}
{ output.bibitem
format.authors output
title howpublished new.block.checkb
format.title output
howpublished new.block.checka
howpublished output
format.date output
new.block
note output
fin.entry
empty.misc.check
}
FUNCTION {phdthesis}
{ output.bibitem
format.authors "author" output.check
new.block
format.btitle "title" output.check
new.block
"PhD thesis" format.thesis.type output.nonnull
school "school" output.check
address output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {proceedings}
{ output.bibitem
editor empty$
{ organization output }
{ format.editors output.nonnull }
if$
new.block
format.btitle "title" output.check
format.bvolume output
format.number.series output
address empty$
{ editor empty$
{ publisher new.sentence.checka }
{ organization publisher new.sentence.checkb
organization output
}
if$
publisher output
format.date "year" output.check
}
{ address output.nonnull
format.date "year" output.check
new.sentence
editor empty$
'skip$
{ organization output }
if$
publisher output
}
if$
new.block
note output
fin.entry
}
FUNCTION {techreport}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
format.tr.number output.nonnull
institution "institution" output.check
address output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {unpublished}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
note "note" output.check
format.date output
fin.entry
}
FUNCTION {default.type} { misc }
MACRO {jan} {"January"}
MACRO {feb} {"February"}
MACRO {mar} {"March"}
MACRO {apr} {"April"}
MACRO {may} {"May"}
MACRO {jun} {"June"}
MACRO {jul} {"July"}
MACRO {aug} {"August"}
MACRO {sep} {"September"}
MACRO {oct} {"October"}
MACRO {nov} {"November"}
MACRO {dec} {"December"}
MACRO {acmcs} {"ACM Computing Surveys"}
MACRO {acta} {"Acta Informatica"}
MACRO {cacm} {"Communications of the ACM"}
MACRO {ibmjrd} {"IBM Journal of Research and Development"}
MACRO {ibmsj} {"IBM Systems Journal"}
MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
MACRO {ieeetc} {"IEEE Transactions on Computers"}
MACRO {ieeetcad}
{"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
MACRO {ipl} {"Information Processing Letters"}
MACRO {jacm} {"Journal of the ACM"}
MACRO {jcss} {"Journal of Computer and System Sciences"}
MACRO {scp} {"Science of Computer Programming"}
MACRO {sicomp} {"SIAM Journal on Computing"}
MACRO {tocs} {"ACM Transactions on Computer Systems"}
MACRO {tods} {"ACM Transactions on Database Systems"}
MACRO {tog} {"ACM Transactions on Graphics"}
MACRO {toms} {"ACM Transactions on Mathematical Software"}
MACRO {toois} {"ACM Transactions on Office Information Systems"}
MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
MACRO {tcs} {"Theoretical Computer Science"}
READ
FUNCTION {sortify}
{ purify$
"l" change.case$
}
INTEGERS { len }
FUNCTION {chop.word}
{ 's :=
'len :=
s #1 len substring$ =
{ s len #1 + global.max$ substring$ }
's
if$
}
INTEGERS { et.al.char.used }
FUNCTION {initialize.et.al.char.used}
{ #0 'et.al.char.used :=
}
EXECUTE {initialize.et.al.char.used}
FUNCTION {format.lab.names}
{ 's :=
s num.names$ 'numnames :=
numnames #1 =
{ s #1 "{vv }{ll}" format.name$ }
{ numnames #2 =
{ s #1 "{vv }{ll }and " format.name$ s #2 "{vv }{ll}" format.name$ *
}
{ s #1 "{vv }{ll }\bgroup \em et al.\egroup " format.name$ }
if$
}
if$
}
FUNCTION {author.key.label}
{ author empty$
{ key empty$
{ cite$ #1 #3 substring$ }
{ key }
if$
}
{ author format.lab.names }
if$
}
FUNCTION {author.editor.key.label}
{ author empty$
{ editor empty$
{ key empty$
{ cite$ #1 #3 substring$ }
{ key }
if$
}
{ editor format.lab.names }
if$
}
{ author format.lab.names }
if$
}
FUNCTION {author.key.organization.label}
{ author empty$
{ key empty$
{ organization empty$
{ cite$ #1 #3 substring$ }
{ "The " #4 organization chop.word #3 text.prefix$ }
if$
}
{ key }
if$
}
{ author format.lab.names }
if$
}
FUNCTION {editor.key.organization.label}
{ editor empty$
{ key empty$
{ organization empty$
{ cite$ #1 #3 substring$ }
{ "The " #4 organization chop.word #3 text.prefix$ }
if$
}
{ key }
if$
}
{ editor format.lab.names }
if$
}
FUNCTION {calc.label}
{ type$ "book" =
type$ "inbook" =
or
'author.editor.key.label
{ type$ "proceedings" =
'editor.key.organization.label
{ type$ "manual" =
'author.key.organization.label
'author.key.label
if$
}
if$
}
if$
duplicate$
"\protect\citeauthoryear{" swap$ * "}{" *
year field.or.null purify$ * % CHANGED - pfps - 15 Feb 1989
'label :=
year field.or.null purify$ *
sortify 'sort.label :=
}
% Concatenate all names in "vv ll ff jj" order, purified and lower-cased,
% for use inside a sort key; a trailing "others" becomes "et al".
FUNCTION {sort.format.names}
{ 's :=
#1 'nameptr :=
""
s num.names$ 'numnames :=
numnames 'namesleft :=
{ namesleft #0 > }
{ nameptr #1 >
{ " " * }
'skip$
if$
s nameptr "{vv{ } }{ll{ }}{ ff{ }}{ jj{ }}" format.name$ 't :=
nameptr numnames = t "others" = and
{ "et al" * }
{ t sortify * }
if$
nameptr #1 + 'nameptr :=
namesleft #1 - 'namesleft :=
}
while$
}
% Strip a leading article (A/An/The) from the title and sortify what is left.
FUNCTION {sort.format.title}
{ 't :=
"A " #2
"An " #3
"The " #4 t chop.word
chop.word
chop.word
sortify
#1 global.max$ substring$
}
% Sort-key sources, mirroring the label-selection fallbacks above; each
% warns and yields "" when no usable field exists.
FUNCTION {author.sort}
{ author empty$
{ key empty$
{ "to sort, need author or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ author sort.format.names }
if$
}
FUNCTION {author.editor.sort}
{ author empty$
{ editor empty$
{ key empty$
{ "to sort, need author, editor, or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ editor sort.format.names }
if$
}
{ author sort.format.names }
if$
}
FUNCTION {author.organization.sort}
{ author empty$
{ organization empty$
{ key empty$
{ "to sort, need author, organization, or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ "The " #4 organization chop.word sortify }
if$
}
{ author sort.format.names }
if$
}
FUNCTION {editor.organization.sort}
{ editor empty$
{ organization empty$
{ key empty$
{ "to sort, need editor, organization, or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ "The " #4 organization chop.word sortify }
if$
}
{ editor sort.format.names }
if$
}
% Build sort.key$ for each entry: label sort key, then names (picked by
% entry type), then year, then de-articled title, truncated to entry.max$.
FUNCTION {presort}
{ calc.label
sort.label
" "
*
type$ "book" =
type$ "inbook" =
or
'author.editor.sort
{ type$ "proceedings" =
'editor.organization.sort
{ type$ "manual" =
'author.organization.sort
'author.sort
if$
}
if$
}
if$
*
" "
*
year field.or.null sortify
*
" "
*
title field.or.null
sort.format.title
*
#1 entry.max$ substring$
'sort.key$ :=
}
ITERATE {presort}
SORT
% Two-pass label disambiguation over the sorted entry list: forward.pass
% assigns "a", "b", ... to runs of equal sort labels; reverse.pass makes the
% first entry of a run get "a" too (only when a "b" follows) and appends the
% closing brace that calc.label deliberately left off.
STRINGS { longest.label last.sort.label next.extra }
INTEGERS { longest.label.width last.extra.num }
FUNCTION {initialize.longest.label}
{ "" 'longest.label :=
#0 int.to.chr$ 'last.sort.label :=
"" 'next.extra :=
#0 'longest.label.width :=
#0 'last.extra.num :=
}
FUNCTION {forward.pass}
{ last.sort.label sort.label =
{ last.extra.num #1 + 'last.extra.num :=
last.extra.num int.to.chr$ 'extra.label :=
}
{ "a" chr.to.int$ 'last.extra.num :=
"" 'extra.label :=
sort.label 'last.sort.label :=
}
if$
}
FUNCTION {reverse.pass}
{ next.extra "b" =
{ "a" 'extra.label := }
'skip$
if$
label extra.label * "}" * 'label := % CHANGED - pfps 15 Feb 1989
label width$ longest.label.width >
{ label 'longest.label :=
label width$ 'longest.label.width :=
}
'skip$
if$
extra.label 'next.extra :=
}
EXECUTE {initialize.longest.label}
ITERATE {forward.pass}
REVERSE {reverse.pass}
% Emit the bibliography preamble/postamble and drive per-entry formatting.
FUNCTION {begin.bib}
{ et.al.char.used
{ "\newcommand{\etalchar}[1]{$^{#1}$}" write$ newline$ }
'skip$
if$
preamble$ empty$
'skip$
{ preamble$ write$ newline$ }
if$
"\begin{thebibliography}{}" write$ newline$
}
EXECUTE {begin.bib}
EXECUTE {init.state.consts}
ITERATE {call.type$}
FUNCTION {end.bib}
{ newline$
"\end{thebibliography}" write$ newline$
}
EXECUTE {end.bib}
# Build configuration: `file` is the master .tex source (without extension).
# The default target builds the plain conference-formatted paper.
MAKE = make
file = paper
all: IJCAI
# Draft build (\submitmode=false): two pdflatex passes to settle labels,
# bibtex, then two more passes to resolve citations. Output: paper-writting.pdf.
paper-writting:
	pdflatex -jobname $@ "\newcommand{\submitmode}{false}\input{$(file)}"
	pdflatex -jobname $@ "\newcommand{\submitmode}{false}\input{$(file)}"
	bibtex $@
	pdflatex -jobname $@ "\newcommand{\submitmode}{false}\input{$(file)}"
	pdflatex -jobname $@ "\newcommand{\submitmode}{false}\input{$(file)}"
# Submission build (\submitmode=true). Output: paper-submission.pdf.
paper-submission:
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
	bibtex $@
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
# Camera-ready build (\submitmode=true), then round-trip through PostScript
# to produce a prepress-quality PDF with embedded fonts (final-output.pdf).
# Fix: pdftops writes final.ps, so ps2pdf14 must read final.ps -- previously
# it re-read final.pdf, silently skipping the PostScript conversion step.
final:
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
	bibtex $@
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
	pdflatex -jobname $@ "\newcommand{\submitmode}{true}\input{$(file)}"
	pdftops final.pdf
	ps2pdf14 -dPDFSETTINGS=/prepress final.ps final-output.pdf
# Plain build without defining \submitmode (the document must provide a
# default). Output: IJCAI.pdf.
IJCAI:
	pdflatex -jobname $@ $(file)
	pdflatex -jobname $@ $(file)
	bibtex $@
	pdflatex -jobname $@ $(file)
	pdflatex -jobname $@ $(file)
# Repository sync helpers for the `gitlab` remote.
pull:
	#git pull gitlab master:master
	git pull gitlab master --allow-unrelated-histories
push:
	git push gitlab master:master
# Remove LaTeX build artifacts. NOTE: this also deletes every generated *.pdf.
clean:
	rm -f *.aux *.bbl *.blg *.log *.out *.pdf *.gz *.fls *.fdb_latexmk
%NAME: named.bst
% BibTeX `named' style file for BibTeX version 0.99c, LaTeX version 2.09
% Place it in a file called named.bst in the BibTeX search path. (Placing it
% in the same directory as the LaTeX document should also work.)
% Support for named citations is provided by named.sty
% This version was made by modifying the master file made by
% Oren Patashnik (PATASHNIK@SCORE.STANFORD.EDU)
% Copyright (C) 1985, all rights reserved.
% Modifications Copyright 1988, Peter F. Patel-Schneider
% Copying of this file is authorized only if either
% (1) you make absolutely no changes to your copy, including name, or
% (2) if you do make changes, you name it something other than
% btxbst.doc, plain.bst, unsrt.bst, alpha.bst, and abbrv.bst.
% This restriction helps ensure that all standard styles are identical.
% There are undoubtedly bugs in this style. If you make bug fixes,
% improvements, etc. please let me know. My e-mail address is:
% pfps@research.att.com
% Citation format: [author-last-name, year]
% [author-last-name and author-last-name, year]
% [author-last-name {\em et al.}, year]
%
% Reference list ordering: alphabetical by author or whatever passes
% for author in the absence of one.
%
% This BibTeX style has support for short (year only) citations. This
% is done by having the citations actually look like
% \citeauthoryear{author-info}{year}
% The LaTeX style has to have the following (or similar)
% \let\@internalcite\cite
% \def\cite{\def\citeauthoryear##1##2{##1, ##2}\@internalcite}
% \def\shortcite{\def\citeauthoryear##1{##2}\@internalcite}
% \def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}[#1]\hfill}
% which makes \shortcite the macro for short citations.
% Declare the database fields read by this style, plus three per-entry string
% variables: label (printed \bibitem label), extra.label ("a"/"b"
% disambiguator), and sort.label (sort-key component).
ENTRY
{ address
author
booktitle
chapter
edition
editor
howpublished
institution
journal
key
month
note
number
organization
pages
publisher
school
series
title
type
volume
year
}
{}
{ label extra.label sort.label }
% Output-state machine: controls what punctuation/newblock goes between
% successive output units (nothing, comma, period, or \newblock).
INTEGERS { output.state before.all mid.sentence after.sentence after.block }
FUNCTION {init.state.consts}
{ #0 'before.all :=
#1 'mid.sentence :=
#2 'after.sentence :=
#3 'after.block :=
}
STRINGS { s t }
% Write the string below the top of stack, preceded by punctuation chosen
% from output.state; leaves the (new) pending string s on the stack.
FUNCTION {output.nonnull}
{ 's :=
output.state mid.sentence =
{ ", " * write$ }
{ output.state after.block =
{ add.period$ write$
newline$
"\newblock " write$
}
{ output.state before.all =
'write$
{ add.period$ " " * write$ }
if$
}
if$
mid.sentence 'output.state :=
}
if$
s
}
% Like output.nonnull, but silently drops an empty string.
FUNCTION {output}
{ duplicate$ empty$
'pop$
'output.nonnull
if$
}
% Like output, but warns (naming field t) when the string is empty.
FUNCTION {output.check}
{ 't :=
duplicate$ empty$
{ pop$ "empty " t * " in " * cite$ * warning$ }
'output.nonnull
if$
}
% Start a new \bibitem[label]{citekey} and reset the output state.
FUNCTION {output.bibitem}
{ newline$
"\bibitem[" write$
label write$
"]{" write$
cite$ write$
"}" write$
newline$
""
before.all 'output.state :=
}
% Flush the final pending string with a closing period.
FUNCTION {fin.entry}
{ add.period$
write$
newline$
}
% Mark a block boundary (period + \newblock) unless nothing was output yet.
FUNCTION {new.block}
{ output.state before.all =
'skip$
{ after.block 'output.state := }
if$
}
% Mark a sentence boundary, weaker than a block boundary.
FUNCTION {new.sentence}
{ output.state after.block =
'skip$
{ output.state before.all =
'skip$
{ after.sentence 'output.state := }
if$
}
if$
}
% Boolean helpers operating on 0/1 integers left on the stack.
FUNCTION {not}
{ { #0 }
{ #1 }
if$
}
FUNCTION {and}
{ 'skip$
{ pop$ #0 }
if$
}
FUNCTION {or}
{ { pop$ #1 }
'skip$
if$
}
% Conditional block/sentence breaks: break only if the argument field(s)
% on the stack are non-empty.
FUNCTION {new.block.checka}
{ empty$
'skip$
'new.block
if$
}
FUNCTION {new.block.checkb}
{ empty$
swap$ empty$
and
'skip$
'new.block
if$
}
FUNCTION {new.sentence.checka}
{ empty$
'skip$
'new.sentence
if$
}
FUNCTION {new.sentence.checkb}
{ empty$
swap$ empty$
and
'skip$
'new.sentence
if$
}
% Replace a missing field with "" so string operators never see a missing value.
FUNCTION {field.or.null}
{ duplicate$ empty$
{ pop$ "" }
'skip$
if$
}
% Wrap a non-empty string in {\em ...}.
FUNCTION {emphasize}
{ duplicate$ empty$
{ pop$ "" }
{ "{\em " swap$ * "}" * }
if$
}
INTEGERS { nameptr namesleft numnames }
% Format a name list as "First von Last, Jr" joined with commas and a final
% "and"; a trailing "others" in the database becomes " et al.".
FUNCTION {format.names}
{ 's :=
#1 'nameptr :=
s num.names$ 'numnames :=
numnames 'namesleft :=
{ namesleft #0 > }
{ s nameptr "{ff~}{vv~}{ll}{, jj}" format.name$ 't :=
nameptr #1 >
{ namesleft #1 >
{ ", " * t * }
{ numnames #2 >
{ "," * }
'skip$
if$
t "others" =
{ " et~al." * }
{ " and " * t * }
if$
}
if$
}
't
if$
nameptr #1 + 'nameptr :=
namesleft #1 - 'namesleft :=
}
while$
}
FUNCTION {format.authors}
{ author empty$
{ "" }
{ author format.names }
if$
}
% Editors get an ", editor(s)" suffix.
FUNCTION {format.editors}
{ editor empty$
{ "" }
{ editor format.names
editor num.names$ #1 >
{ ", editors" * }
{ ", editor" * }
if$
}
if$
}
% Article-style titles are downcased except for the first letter ("t" case).
FUNCTION {format.title}
{ title empty$
{ "" }
{ title "t" change.case$ }
if$
}
% Normalize page ranges: every run of hyphens becomes exactly "--".
FUNCTION {n.dashify}
{ 't :=
""
{ t empty$ not }
{ t #1 #1 substring$ "-" =
{ t #1 #2 substring$ "--" = not
{ "--" *
t #2 global.max$ substring$ 't :=
}
{ { t #1 #1 substring$ "-" = }
{ "-" *
t #2 global.max$ substring$ 't :=
}
while$
}
if$
}
{ t #1 #1 substring$ *
t #2 global.max$ substring$ 't :=
}
if$
}
while$
}
% "Month Year", "Year", or a warned bare month.
FUNCTION {format.date}
{ year empty$
{ month empty$
{ "" }
{ "there's a month but no year in " cite$ * warning$
month
}
if$
}
{ month empty$
'year
{ month " " * year * }
if$
}
if$
}
% Book titles are emphasized, case untouched.
FUNCTION {format.btitle}
{ title emphasize
}
% Join with a tie (~) when the second operand is short (< 3 chars wide).
FUNCTION {tie.or.space.connect}
{ duplicate$ text.length$ #3 <
{ "~" }
{ " " }
if$
swap$ * *
}
% Warn when two mutually exclusive fields are both present.
FUNCTION {either.or.check}
{ empty$
'pop$
{ "can't use both " swap$ * " fields in " * cite$ * warning$ }
if$
}
% "volume N of {\em Series}", warning if number is also set.
FUNCTION {format.bvolume}
{ volume empty$
{ "" }
{ "volume" volume tie.or.space.connect
series empty$
'skip$
{ " of " * series emphasize * }
if$
"volume and number" number either.or.check
}
if$
}
% "Number N in Series" (capitalized at sentence start), or bare series.
FUNCTION {format.number.series}
{ volume empty$
{ number empty$
{ series field.or.null }
{ output.state mid.sentence =
{ "number" }
{ "Number" }
if$
number tie.or.space.connect
series empty$
{ "there's a number but no series in " cite$ * warning$ }
{ " in " * series * }
if$
}
if$
}
{ "" }
if$
}
% "N edition", case following the sentence position.
FUNCTION {format.edition}
{ edition empty$
{ "" }
{ output.state mid.sentence =
{ edition "l" change.case$ " edition" * }
{ edition "t" change.case$ " edition" * }
if$
}
if$
}
INTEGERS { multiresult }
% True when the pages field contains -, comma, or + (i.e. multiple pages).
FUNCTION {multi.page.check}
{ 't :=
#0 'multiresult :=
{ multiresult not
t empty$ not
and
}
{ t #1 #1 substring$
duplicate$ "-" =
swap$ duplicate$ "," =
swap$ "+" =
or or
{ #1 'multiresult := }
{ t #2 global.max$ substring$ 't := }
if$
}
while$
multiresult
}
% "pages 1--10" for ranges, "page 7" for a single page.
FUNCTION {format.pages}
{ pages empty$
{ "" }
{ pages multi.page.check
{ "pages" pages n.dashify tie.or.space.connect }
{ "page" pages tie.or.space.connect }
if$
}
if$
}
% "volume(number):pages" for journal articles, warning on number-sans-volume.
FUNCTION {format.vol.num.pages}
{ volume field.or.null
number empty$
'skip$
{ "(" number * ")" * *
volume empty$
{ "there's a number but no volume in " cite$ * warning$ }
'skip$
if$
}
if$
pages empty$
'skip$
{ duplicate$ empty$
{ pop$ format.pages }
{ ":" * pages n.dashify * }
if$
}
if$
}
% "chapter C, pages P" (or the entry's `type` in place of "chapter").
FUNCTION {format.chapter.pages}
{ chapter empty$
'format.pages
{ type empty$
{ "chapter" }
{ type "l" change.case$ }
if$
chapter tie.or.space.connect
pages empty$
'skip$
{ ", " * format.pages * }
if$
}
if$
}
% "In Editors, {\em Booktitle}" for incollection/inproceedings.
FUNCTION {format.in.ed.booktitle}
{ booktitle empty$
{ "" }
{ editor empty$
{ "In " booktitle emphasize * }
{ "In " format.editors * ", " * booktitle emphasize * }
if$
}
if$
}
% Warn on a @misc entry carrying nothing but (at most) a key.
FUNCTION {empty.misc.check}
{ author empty$ title empty$ howpublished empty$
month empty$ year empty$ note empty$
and and and and and
key empty$ not and
{ "all relevant fields are empty in " cite$ * warning$ }
'skip$
if$
}
% Replace the default thesis designation with the entry's `type`, title-cased.
FUNCTION {format.thesis.type}
{ type empty$
'skip$
{ pop$
type "t" change.case$
}
if$
}
% "Technical Report N" (or custom type); title-cased when there is no number.
FUNCTION {format.tr.number}
{ type empty$
{ "Technical Report" }
'type
if$
number empty$
{ "t" change.case$ }
{ number tie.or.space.connect }
if$
}
% Crossref formatters: render "In <parent> \shortcite{crossref-key}" for
% entries that inherit container data from a cross-referenced entry.
FUNCTION {format.article.crossref}
{ key empty$
{ journal empty$
{ "need key or journal for " cite$ * " to crossref " * crossref *
warning$
""
}
{ "In {\em " journal * "\/}" * }
if$
}
{ "In " key * }
if$
" \shortcite{" * crossref * "}" *
}
% First editor name, "+ and second" or " et~al." depending on list length.
FUNCTION {format.crossref.editor}
{ editor #1 "{vv~}{ll}" format.name$
editor num.names$ duplicate$
#2 >
{ pop$ " et~al." * }
{ #2 <
'skip$
{ editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
{ " et~al." * }
{ " and " * editor #2 "{vv~}{ll}" format.name$ * }
if$
}
if$
}
if$
}
FUNCTION {format.book.crossref}
{ volume empty$
{ "empty volume in " cite$ * "'s crossref of " * crossref * warning$
"In "
}
{ "Volume" volume tie.or.space.connect
" of " *
}
if$
editor empty$
editor field.or.null author field.or.null =
or
{ key empty$
{ series empty$
{ "need editor, key, or series for " cite$ * " to crossref " *
crossref * warning$
"" *
}
{ "{\em " * series * "\/}" * }
if$
}
{ key * }
if$
}
{ format.crossref.editor * }
if$
" \shortcite{" * crossref * "}" *
}
FUNCTION {format.incoll.inproc.crossref}
{ editor empty$
editor field.or.null author field.or.null =
or
{ key empty$
{ booktitle empty$
{ "need editor, key, or booktitle for " cite$ * " to crossref " *
crossref * warning$
""
}
{ "In {\em " booktitle * "\/}" * }
if$
}
{ "In " key * }
if$
}
{ "In " format.crossref.editor * }
if$
" \shortcite{" * crossref * "}" *
}
% Entry-type drivers: each emits one complete \bibitem via the output
% machinery above. Field order defines the printed reference layout.
FUNCTION {article}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
crossref missing$
{ journal emphasize "journal" output.check
format.vol.num.pages output
format.date "year" output.check
}
{ format.article.crossref output.nonnull
format.pages output
}
if$
new.block
note output
fin.entry
}
FUNCTION {book}
{ output.bibitem
author empty$
{ format.editors "author and editor" output.check }
{ format.authors output.nonnull
crossref missing$
{ "author and editor" editor either.or.check }
'skip$
if$
}
if$
new.block
format.btitle "title" output.check
crossref missing$
{ format.bvolume output
new.block
format.number.series output
new.sentence
publisher "publisher" output.check
address output
}
{ new.block
format.book.crossref output.nonnull
}
if$
format.edition output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {booklet}
{ output.bibitem
format.authors output
new.block
format.title "title" output.check
howpublished address new.block.checkb
howpublished output
address output
format.date output
new.block
note output
fin.entry
}
% @inbook: like @book but requires chapter and/or pages.
FUNCTION {inbook}
{ output.bibitem
author empty$
{ format.editors "author and editor" output.check }
{ format.authors output.nonnull
crossref missing$
{ "author and editor" editor either.or.check }
'skip$
if$
}
if$
new.block
format.btitle "title" output.check
crossref missing$
{ format.bvolume output
format.chapter.pages "chapter and pages" output.check
new.block
format.number.series output
new.sentence
publisher "publisher" output.check
address output
}
{ format.chapter.pages "chapter and pages" output.check
new.block
format.book.crossref output.nonnull
}
if$
format.edition output
format.date "year" output.check
new.block
note output
fin.entry
}
% @incollection: titled piece inside an edited book.
FUNCTION {incollection}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
crossref missing$
{ format.in.ed.booktitle "booktitle" output.check
format.bvolume output
format.number.series output
format.chapter.pages output
new.sentence
publisher "publisher" output.check
address output
format.edition output
format.date "year" output.check
}
{ format.incoll.inproc.crossref output.nonnull
format.chapter.pages output
}
if$
new.block
note output
fin.entry
}
% @inproceedings: conference paper; the address position decides whether
% organization/publisher precede or follow the date.
FUNCTION {inproceedings}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
crossref missing$
{ format.in.ed.booktitle "booktitle" output.check
format.bvolume output
format.number.series output
format.pages output
address empty$
{ organization publisher new.sentence.checkb
organization output
publisher output
format.date "year" output.check
}
{ address output.nonnull
format.date "year" output.check
new.sentence
organization output
publisher output
}
if$
}
{ format.incoll.inproc.crossref output.nonnull
format.pages output
}
if$
new.block
note output
fin.entry
}
% @conference is a synonym for @inproceedings.
FUNCTION {conference} { inproceedings }
FUNCTION {manual}
{ output.bibitem
author empty$
{ organization empty$
'skip$
{ organization output.nonnull
address output
}
if$
}
{ format.authors output.nonnull }
if$
new.block
format.btitle "title" output.check
author empty$
{ organization empty$
{ address new.block.checka
address output
}
'skip$
if$
}
{ organization address new.block.checkb
organization output
address output
}
if$
format.edition output
format.date output
new.block
note output
fin.entry
}
FUNCTION {mastersthesis}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
"Master's thesis" format.thesis.type output.nonnull
school "school" output.check
address output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {misc}
{ output.bibitem
format.authors output
title howpublished new.block.checkb
format.title output
howpublished new.block.checka
howpublished output
format.date output
new.block
note output
fin.entry
empty.misc.check
}
FUNCTION {phdthesis}
{ output.bibitem
format.authors "author" output.check
new.block
format.btitle "title" output.check
new.block
"PhD thesis" format.thesis.type output.nonnull
school "school" output.check
address output
format.date "year" output.check
new.block
note output
fin.entry
}
% @proceedings: whole conference volume; editor or organization leads.
FUNCTION {proceedings}
{ output.bibitem
editor empty$
{ organization output }
{ format.editors output.nonnull }
if$
new.block
format.btitle "title" output.check
format.bvolume output
format.number.series output
address empty$
{ editor empty$
{ publisher new.sentence.checka }
{ organization publisher new.sentence.checkb
organization output
}
if$
publisher output
format.date "year" output.check
}
{ address output.nonnull
format.date "year" output.check
new.sentence
editor empty$
'skip$
{ organization output }
if$
publisher output
}
if$
new.block
note output
fin.entry
}
FUNCTION {techreport}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
format.tr.number output.nonnull
institution "institution" output.check
address output
format.date "year" output.check
new.block
note output
fin.entry
}
FUNCTION {unpublished}
{ output.bibitem
format.authors "author" output.check
new.block
format.title "title" output.check
new.block
note "note" output.check
format.date output
fin.entry
}
% Unknown entry types fall back to @misc.
FUNCTION {default.type} { misc }
% Standard month and journal-name abbreviations usable in .bib files.
MACRO {jan} {"January"}
MACRO {feb} {"February"}
MACRO {mar} {"March"}
MACRO {apr} {"April"}
MACRO {may} {"May"}
MACRO {jun} {"June"}
MACRO {jul} {"July"}
MACRO {aug} {"August"}
MACRO {sep} {"September"}
MACRO {oct} {"October"}
MACRO {nov} {"November"}
MACRO {dec} {"December"}
MACRO {acmcs} {"ACM Computing Surveys"}
MACRO {acta} {"Acta Informatica"}
MACRO {cacm} {"Communications of the ACM"}
MACRO {ibmjrd} {"IBM Journal of Research and Development"}
MACRO {ibmsj} {"IBM Systems Journal"}
MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
MACRO {ieeetc} {"IEEE Transactions on Computers"}
MACRO {ieeetcad}
{"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
MACRO {ipl} {"Information Processing Letters"}
MACRO {jacm} {"Journal of the ACM"}
MACRO {jcss} {"Journal of Computer and System Sciences"}
MACRO {scp} {"Science of Computer Programming"}
MACRO {sicomp} {"SIAM Journal on Computing"}
MACRO {tocs} {"ACM Transactions on Computer Systems"}
MACRO {tods} {"ACM Transactions on Database Systems"}
MACRO {tog} {"ACM Transactions on Graphics"}
MACRO {toms} {"ACM Transactions on Mathematical Software"}
MACRO {toois} {"ACM Transactions on Office Information Systems"}
MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
MACRO {tcs} {"Theoretical Computer Science"}
READ
% Strip TeX control sequences/braces and lower-case, for sort keys.
FUNCTION {sortify}
{ purify$
"l" change.case$
}
INTEGERS { len }
% Drop a leading word w (of length len) from s when s starts with w.
FUNCTION {chop.word}
{ 's :=
'len :=
s #1 len substring$ =
{ s len #1 + global.max$ substring$ }
's
if$
}
INTEGERS { et.al.char.used }
FUNCTION {initialize.et.al.char.used}
{ #0 'et.al.char.used :=
}
EXECUTE {initialize.et.al.char.used}
% Author part of the citation label: one name -> "von Last"; two ->
% "A and B"; three or more -> "A {\em et al.}".
FUNCTION {format.lab.names}
{ 's :=
s num.names$ 'numnames :=
numnames #1 =
{ s #1 "{vv }{ll}" format.name$ }
{ numnames #2 =
{ s #1 "{vv }{ll }and " format.name$ s #2 "{vv }{ll}" format.name$ *
}
{ s #1 "{vv }{ll }\bgroup \em et al.\egroup " format.name$ }
if$
}
if$
}
% Label fallbacks: primary name field -> key -> first 3 chars of cite key.
FUNCTION {author.key.label}
{ author empty$
{ key empty$
{ cite$ #1 #3 substring$ }
{ key }
if$
}
{ author format.lab.names }
if$
}
FUNCTION {author.editor.key.label}
{ author empty$
{ editor empty$
{ key empty$
{ cite$ #1 #3 substring$ }
{ key }
if$
}
{ editor format.lab.names }
if$
}
{ author format.lab.names }
if$
}
FUNCTION {author.key.organization.label}
{ author empty$
{ key empty$
{ organization empty$
{ cite$ #1 #3 substring$ }
{ "The " #4 organization chop.word #3 text.prefix$ }
if$
}
{ key }
if$
}
{ author format.lab.names }
if$
}
FUNCTION {editor.key.organization.label}
{ editor empty$
{ key empty$
{ organization empty$
{ cite$ #1 #3 substring$ }
{ "The " #4 organization chop.word #3 text.prefix$ }
if$
}
{ key }
if$
}
{ editor format.lab.names }
if$
}
% Compute the printed label (\protect\citeauthoryear{names}{year}, closing
% brace appended later by reverse.pass) and the sortable sort.label.
FUNCTION {calc.label}
{ type$ "book" =
type$ "inbook" =
or
'author.editor.key.label
{ type$ "proceedings" =
'editor.key.organization.label
{ type$ "manual" =
'author.key.organization.label
'author.key.label
if$
}
if$
}
if$
duplicate$
"\protect\citeauthoryear{" swap$ * "}{" *
year field.or.null purify$ * % CHANGED - pfps - 15 Feb 1989
'label :=
year field.or.null purify$ *
sortify 'sort.label :=
}
% All names in "vv ll ff jj" order, sortified, for the sort key; a trailing
% "others" becomes "et al".
FUNCTION {sort.format.names}
{ 's :=
#1 'nameptr :=
""
s num.names$ 'numnames :=
numnames 'namesleft :=
{ namesleft #0 > }
{ nameptr #1 >
{ " " * }
'skip$
if$
s nameptr "{vv{ } }{ll{ }}{ ff{ }}{ jj{ }}" format.name$ 't :=
nameptr numnames = t "others" = and
{ "et al" * }
{ t sortify * }
if$
nameptr #1 + 'nameptr :=
namesleft #1 - 'namesleft :=
}
while$
}
% Strip a leading article (A/An/The) and sortify the remaining title.
FUNCTION {sort.format.title}
{ 't :=
"A " #2
"An " #3
"The " #4 t chop.word
chop.word
chop.word
sortify
#1 global.max$ substring$
}
% Sort-key sources mirroring the label fallbacks; warn and yield "" when no
% usable field exists.
FUNCTION {author.sort}
{ author empty$
{ key empty$
{ "to sort, need author or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ author sort.format.names }
if$
}
FUNCTION {author.editor.sort}
{ author empty$
{ editor empty$
{ key empty$
{ "to sort, need author, editor, or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ editor sort.format.names }
if$
}
{ author sort.format.names }
if$
}
FUNCTION {author.organization.sort}
{ author empty$
{ organization empty$
{ key empty$
{ "to sort, need author, organization, or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ "The " #4 organization chop.word sortify }
if$
}
{ author sort.format.names }
if$
}
FUNCTION {editor.organization.sort}
{ editor empty$
{ organization empty$
{ key empty$
{ "to sort, need editor, organization, or key in " cite$ * warning$
""
}
{ key sortify }
if$
}
{ "The " #4 organization chop.word sortify }
if$
}
{ editor sort.format.names }
if$
}
% Build sort.key$: label key, then type-appropriate names, year, and
% de-articled title, truncated to entry.max$; then sort all entries.
FUNCTION {presort}
{ calc.label
sort.label
" "
*
type$ "book" =
type$ "inbook" =
or
'author.editor.sort
{ type$ "proceedings" =
'editor.organization.sort
{ type$ "manual" =
'author.organization.sort
'author.sort
if$
}
if$
}
if$
*
" "
*
year field.or.null sortify
*
" "
*
title field.or.null
sort.format.title
*
#1 entry.max$ substring$
'sort.key$ :=
}
ITERATE {presort}
SORT
% Two-pass "a"/"b" label disambiguation over the sorted list; reverse.pass
% also appends the closing brace calc.label left off the label.
STRINGS { longest.label last.sort.label next.extra }
INTEGERS { longest.label.width last.extra.num }
FUNCTION {initialize.longest.label}
{ "" 'longest.label :=
#0 int.to.chr$ 'last.sort.label :=
"" 'next.extra :=
#0 'longest.label.width :=
#0 'last.extra.num :=
}
FUNCTION {forward.pass}
{ last.sort.label sort.label =
{ last.extra.num #1 + 'last.extra.num :=
last.extra.num int.to.chr$ 'extra.label :=
}
{ "a" chr.to.int$ 'last.extra.num :=
"" 'extra.label :=
sort.label 'last.sort.label :=
}
if$
}
FUNCTION {reverse.pass}
{ next.extra "b" =
{ "a" 'extra.label := }
'skip$
if$
label extra.label * "}" * 'label := % CHANGED - pfps 15 Feb 1989
label width$ longest.label.width >
{ label 'longest.label :=
label width$ 'longest.label.width :=
}
'skip$
if$
extra.label 'next.extra :=
}
EXECUTE {initialize.longest.label}
ITERATE {forward.pass}
REVERSE {reverse.pass}
% Emit the thebibliography environment and run each entry's type driver.
FUNCTION {begin.bib}
{ et.al.char.used
{ "\newcommand{\etalchar}[1]{$^{#1}$}" write$ newline$ }
'skip$
if$
preamble$ empty$
'skip$
{ preamble$ write$ newline$ }
if$
"\begin{thebibliography}{}" write$ newline$
}
EXECUTE {begin.bib}
EXECUTE {init.state.consts}
ITERATE {call.type$}
FUNCTION {end.bib}
{ newline$
"\end{thebibliography}" write$ newline$
}
EXECUTE {end.bib}
\typeout{Conference Style, version of November 2018}
% All bug reports should be directed to proceedings@ijcai.org
% The following comments are from the original ijcai97.sty
% The current two-column conference style.
% Heavily adapted from the IJCAI-89 original style.
% Fixes from various people incorporated up to the IJCAI-95 style.
% Some major changes for the IJCAI-2018 edition
% To use, place in a file called conference.sty, or whatever your conference
% is called, in the TeX search path. (Placing it in the same directory as
% the paper should also work.)
% Prepared by Peter F. Patel-Schneider,
% liberally using the ideas of
% other style hackers, including Barbara Beeton.
% This style is NOT guaranteed to work. It is provided in the hope
% that it will make the preparation of papers easier.
%
% The preparation of this file was supported by Schlumberger Palo Alto
% Research, AT\&T Bell Laboratories, AAAI, and Morgan Kaufmann Publishers.
%
% \pubnote added by J. Scott Penberthy
% These instructions can be modified and used in other conferences as long
% as credit to the authors and supporting agencies is retained, this notice
% is not changed, and further modification or reuse is not restricted.
%
% If you are organizing a conference, and want to use this file, you should
% appoint a contact person to handle any problems!
%
% If you are using this file for the preparation of papers for a
% conference that supplied you with this file, you should contact the
% organizers of the conference if you have any problems. They should have
% much more information than I have.
% There are undoubtedly bugs in this style. If you make bug fixes,
% improvements, etc. please let us know at proceedings@ijcai.org.
% NOTE: Some laser printers have a serious problem printing TeX output.
% These printing devices, commonly known as ``write-white'' laser
% printers, tend to make characters too light. To get around this
% problem, a darker set of fonts must be created for these devices.
% Physical page layout
% Two-column camera-ready page geometry: 7in x 9in text area, no headers,
% footers, or page numbers.
\twocolumn \flushbottom \sloppy
% Note that TeX has built-in 1-inch top and left margins.
\setlength\topmargin{-0.25in}
\setlength\oddsidemargin{-0.25in}
\setlength\evensidemargin{-0.25in}
\setlength\textheight{9.0in}
\setlength\textwidth{7.0in}
\setlength\columnsep{0.25in}
% No pages numbers or other headers or footers
\setlength\headheight{0pt} \setlength\headsep{0pt}
%\setlength\footheight{0pt} \setlength\footskip{0pt}
\thispagestyle{empty} \pagestyle{empty}
% jsp added:
% \pubnote{text}: put a publication note in the first-page running head
% and suppress the page number there.
\def\pubnote#1{\thispagestyle{myheadings}
\markboth{#1}{#1}
\def\thepage{}
}
% Less leading in most fonts (due to the narrow columns)
% The choices were between 1-pt and 1.5-pt leading
% \def\@normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
% \def\small{\@setsize\small{10pt}\ixpt\@ixpt} % 9 point on 10
% \def\footnotesize{\@setsize\footnotesize{10pt}\ixpt\@ixpt} % 9 point on 10
% \def\scriptsize{\@setsize\scriptsize{8pt}\viipt\@viipt} % 7 point on 8
% \def\tiny{\@setsize\tiny{7pt}\vipt\@vipt} % 6 point on 7
% \def\large{\@setsize\large{12pt}\xipt\@xipt} % 11 point on 12
% \def\Large{\@setsize\Large{14pt}\xiipt\@xiipt} % 12 point on 14
% \def\LARGE{\@setsize\LARGE{16pt}\xivpt\@xivpt} % 14 point on 16
% \def\huge{\@setsize\huge{20pt}\xviipt\@xviipt} % 17 point on 20
% \def\Huge{\@setsize\Huge{23pt}\xxpt\@xxpt} % 20 point on 23
% latex2e compatibility mode hack - kek@cs.brown.edu 11/10/98
% Tighter leading than the LaTeX defaults, to suit the narrow columns.
\def\@normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
\def\normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
\def\small{\@setsize\small{10pt}\ixpt\@ixpt} % 9 point on 10
\def\footnotesize{\@setsize\footnotesize{10pt}\ixpt\@ixpt} % 9 point on 10
\def\scriptsize{\@setsize\scriptsize{8pt}\viipt\@viipt} % 7 point on 8
\def\tiny{\@setsize\tiny{7pt}\vipt\@vipt} % 6 point on 7
\def\large{\@setsize\large{12pt}\xipt\@xipt} % 11 point on 12
\def\Large{\@setsize\Large{14pt}\xiipt\@xiipt} % 12 point on 14
\def\LARGE{\@setsize\LARGE{16pt}\xivpt\@xivpt} % 14 point on 16
\def\huge{\@setsize\huge{20pt}\xviipt\@xviipt} % 17 point on 20
\def\Huge{\@setsize\Huge{23pt}\xxpt\@xxpt} % 20 point on 23
% Paragraphs
\parindent 1em
\parskip 0pt plus 1pt
% Title stuff, taken from deproc.
% \titlebox is the minimum title-block height; \@maketitle grows it on demand.
\newlength\titlepad \setlength\titlepad{0in}
\newlength\titlebox \setlength\titlebox{2.25in}
% \maketitle: title footnotes use symbols; spans both columns via
% \twocolumn[...]; disables itself after the first use.
\def\maketitle{\par
\begingroup % to make the footnote style local to the title
\def\thefootnote{\fnsymbol{footnote}}
\def\@makefnmark{$^{\@thefnmark}$}
\twocolumn[\@maketitle] \@thanks
\endgroup
\setcounter{footnote}{0}
\let\maketitle\relax \let\@maketitle\relax
\gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax}%
%
% Two-phase title layout: first typeset the title block into a savebox to
% measure its natural height, enlarge \titlebox if needed, then typeset it
% for real in a \vbox of that height. \and/\And join authors;
% \affiliations and \emails start Large roman lines below the author list.
\def\@maketitle{%
\newsavebox{\titlearea}
\sbox{\titlearea}{
\let\footnote\thanks\relax
\vbox{
\hsize\textwidth \linewidth\hsize%
\vskip 0.5in%
\centering%
{\LARGE\bf \@title \par}%
\vskip 0.1in%
{%
\def\and{\unskip\thinspace{\rm ,}\enspace}%
\def\And{\unskip\enspace{\rm and}\enspace}%
\def\affiliations{%
\egroup\par\Large\bgroup\rm%
}%
\def\emails{%
\egroup\par\Large\bgroup\rm%
}%
\bgroup\Large\bf\@author\egroup%%
}%
\vskip 0.2in%
}
}
% Measurement phase result: grow the box when the content is taller.
\newlength\actualheight
\settoheight{\actualheight}{\usebox{\titlearea}}
\ifdim\actualheight>\titlebox
\setlength{\titlebox}{\actualheight}
\fi
%\setlength{\titlepad}{\dimexpr\titlepad+\titlepad\relax}
\setcounter{footnote}{0}
% Real typesetting phase: \thanks now produces footnote marks.
\vbox to \titlebox {
\def\thanks##1{\footnotemark}\relax
\hsize\textwidth \linewidth\hsize%
\vskip 0.5in%
\centering%
{\LARGE\bf \@title \par}%
\vskip 0.2in plus 4fil minus 0.1in%
{%
\def\and{\unskip\thinspace{\rm ,}\enspace}%
\def\And{\unskip\enspace{\rm and}\enspace}%
\def\affiliations{
\egroup%
\vskip 0.05in minus 0.05in%
\par\bgroup\Large\rm%
}
\def\emails{
\egroup%
\vskip 0.05in minus 0.05in%
\par\bgroup\Large\rm%
}
\bgroup\Large\bf\@author\egroup%
}%
\vskip 0.3in plus 8fil minus 0.1in
}
}
% Centered bold "Abstract" heading over a quote environment.
\renewenvironment{abstract}{\centerline{\Large\bf
Abstract}\vspace{0.5ex}\begin{quote}}{\par\end{quote}\vskip 1ex}
% Sections with less space
\def\section{\@startsection{section}{1}{\z@}{-10pt plus
-3pt minus -2pt}{4pt plus 2pt minus 1pt}{\Large\bf\raggedright}}
\def\subsection{\@startsection{subsection}{2}{\z@}{-8pt plus
-2pt minus -2pt}{3pt plus 2pt minus 1pt}{\large\bf\raggedright}}
\def\subsubsection{\@startsection{subsubsection}{3}{\z@}{-6pt plus
-2pt minus -1pt}{1pt plus 1pt minus 1pt}{\normalsize\bf\raggedright}}
% Run-in paragraph headings (negative afterskip).
\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}{-4pt plus
-2pt minus -1pt}{-1em}{\normalsize\bf}}
\setcounter{secnumdepth}{2} % Don't number subsubsections
% Footnotes
\footnotesep 6.65pt \skip\footins 9pt plus 4pt minus 2pt
\def\footnoterule{\kern-3pt \hrule width 5pc \kern 2.6pt }
\setcounter{footnote}{0}
% Illustrations (floats)
\floatsep 12pt plus 2pt minus 2pt
\textfloatsep 16pt plus 2pt minus 4pt
\intextsep 12pt plus 2pt minus 2pt
\dblfloatsep 12pt plus 2pt minus 2pt
\dbltextfloatsep 18pt plus 2pt minus 4pt
% Displays
\abovedisplayskip 7pt plus2pt minus5pt%
\belowdisplayskip \abovedisplayskip
\abovedisplayshortskip 0pt plus3pt%
\belowdisplayshortskip 4pt plus3pt minus3pt%
% Lists
% Tightened list indentation and vertical spacing for the two-column layout;
% \@listI..\@listvi configure nesting levels 1-6.
\leftmargini 2em
\leftmarginii 2em
\leftmarginiii 1em
\leftmarginiv 0.5em
\leftmarginv 0.5em
\leftmarginvi 0.5em
\leftmargin\leftmargini
\labelsep 5pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
\def\@listI{\leftmargin\leftmargini
\parsep 2pt plus 1pt minus 0.5pt%
\topsep 4pt plus 1pt minus 2pt%
\itemsep 2pt plus 1pt minus 0.5pt%
\partopsep 1pt plus 0.5pt minus 0.5pt}
\let\@listi\@listI
\@listi
\def\@listii{\leftmargin\leftmarginii
\labelwidth\leftmarginii\advance\labelwidth-\labelsep
\parsep 1pt plus 0.5pt minus 0.5pt
\topsep 2pt plus 1pt minus 0.5pt
\itemsep \parsep}
\def\@listiii{\leftmargin\leftmarginiii
\labelwidth\leftmarginiii\advance\labelwidth-\labelsep
\parsep 0pt plus 1pt
\partopsep 0.5pt plus 0pt minus 0.5pt
\topsep 1pt plus 0.5pt minus 0.5pt
\itemsep \topsep}
\def\@listiv{\leftmargin\leftmarginiv
\labelwidth\leftmarginiv\advance\labelwidth-\labelsep}
\def\@listv{\leftmargin\leftmarginv
\labelwidth\leftmarginv\advance\labelwidth-\labelsep}
\def\@listvi{\leftmargin\leftmarginvi
\labelwidth\leftmarginvi\advance\labelwidth-\labelsep}
% We're never going to need a table of contents, so just flush it to
% save space --- suggested by drstrip@sandia-2
%\def\addcontentsline#1#2#3{}
%%%% named.sty
\typeout{Named Citation Style, version of 30 November 1994}
% This file implements citations for the ``named'' bibliography style.
% Place it in a file called named.sty in the TeX search path. (Placing it
% in the same directory as the LaTeX document should also work.)
% Prepared by Peter F. Patel-Schneider, with the assistance of several,
% since forgotten, LaTeX hackers.
% This style is NOT guaranteed to work. It is provided in the hope
% that it will make the preparation of papers easier.
%
% There are undoubtably bugs in this style. If you make bug fixes,
% improvements, etc. please let me know. My e-mail address is:
% pfps@research.att.com
% The preparation of this file was supported by Schlumberger Palo Alto
% Research and AT\&T Bell Laboratories.
% This file can be modified and used in other conferences as long
% as credit to the authors and supporting agencies is retained, this notice
% is not changed, and further modification or reuse is not restricted.
% The ``named'' bibliography style creates citations with labels like
% \citeauthoryear{author-info}{year}
% these labels are processed by the following commands:
% \cite{keylist}
% which produces citations with both author and year,
% enclosed in square brackets
% \shortcite{keylist}
% which produces citations with year only,
% enclosed in square brackets
% \citeauthor{key}
% which produces the author information only
% \citeyear{key}
% which produces the year information only
% Brackets printed around \cite/\shortcite labels, raised via \@up.
\def\leftcite{\@up[}\def\rightcite{\@up]}
% \cite: author and year; the author part is suppressed when it equals the
% previous citation's author in the same list (tracked in \@lastauthor).
\def\cite{\def\citeauthoryear##1##2{\def\@thisauthor{##1}%
\ifx \@lastauthor \@thisauthor \relax \else##1, \fi ##2}\@icite}
% \shortcite: year only, bracketed.
\def\shortcite{\def\citeauthoryear##1##2{##2}\@icite}
% \citeauthor / \citeyear: author-only / year-only, without brackets.
\def\citeauthor{\def\citeauthoryear##1##2{##1}\@nbcite}
\def\citeyear{\def\citeauthoryear##1##2{##2}\@nbcite}
% Internal macro for citations with [] and with breaks between citations;
% used in \cite and \shortcite. The negative \@citeseppen makes the
% separator between multiple citations an attractive line-break point.
\def\@icite{\leavevmode\def\@citeseppen{-1000}%
\def\@cite##1##2{\leftcite\nobreak\hskip 0in{##1\if@tempswa , ##2\fi}\rightcite}%
\@ifnextchar [{\@tempswatrue\@citex}{\@tempswafalse\@citex[]}}
% Internal macro for citations without [] and with no breaks; used in
% \citeauthor and \citeyear (positive penalty discourages breaking).
\def\@nbcite{\leavevmode\def\@citeseppen{1000}%
\def\@cite##1##2{{##1\if@tempswa , ##2\fi}}%
\@ifnextchar [{\@tempswatrue\@citex}{\@tempswafalse\@citex[]}}
% Don't box citations; separate them with ; and a space, and make the
% penalty between citations a parameter (\@citeseppen) --- it may be a
% good place to break. For each key: write it to the .aux, print a bold
% "?" with a warning if the key is undefined, and record \@lastauthor so
% \cite can collapse consecutive citations by the same author.
\def\@citex[#1]#2{%
\def\@lastauthor{}\def\@citea{}%
\@cite{\@for\@citeb:=#2\do
{\@citea\def\@citea{;\penalty\@citeseppen\ }%
\if@filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
\@ifundefined{b@\@citeb}{\def\@thisauthor{}{\bf ?}\@warning
{Citation `\@citeb' on page \thepage \space undefined}}%
{\csname b@\@citeb\endcsname}\let\@lastauthor\@thisauthor}}{#1}}
% Raise the brackets in bibliography labels.
\def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}\@up{[}#1\@up{]}\hfill}
% \@up raises its argument slightly (used for the citation brackets above).
\def\@up#1{\leavevmode\raise.2ex\hbox{#1}}
% Optional changes
%%%% use parentheses in the reference list and citations
%\def\leftcite{(}\def\rightcite{)}
%\def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}(#1)\hfill}
%%%% no key in the reference list
%\def\@lbibitem[#1]#2{\item\if@filesw
% { \def\protect##1{\string ##1\space}\immediate
% \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces}
%\def\thebibliography#1{\section*{References\@mkboth
% {REFERENCES}{REFERENCES}}\list
% {}{\labelwidth 0pt\leftmargin\labelwidth \itemsep 0.5ex}
% \def\newblock{\hskip .11em plus .33em minus .07em}
% \sloppy\clubpenalty4000\widowpenalty4000
% \sfcode`\.=1000\relax}
\ No newline at end of file
%%%% ijcai21.tex
\typeout{IJCAI--21 Instructions for Authors}
% These are the instructions for authors for IJCAI-21.
% Standard article class; ijcai21.sty restyles it to the IJCAI-21 layout.
\documentclass{article}
% Letter-size PDF page dimensions (required by the template).
\pdfpagewidth=8.5in
\pdfpageheight=11in
% The file ijcai21.sty is NOT the same than previous years'
\usepackage{ijcai21}
% Use the postscript times font!
\usepackage{times}
\usepackage{soul}
\usepackage{url}
\usepackage[hidelinks]{hyperref}
\usepackage[utf8]{inputenc}
\usepackage[small]{caption}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{booktabs}
\usepackage{algorithm}
\usepackage{algorithmic}
% Typeset \url in the surrounding font rather than typewriter.
\urlstyle{same}
% the following package is optional:
%\usepackage{latexsym}
% See https://www.overleaf.com/learn/latex/theorems_and_proofs
% for a nice explanation of how to define new theorems, but keep
% in mind that the amsthm package is already included in this
% template and that you must *not* alter the styling.
\newtheorem{example}{Example}
\newtheorem{theorem}{Theorem}
% Following comment is from ijcai97-submit.tex:
% The preparation of these files was supported by Schlumberger Palo Alto
% Research, AT\&T Bell Laboratories, and Morgan Kaufmann Publishers.
% Shirley Jowell, of Morgan Kaufmann Publishers, and Peter F.
% Patel-Schneider, of AT\&T Bell Laboratories collaborated on their
% preparation.
% These instructions can be modified and used in other conferences as long
% as credit to the authors and supporting agencies is retained, this notice
% is not changed, and further modification or reuse is not restricted.
% Neither Shirley Jowell nor Peter F. Patel-Schneider can be listed as
% contacts for providing assistance without their prior permission.
% To use for other conferences, change references to files and the
% conference appropriate and use other authors, contacts, publishers, and
% organizations.
% Also change the deadline and address for returning papers and the length and
% page charge instructions.
% Put where the files are available in the appropriate places.
%PDF Info Is REQUIRED.
%PDF Info Is REQUIRED.
\pdfinfo{
/TemplateVersion (IJCAI.2021.0)
}
\title{Enabling the Emergence of Symbolic Language without Handcrafted Inductions}
% Single author syntax (currently active; author block left blank for
% anonymous submission).
\author{
% Zhi-Hua Zhou
% \affiliations
% Nanjing University
% \emails
% pcchair@ijcai-21.org
}
% Multiple author syntax (remove the single-author syntax above and the \iffalse ... \fi here)
% Check the ijcai21-multiauthor.tex file for detailed instructions
% NOTE: the whole multi-author block below is disabled by \iffalse ... \fi.
\iffalse
\author{
First Author$^1$
\and
Second Author$^2$\and
Third Author$^{2,3}$\And
Fourth Author$^4$
\affiliations
$^1$First Affiliation\\
$^2$Second Affiliation\\
$^3$Third Affiliation\\
$^4$Fourth Affiliation
\emails
\{first, second\}@example.com,
third@other.example.com,
fourth@example.com
}
\fi
\begin{document}
\maketitle
\begin{abstract}
The emergence of symbolic languages with high compositionality has
attracted extensive attention from a broad range of communities. Existing
studies achieve high compositionality through \emph{deliberately handcrafted}
inductions (e.g., additional rewards, constructed
loss functions and structural input data) in multi-agent learning, which are unnatural.
Yet, few studies investigate the emergence of symbolic language with high
compositionality \emph{naturally}, i.e., without deliberately handcrafted
inductions.
In this paper, \note{we are the first to successfully achieve high compositional
symbolic language} in a \emph{natural} manner without handcrafted inductions.
Initially, by investigating the emergent
language after removing the \emph{deliberately handcrafted}
inductions, we observe the difficulty in naturally generating high compositional
language.
%the agent capacity plays a key role in compositionality.
Further, we reveal and characterize the \note{quantitative relationship}
between the agent capacity and the compositionality of emergent language, with
a novel mutual information-based metric for measuring the compositionality more reasonably.
The experimental results lead to a counter-intuitive conclusion that lower agent
capacity facilitates the emergence of language with higher
compositionality. \note{Based on our conclusion, we can obtain a more
compositional language with a higher probability.}
\end{abstract}
\input{tex/introduction.tex}
\input{tex/relatedwork.tex}
\input{tex/theory.tex}
\input{tex/theory2.tex}
\input{tex/experiments.tex}
\input{tex/last.tex}
%\clearpage
%\newpage
\bibliographystyle{ijcai21}
\bibliography{ref.bib}
\end{document}
@inproceedings{DBLP:conf/iclr/WuLCS18,
author = {Shuang Wu and
Guoqi Li and
Feng Chen and
Luping Shi},
title = {Training and Inference with Integers in Deep Neural Networks},
booktitle = {6th International Conference on Learning Representations, {ICLR} 2018,
Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings},
year = {2018},
url = {https://openreview.net/forum?id=HJGXzmspb},
timestamp = {Thu, 04 Apr 2019 13:20:09 +0200},
biburl = {https://dblp.org/rec/bib/conf/iclr/WuLCS18},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%Related Work%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@inproceedings{kottur-etal-2017-natural,
title = "Natural Language Does Not Emerge {`}Naturally{'} in Multi-Agent Dialog",
author = "Kottur, Satwik and
Moura, Jos{\'e} and
Lee, Stefan and
Batra, Dhruv",
booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/D17-1321",
doi = "10.18653/v1/D17-1321",
pages = "2962--2967",
abstract = "A number of recent works have proposed techniques for end-to-end learning of communication protocols among cooperative multi-agent populations, and have simultaneously found the emergence of grounded human-interpretable language in the protocols developed by the agents, learned without any human supervision! In this paper, using a Task {\&} Talk reference game between two agents as a testbed, we present a sequence of {`}negative{'} results culminating in a {`}positive{'} one {--} showing that while most agent-invented languages are effective (i.e. achieve near-perfect task rewards), they are decidedly not interpretable or compositional. In essence, we find that natural language does not emerge {`}naturally{'},despite the semblance of ease of natural-language-emergence that one may gather from recent literature. We discuss how it is possible to coax the invented languages to become more and more human-like and compositional by increasing restrictions on how two agents may communicate.",
}
@inproceedings{chaabouni-etal-2019-word,
title = "Word-order Biases in Deep-agent Emergent Communication",
author = "Chaabouni, Rahma and
Kharitonov, Eugene and
Lazaric, Alessandro and
Dupoux, Emmanuel and
Baroni, Marco",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P19-1509",
doi = "10.18653/v1/P19-1509",
pages = "5166--5175",
abstract = "Sequence-processing neural networks led to remarkable progress on many NLP tasks. As a consequence, there has been increasing interest in understanding to what extent they process language as humans do. We aim here to uncover which biases such models display with respect to {``}natural{''} word-order constraints. We train models to communicate about paths in a simple gridworld, using miniature languages that reflect or violate various natural language trends, such as the tendency to avoid redundancy or to minimize long-distance dependencies. We study how the controlled characteristics of our miniature languages affect individual learning and their stability across multiple network generations. The results draw a mixed picture. On the one hand, neural networks show a strong tendency to avoid long-distance dependencies. On the other hand, there is no clear preference for the efficient, non-redundant encoding of information that is widely attested in natural language. We thus suggest inoculating a notion of {``}effort{''} into neural networks, as a possible way to make their linguistic behavior more human-like.",
}
@article{kirby2015compression,
title={Compression and communication in the cultural evolution of linguistic structure},
author={Kirby, Simon and Tamariz, Monica and Cornish, Hannah and Smith, Kenny},
journal={Cognition},
volume={141},
pages={87--102},
year={2015},
publisher={Elsevier}
}
@inproceedings{lazaridou2018emergence,
title={Emergence of Linguistic Communication from Referential Games with Symbolic and Pixel Input},
author={Lazaridou, Angeliki and Hermann, Karl Moritz and Tuyls, Karl and Clark, Stephen},
booktitle={International Conference on Learning Representations},
year={2018}
}
@inproceedings{li2019ease,
title={Ease-of-teaching and language structure from emergent communication},
author={Li, Fushan and Bowling, Michael},
booktitle={Advances in Neural Information Processing Systems},
pages={15851--15861},
year={2019}
}
@inproceedings{evtimova2018emergent,
title={Emergent Communication in a Multi-Modal, Multi-Step Referential Game},
author={Evtimova, Katrina and Drozdov, Andrew and Kiela, Douwe and Cho, Kyunghyun},
booktitle={International Conference on Learning Representations},
year={2018}
}
@inproceedings{choi2018compositional,
title={Compositional Obverter Communication Learning from Raw Visual Input},
author={Choi, Edward and Lazaridou, Angeliki and de Freitas, Nando},
booktitle={International Conference on Learning Representations},
year={2018}
}
@article{chaabouni2020compositionality,
title={Compositionality and generalization in emergent languages},
author={Chaabouni, Rahma and Kharitonov, Eugene and Bouchacourt, Diane and Dupoux, Emmanuel and Baroni, Marco},
journal={arXiv preprint arXiv:2004.09124},
year={2020}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@article{bogin2018emergence,
  title={Emergence of Communication in an Interactive World with Consistent Speakers},
  author={Bogin, Ben and Geva, Mor and Berant, Jonathan},
  journal={arXiv preprint arXiv:1809.00549},
  year={2018}
}
@inproceedings{jaques2019social,
title={Social influence as intrinsic motivation for multi-agent deep reinforcement learning},
author={Jaques, Natasha and Lazaridou, Angeliki and Hughes, Edward and Gulcehre, Caglar and Ortega, Pedro and Strouse, DJ and Leibo, Joel Z and De Freitas, Nando},
booktitle={International Conference on Machine Learning},
pages={3040--3049},
year={2019},
organization={PMLR}
}
@article{mul2019mastering,
title={Mastering emergent language: learning to guide in simulated navigation},
author={Mul, Mathijs and Bouchacourt, Diane and Bruni, Elia},
journal={arXiv preprint arXiv:1908.05135},
year={2019}
}
@inproceedings{kharitonov2019egg,
title={EGG: a toolkit for research on Emergence of lanGuage in Games},
author={Kharitonov, Eugene and Chaabouni, Rahma and Bouchacourt, Diane and Baroni, Marco},
booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP): System Demonstrations},
pages={55--60},
year={2019}
}
@article{labash2020perspective,
title={Perspective taking in deep reinforcement learning agents},
author={Labash, Aqeel and Aru, Jaan and Matiisen, Tambet and Tampuu, Ardi and Vicente, Raul},
journal={Frontiers in Computational Neuroscience},
volume={14},
year={2020},
publisher={Frontiers Media SA}
}
@inproceedings{andreas2018measuring,
title={Measuring Compositionality in Representation Learning},
author={Andreas, Jacob},
booktitle={International Conference on Learning Representations},
year={2018}
}
@book{partee2008compositionality,
title={Compositionality in formal semantics: Selected papers},
author={Partee, Barbara H},
year={2008},
publisher={John Wiley \& Sons}
}
@article{mordatch2017emergence,
title={Emergence of grounded compositional language in multi-agent populations},
author={Mordatch, Igor and Abbeel, Pieter},
journal={arXiv preprint arXiv:1703.04908},
year={2017}
}
@book{david1969convention,
  title={Convention: A Philosophical Study},
  author={Lewis, David},
  year={1969},
  address={Cambridge, MA},
  publisher={Harvard University Press}
}
\ No newline at end of file
In this section, a referential game platform and a speaker-listener model are introduced. The referential game is commonly used in studies of emergent language, such as [][]. In this game, the speaker needs to communicate with the listener to complete a task cooperatively. The game setup for the referential game is first described. Then, the construction of the speaker and the listener with neural networks is introduced. Lastly, the training algorithm and the evaluation
methods are discussed.
\subsection{Setup}
In the referential game, the agents should obey the following rules:
a) The speaker agent $S$ uses the input object $t$ to output the corresponding symbol sequence $s$;
b) The listener agent $L$ uses the symbol sequence $s$ to output the predicted result $\hat{t}$;
c) If $t=\hat{t}$, the game is successful, and each agent receives the reward $R(t,\hat{t})=1$; otherwise, the game fails, and the reward is set as $R(t,\hat{t})=-1$.
An input object $t$ is a concept sequence with fixed length, denoted $t=(c_0,c_1)$. The concepts $c_0$ (shape) and $c_1$ (color) are each indicated as a one-hot vector. The length of each one-hot vector ranges from 3 to 6. These two vectors are concatenated to denote the input object $t$.
Each symbol sequence $s$ contains two words, denoted $(s_0,s_1)$. Each word $s_i$ is chosen from the vocabulary set $V$. In this game, the cardinality $|V|$ ranges from 4 to 10, and the inequality $|V|^2\geq|M_0||M_1|$ is satisfied to ensure that the symbol sequence $(s_0,s_1)$ can denote every input object $t$. A one-hot vector with length $|V|$ is used to indicate the words $s_0$ and $s_1$ respectively. Then, the two one-hot vectors are concatenated to denote the symbol sequence $s$.
The predicted result $\hat{t}$ is denoted as a one-hot vector with length $|M_0||M_1|$. Each bit of the one-hot vector denotes one input object. If the predicted result satisfies $\hat{t}[i\cdot|M_1|+j]=1$, the one-hot vectors of the predicted concepts $\hat{c}_0$ and $\hat{c}_1$ satisfy $\hat{c}_0[i]=1$ and $\hat{c}_1[j]=1$, respectively.
If $(c_0,c_1)$ is equal to $(\hat{c}_0,\hat{c}_1)$, the input object and the predicted result indicate the same object.
\subsection{Agent Architecture}
The agents apply their own policies to play the referential game. Denote the policies of the speaker agent $S$ and the listener agent $L$ as $\pi_S$ and $\pi_L$, respectively. $\pi_S$ indicates the conditional probabilities $P(s_0|t)$ and $P(s_1|t)$. $\pi_L$ indicates the conditional probability $P(\hat{t}|s_0,s_1)$. The listener agent outputs the predicted result $\hat{t}$ through random sampling from the conditional probability $P(\hat{t}|s_0,s_1)$. Neural networks are used to simulate the agent policies. The agent architecture is shown in Figure 1.
For the speaker, the input object $t$ is first passed to an MLP to get a hidden-layer vector $h^S$. Then, the hidden-layer vector is split into two feature vectors $h_0^S$ and $h_1^S$ with length $h_{size}$. Through an MLP and a softmax layer, these feature vectors are transformed into the outputs $o_0$ and $o_1$, each with length $|V|$. Lastly, the words $s_0$ and $s_1$ are sampled from the outputs $o_0$ and $o_1$.
For the listener, the input words $s_0$ and $s_1$ are each passed into an MLP to get the hidden-layer vectors $h_0$ and $h_1$. The length of each vector is $h_{size}$. These vectors are concatenated, and the joint vector is passed into an MLP and a softmax layer; the output $o^L$ with length $|M_0||M_1|$ denotes $P(\hat{t}|s_0,s_1)$. Lastly, the predicted result is sampled from the output $o^L$.
In the experiments, the symbol $h_{size}$ is used to denote the model capacity of the agents.
\subsection{Training Algorithm}
In this paper, the Stochastic Policy Gradient methodology is used to train the speaker and the listener, respectively. The symbols $\theta_S$ and $\theta_L$ denote the neural network parameters of the policies $\pi_S$ and $\pi_L$, respectively. When training the speaker, the parameter $\theta_L$ is fixed, and the training objective is to maximize the expected reward $J(\theta_S,\theta_L)=E_{\pi_S,\pi_L}[R(t,\hat{t})]$ through adjusting the parameter $\theta_S$. In a similar way, the listener is trained to maximize the expected reward $J(\theta_S,\theta_L)$ by fixing the parameter $\theta_S$ and adjusting the parameter $\theta_L$. To minimize the influence of artificial induction on the emergent language, we only use the predicted result $\hat{t}$ of the listener agent as the evidence for whether to give positive rewards. Then, the gradients of the expected reward $J(\theta_S,\theta_L)$ can be calculated as follows:
\begin{align}
\nabla_{\theta^S} J &= \mathbb{E}_{\pi^S, \pi^L} \left[ R(\hat{t}, t) \cdot \nabla_{\theta^S} \log{\pi^S(s_0, s_1 | t)} \right] \\
\nabla_{\theta^L} J &= \mathbb{E}_{\pi^S, \pi^L} \left[ R(\hat{t}, t) \cdot \nabla_{\theta^L} \log{\pi^L(\hat{t} | s_0, s_1)} \right]
\end{align}
Unlike previous studies [][], the agents in this paper are totally independent. This means that the neural network parameters of the agents are not shared, and there is no connection between the architectures of their neural networks. The training procedure is shown in Figure 2. The training process alternates between two procedures: speaker training and listener training. While one agent is being trained, the parameters of the other agent are fixed.
\begin{algorithm}[!h]
\caption{OurAlgorithm$(t,\hat{t})$}
\begin{algorithmic}[1]
\IF{Training the speaker agent S}
\FOR{Batch T randomly selected from $M_0\times M_1$}
\FOR{$t=(c_0,c_1)$ in T}
\STATE $P(s_0|t),P(s_1|t)=\pi_{old}^S(s=(s_0,s_1)|t)$
\STATE Sample $s_0$ with $P(s_0|t)$, $s_1$ with $P(s_1|t)$
\STATE $P(\hat{t}|s) = \pi^L(\hat{t}|s)$
\STATE Sample $\hat{t}$ with $P(\hat{t}|s)$
\STATE Get reward $R(\hat{t},t)$
\STATE $J(\theta^S,\theta^L)=E_{\pi_{old}^S,\pi^L}[R(\hat{t},t)\cdot\frac{\pi^S(s|t)}{\pi^S_{old}(s|t)}]$
\STATE Update $\theta^S$ by $\bigtriangledown_{\theta^S}J$
\ENDFOR
\STATE $\pi_{old}^S\leftarrow \pi^S$
\ENDFOR
\ENDIF
\IF{Training the listener agent L}
\FOR{Batch T randomly selected from $M_0\times M_1$}
\FOR{$t=(c_0,c_1)$ in T}
\STATE $P(s_0|t),P(s_1|t)=\pi^S(s=(s_0,s_1)|t)$
\STATE Sample $s_0$ with $P(s_0|t)$, $s_1$ with $P(s_1|t)$
\STATE $P(\hat{t}|s) = \pi^L_{old}(\hat{t}|s)$
\STATE Sample $\hat{t}$ with $P(\hat{t}|s)$
\STATE Get reward $R(\hat{t},t)$
\STATE $J(\theta^S,\theta^L)=E_{\pi^S,\pi_{old}^L}[R(\hat{t},t)\cdot\frac{\pi^L(\hat{t}|s)}{\pi^L_{old}(\hat{t}|s)}]$
\STATE Update $\theta^L$ by $\bigtriangledown_{\theta^L}J$
\ENDFOR
\STATE $\pi_{old}^L\leftarrow \pi^L$
\ENDFOR
\ENDIF
\end{algorithmic}
\end{algorithm}
Figure 2. Training algorithm of the agents.
\subsection{Evaluation}
Our objective is to study the relationship between the agent model capacity and the compositionality of the emergent language, within the range afforded by the need for successful communication. When the accuracy of the listener converges to 100\%, it is believed that the training process is finished. With one training process, the agent model is evaluated through two aspects: the model capacity and the compositionality of the emergent language.
\begin{table}[b]
\centering
\small
\caption{The Chi-square test between high-compositionality and agent capacity.}
\label{tab:exp10}
\begin{tabular}{cccc}
\toprule
\multicolumn{4}{c}{$H_0$: $\mathit{MIS} > 0.90$ is independent of $h_{\mathit{size}}$}\\
\midrule
Configuration & $\chi^2$ & $df$ & $p$-value \\
\midrule
$|M_0|=5,|M_1|=3,|V|=10$ & 87.20 & 10 & $1.72\times 10^{-13}$ \\
$|M_0|=4,|M_1|=4,|V|=10$ & 71.47 & 10 & $1.70\times 10^{-10}$ \\
\bottomrule
\multicolumn{4}{c}{\vspace{1em}}\\
\toprule
\multicolumn{4}{c}{$H_0$: $\mathit{MIS} > 0.99$ is independent of $h_{\mathit{size}}$}\\
\midrule
Configuration & $\chi^2$ & $df$ & $p$-value \\
\midrule
$|M_0|=5,|M_1|=3,|V|=10$ & 34.15 & 10 & $6.39\times 10^{-4}$ \\
$|M_0|=4,|M_1|=4,|V|=10$ & 38.26 & 10 & $1.39\times 10^{-4}$ \\
\bottomrule
\end{tabular}
\end{table}
\section{Appendix}
\label{sec:exp}
We add two sets of experimental results to further verify the relationship between
agent capacity and the compositionality of symbolic language that emerged in our natural referential game.
As a supplement to the \emph{Experiments} section, these two sets of data (corresponding to two
kinds of configuration) are used to prove that the relationship is independent of the configuration.
Specifically, with the configuration of: a)$|M_0|=5,|M_1|=3,|V|=10$ and b)$|M_0|=4,|M_1|=4,|V|=10$,
we train the speaker-listener agents to emerge symbolic language when varying the agent capacities,
i.e., hidden layer size ($h_{size}$), from 6 to 100.
\begin{figure}[t]
\centering \includegraphics[width=0.99\columnwidth]{fig/Appendix_Figure1_MIS.pdf}
\caption{Compositionality of symbolic language under different parameters
($[\mu-\sigma,\mu+\sigma]$, where $\mu$ is the mean value and $\sigma$ is
the standard deviation).}
\label{fig:exp1}
\end{figure}
\begin{figure}[t]
\centering \includegraphics[width=0.99\columnwidth]{fig/Appendix_Figure2_Ratio.pdf}
\caption{The ratio of high compositional language. (a) $MIS>0.99$. (b)
$MIS>0.9$. }
\label{fig:exp2}
\end{figure}
Figure~\ref{fig:exp1} reports the supplementary experimental results. Consistent with
previous experiments, it can be observed that the mean value of MIS decreases as the value
of $h_{size}$ increases, no matter what configuration we take. MIS significantly decreases
from around 0.8 to less than 0.7 when $h_{size}$ increases from 6 to 100.
Just like we do in the \emph{Experiment} section, we further breakdown our results to show the importance
of agent capacity for emerging a symbolic language with high compositionality. Figure~\ref{fig:exp2} reports
the ratio of high compositional symbolic language in all emerged languages,
Figure~\ref{fig:exp2} (a) and (b) for $\mathit{MIS}>0.99$ and $\mathit{MIS}>0.9$, respectively.
Under these two supplementary configurations, we also find that the ratio of high compositional symbolic languages
decreases drastically with the increase of $h_{size}$, and that such ratio would be close to zero when the agent capacity
becomes too large (i.e., $h_{size} > 80$).
For these two supplementary sets of data, we also perform $\chi^2$ test to check the statistical
significance between the high compositionality and agent
capacity. Table~\ref{tab:exp10} reports the $\chi^2$ test results for
$\mathit{MIS}>0.99$ and $\mathit{MIS}>0.9$, respectively. It can be observed that
for different vocabulary sizes, the p-value is always less than 0.05, which means
the high compositionality has a statistical significance related to agent
capacity.
In conclusion, combining these two supplementary sets of data and the experimental results in
the \emph{Experiment} section, we prove that the negative correlation between agent capacity
and compositionality of emergent language is independent of the configuration (i.e., vocabulary size,
count of colors and shapes).
%\section{Agent Capacity vs. Compositionality}
%\label{ssec:exp}
\begin{figure}[t]
\centering \includegraphics[width=0.99\columnwidth]{fig/Figure7_The_ratio_of_high_compositional_language.pdf}
\caption{The ratio of high compositional language. (a) $MIS>0.99$. (b)
$MIS>0.9$. }
\label{fig:exp2}
\end{figure}
\begin{figure*}[t]
\centering
\includegraphics[width=\textwidth]{fig/Figure9.pdf}
\caption{Accuracy of Listeners when varying $h_{size}$ from 1 to 8. Each curve
represents an average accuracy trend from 50 repeated training, with the
range of [$\mu - \sigma$, $\mu + \sigma$], where $\mu$ is the average
accuracy and $\sigma$ is the standard deviation.}
\label{fig:exp3}
\end{figure*}
%\begin{figure}[t]
% \centering
% \includegraphics[width=0.99\columnwidth]{fig/Figure10_p_value.pdf}
% \caption{The Chi-square test between high-compositionality and agent
% capacity. (a) $MIS>0.99$. (b)
% $MIS>0.9$.}
% \label{fig:exp10}
%\end{figure}
\begin{table}[b]
\centering
\small
\caption{The Chi-square test between high-compositionality and agent capacity.}
\label{tab:exp10}
\begin{tabular}{cccc}
\toprule
\multicolumn{4}{c}{$H_0$: $\mathit{MIS} > 0.90$ is independent of $h_{\mathit{size}}$}\\
\midrule
Vocabulary size & $\chi^2$ & $df$ & $p$-value \\
\midrule
4 & 22.20 & 10 & $1.41\times 10^{-2}$ \\
6 & 27.52 & 10 & $2.16\times 10^{-3}$ \\
10 & 64.46 & 10 & $5.14\times 10^{-10}$ \\
\bottomrule
\multicolumn{4}{c}{\vspace{1em}}\\
\toprule
\multicolumn{4}{c}{$H_0$: $\mathit{MIS} > 0.99$ is independent of $h_{\mathit{size}}$}\\
\midrule
Vocabulary size & $\chi^2$ & $df$ & $p$-value \\
\midrule
4 & 30.19 & 10 & $7.97\times 10^{-4}$ \\
6 & 25.96 & 10 & $3.80\times 10^{-3}$ \\
10 & 33.80 & 10 & $2.00\times 10^{-4}$ \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}[t]
\centering
\includegraphics[width=0.8\columnwidth]{fig/Figure8_Three_artificial_languages_with_different_MIS.pdf}
\caption{Three pre-defined language for teaching. (a) LA: high compositionality
($MIS=1$). (b) LB: mediate compositionality ($MIS=0.83$). (c) LC: low compositionality ($MIS=0.41$).}
\label{fig:bench}
\end{figure}
\section{Experiments}
\label{sec:exp}
We exploit the relationship between agent capacity and the compositionality of
symbolic language that emerged in our natural referential game.
For various configurations of
vocabulary size, we fix $|M_0|=|M_1|=3$ and train the speaker-listener agents to emerge symbolic
language when varying the agent capacities, i.e., hidden layer size
($h_{size}$), from 6 to 100.
Figure~\ref{fig:exp1} reports the experimental results. It can be observed that
the mean value of MIS decreases as the value of $h_{size}$ increases. Taking the
configuration of vocabulary size $|V|=10$ as an example, the mean value of MIS
is around 0.8 when $h_{size}\le 20$; MIS significantly decreases to 0.75 when
$h_{size}$ increases from 20 to 40; MIS further reduces to 0.7 when $h_{size}$
increases from 40 to 100.
For different vocabulary sizes, the MIS shares the
similar behavior.
This is because symbols in low-compositional languages carry semantic information
about more concepts. As a result, higher capacity is required to characterize the
complex semantic information for low-compositional language to emerge.
In summary, lower agent capacity improves the possibility of
emerging high compositional symbolic language.
\subsection{Ratio of high compositional language.}
We further breakdown our results to investigate the importance of agent capacity
to the compositionality of symbolic language. Figure~\ref{fig:exp2} reports the
ratio of high compositional symbolic language in all emerged languages,
Figure~\ref{fig:exp2} (a) and (b) for $\mathit{MIS}>0.99$ and $\mathit{MIS}>0.9$, respectively. It
can be observed that the ratio of high compositional symbolic languages
decreases drastically with the increase of $h_{size}$.
Taking vocabulary size $|V|=4$ as an example, symbolic languages with
compositionality $\mathit{MIS}>0.99$ account for more than 10\% of all the emerged symbolic
languages when $h_{size}<20$; the ratio reduces to 0\%$\sim$5\% when $h_{size}$
increases to 40; the ratio further reduces to around 3\% when $h_{size}$ goes beyond 40.
$\mathit{MIS}>0.9$ reports similar results.
Notably, when $h_{size}$ is large enough (e.g., $>40$), high compositional
symbolic language is hard to emerge in a natural referential game, since the
easy-to-emerge low compositional symbolic language is already sufficient in scenarios of the
referential game.
On the other hand, agents are forced to use compositionality to express
more meanings due to the constraint of low capacity.
Additionally, we also perform $\chi^2$ test to check the statistical
significance between the high compositionality and agent
capacity. Table~\ref{tab:exp10} reports the $\chi^2$ test results for
$\mathit{MIS}>0.99$ and $\mathit{MIS}>0.9$, respectively. It can be observed that
for different vocabulary sizes, the p-value is always less than 0.05, which means
the high compositionality has a statistical significance related to agent
capacity.
%\subsection{Breakdown}
%\label{ssec:language}
%\begin{figure}[t]
% \centering
% \includegraphics[width=0.9\columnwidth]{fig/occupy}
% \caption{}
% \label{fig:exp4}
%\end{figure}
\subsection{Breakdown into language teaching.}
We further breakdown the learning process to investigate the language teaching
scenario, where the Speaker teaches the Listener its fixed symbolic language.
We define three symbolic languages in different compositionality for Speaker to
teach, i.e., high (LA, $\mathit{MIS}=1$), mediate (LB, $\mathit{MIS}=0.83$), low (LC, $\mathit{MIS}=0.41$), see
Figure~\ref{fig:bench}.
Figure~\ref{fig:exp3} reports the accuracy of Listener, i.e., the ratio of the correctly
predicted symbols spoken by Speaker ($t=\hat{t}$), which varies with the
training iterations under different agent capacities.
Figure~\ref{fig:exp3} (a) shows that when $h_{size}$ equals 1, the agent capacity is
too low to handle any of the languages. Figure~\ref{fig:exp3} (b) shows that when $h_{size}$
equals 2, the agent can only learn $LA$, whose compositionality (i.e., \emph{MIS})
is the highest among the three languages. Combining these two observations, we can infer that
a language with lower compositionality requires a higher agent capacity (i.e., a larger
$h_{size}$) to ensure successful communication.
Additionally, Figure~\ref{fig:exp3} (c)$\sim$(h) shows that the
higher agent capacity causes a faster training process for all three languages, but the
improvement for different languages is quite different. It is obvious that language with lower compositionality also requires higher agent
capacity to train faster.
%In conclude, teaching an artificial language with
%lower compositionality to agent require higher agent capacity both for learning
%successfully and training faster.
\section{Introduction}
\label{sec:introduction}
The emergence of language has always been an important issue,
which attracts attention from a broad range of communities,
including philology, biology, and computer
science. Especially in computer science, efforts in recent years have tried to explore
the emergent language in virtual multi-agent environments, where
agents are trained to communicate with neural-network-based methods such as deep
reinforcement learning~\cite{kottur-etal-2017-natural,bogin2018emergence,lazaridou2018emergence,choi2018compositional,jaques2019social,mul2019mastering,kharitonov2019egg,labash2020perspective,chaabouni2020compositionality}.
%Such works can be roughly classified into two categories,
%referential game~\cite{} and multi-agent reinforcement learning (MARL)~\cite{}, based on
%the environment setting.
The quality of emergent language is typically measured by its
\emph{compositionality}.
Compositionality is a principle that determines
whether the meaning of a complex expression (e.g., phrase), which is assembled out of a
given set of simple components (e.g., symbols), can be determined by its
constituent components and the rule combining them~\cite{andreas2018measuring,chaabouni2020compositionality}.
\note{For example, the expression ``AAAI is a conference'' consists of two
meaningful words ``AAAI'' and ``conference'', and a rule for definition (``is'').
Compositionality is considered to be a source of productivity,
systematicity, and learnability of language, and the reason why a language with finite
vocabulary can express almost infinite concepts.}
%More recently, measuring the compositionality \note{xxxxx}.}
%It
%is a concept in the philosophy of language [1], which describes and quantifies
%how complex expressions can be assembled out of simpler parts [2]. For example,
%Figure1(a) shows a perfect compositional language (with maximum
%compostionality). In this example, each shape is represented by a unique value
%of symbol $s_0$ and each color is represented by symbol $s_1$. Figure1(b) shows a
%language with low compostionality. Colors and shapes are ambiguous if only we
%extract information from a single symbol.
%
%
%\begin{figure}[t]
% \centering
% \includegraphics[width=\columnwidth]{fig/Figure1_motivation.pdf}
% \caption{The distribution of compositionality for 100 emerged symbolic
% languages without
% any induction. It can be observed that high compositional symbolic language
% seldom emerged (e.g., $<5\%$ for compositionality $>0.99$). Moreover, varying
% the vocabulary size does not affect the compositionality notably.}
% \label{fig:induction}
% \end{figure}
%\begin{table*}[t]
% \centering
% \small
% \caption{Handcrafted inductions in related works.}
% \label{tab:rel}
% \begin{tabular}{llllll}
% \toprule
% Works & Handcrafted induction & Compositionality\\
% \midrule
% \cite{kirby2015compression}&Expressivity and compressibility&Not quantitative, Speaker\\
% \cite{kottur-etal-2017-natural}&Listener's memory&Not quantitative, Speaker\\
% \cite{choi2018compositional}&Maximum message length&Not quantitative, Speaker+Listener\\
% \cite{lazaridou2018emergence}&Structure of input data&Quantitative, Speaker\\
% \cite{evtimova2018emergent}&Multi-modal scenarios&Quantitative, Speaker\\
% \cite{li2019ease}&Population size, resetting all listeners&Quantitative, Speaker\\
% \cite{chaabouni-etal-2019-word}&Word-order constraints&Not quantitative, Speaker\\
% \cite{chaabouni2020compositionality}&Easier to decode&Quantitative, Speaker\\
% \textbf{Ours} & \textbf{None} & \textbf{Quantitative, Speaker+Listener} \\
% \bottomrule
% \end{tabular}
% \end{table*}
Prior studies focus on achieving high compositional symbolic language
through \emph{deliberately handcrafted} inductions, e.g., additional rewards~\cite{mordatch2017emergence},
constructed loss functions~\cite{kharitonov2019egg}, structural input data~\cite{lazaridou2018emergence,evtimova2018emergent},
memoryless~\cite{kottur-etal-2017-natural,li2019ease}, and ease-of-teaching~\cite{li2019ease}.
\note{Such optimization methodologies are driven by the challenges to generate high compositional symbolic language without induction in an existing multi-agent environment.}
Figure~\ref{fig:induction} reports the compositionality when training two agents
in the widely-used listener-speaker referential game~\cite{david1969convention} for emerging 100
languages, and it can be observed that \note{the compositionality
of emergent language is seldom high (e.g., $<5\%$ for compositionality $>0.99$)
without any induction. Moreover, varying
the vocabulary size does not affect the compositionality notably.}
Though such unnatural inductions are useful, they prevent us from better understanding the mystery of
the emergence of language and even intelligence among our pre-human ancestors.
Yet, few works investigate the emergence of high compositional symbolic language
\emph{naturally}, i.e., without handcrafted inductions.
In other words, it is never clear whether \emph{natural}
environment and agents are sufficient for achieving high compositionality.
This paper is the first one to achieve high compositional
language without any deliberately handcrafted induction. The key observation
is that the internal \emph{agent capacity} plays a crucial role in the
compositionality of emergent language.
%by thoroughly
%analyzing the compositionality after removing the inductions in
%the most widely-used listener-speaker referential game framework.
Concretely, the relationship between the agent capacity and the compositionality
of emergent language is characterized, with a novel mutual information-based
metric for the compositionality.
%both theoretically and experimentally.
%theoretically
Regarding the theoretical analysis, we propose
%use the \note{Markov Series Channel (MSC)~\cite{} to model the language
% transmission process and}
a novel mutual information-based metric to measure the compositionality quantitatively.
%experimentally
Regarding the experimental validation, we exploit the relationship between agent
capacity and the compositionality of symbolic language that emerged
\emph{naturally} in our experiments.
%two different dedicated experiments, i.e., \note{XXX and XXX, are utilized for XXX}.
%Regarding the experimental validation, it is conducted on a listener-speaker
%referential game framework with eliminated unnatural inductions.
Both the theoretical analysis and experimental results lead to a counter-intuitive
conclusion that \emph{lower agent capacity facilitates the emergence of language
with higher compositionality}. \note{Therefore, by only reducing the agent capacity
in such a natural environment, we
can generate a more compositional language with a higher probability.}
%Prior studies focus on investigating how to affect the
%compositionality of the emergent language. Researchers
%have found that various environmental pressures would affect compositionality,
%e.g., small vocabulary sizes[3], memoryless[4],
%carefully constructed rewards[5] and ease-of-teaching[6]. However, these works
%only consider \emph{nurture} [7] (i.e., environmental factors), rather than
%\emph{nature} (i.e., hereditary factors from agents), when inducing or exploring
%the emergent language without exception. Moreover, some environmental pressures,
%like regrading the entropy as an item of additional rewards, may be too ideal to
%exist in the real world.
%In contrast to prior work, we investigate the compositionality of emergent
%language from a new perspective, i.e., the agent capacity. Different from
%previous work that only considers external environmental factors, we study the
%impact of agent internal capacity on the compositionality of emergent
%language. Specifically, we first analyze the correlation between agent capacity
%and compositionality theoretically, and propose a novel metric to evaluate
%compostionality quantitatively. Then, on the basis of the theoretical analysis
%and the metric proposed, we verify the relationship between agent capacity and
%compostionality experimentally.
%
%
%Theoretically, on the basis of mutual information theory[8], we analyse the
%correlation between compostionality of the emergent language and complexity of
%the semantic information carried by a symbol. Such semantic information can be
%characterized in neural network-based agents and requires the certain capacity
%(i.e., the count of neural nodes in the hidden layer). Specifically, we use the
%MSC (Markov Series Channel)[9] to model the language transmission process and
%use the probability distribution of symbols and concepts to model policies of
%agents. After modelling, we use the mutual information matrix $MRI^B$ to
%quantitatively represent the semantic information, and each column of $MRI^B$
%correspond to information carried by one symbol. We find that each column of the
%matrix should be an one-hot vector for a perfectly compositional language, cause
%a symbol only transmit information of a certain concept exclusively. Therefore,
%the average similarity between the columns of $MRI^B$ and a one-hot vector is
%higher, indicating that the emergent language is more compostional (i.e., the
%compostionality is higher). We propose the metric \emph{MIS} to measure
%compositionality by calculating such average similarity
%quantitatively. Different from other metrics, such as \emph{topographic
% similarity}[10] and \emph{posdis}[11], \emph{MIS} is a bilateral metric
%because it takes both listener and speaker's understanding of semantics into
%account. Moreover, \emph{MIS} comes lower indicates that the emergent language
%tends to delivery semantic information about more concepts in each symbol, so
%that the complexity of semantic information carried by one symbol tend to be
%higher. As a result, higher agent capacity is required to characterize the more
%complex semantic information when \emph{MIS} (i.e., compositionality) is lower.
%
%Experimentally, we verify the relationship between agent capacity and
%compostionality. We build a listener-speaker referential game as experimental
%framework, and train agents of Stochastic Policy Gradient Algorithm[12] with the
%correctness of forecast output from the listener as the criterion (i.e.,
%reward). The criterion does not imply any environmental pressures on the
%agents. Therefore, we can study the impact of capacity on the compositionality
%without any environmental pressures’ affection. Moreover, to study the impact of
%capacity on the compositionality under a more ‘natural’ environment, the speaker
%and listener are disconnected models without sharing parameters. Our first
%experiment is to verify that agent need higher capacity to master an artificial
%language with lower compositionality under a scenario of language
%teaching. Specifically, we fabricate the speaker to output preassigned languages
%with different compostionality respectively, and train the listener to interpret
%the preassigned language. For all artificial language, we compare the accuracy
%curve during training process of the listener with different capacity, and show
%how capacity affect learning languages with different compostionality. Our
%second experiment is to verify that lower agent capacity would facilitate higher
%compostionality of the emergent language under a scenario of language
%inducing. Specifically, we training a speaker and a listener to create a
%communication protocol (i.e., emergent language), so that the listener can
%select the same object which is received by the speaker. By adjusting capacity
%and comparing the compositionality of emergent language, we show that the
%emergent language attend to have higher compositionality when agent capacity is
%restricted more stringently. As a result, these two experiments verify the
%negative correlation between agent capacity and compostionality both in language
%teaching and language inducing.
%
%This paper makes the following contributions:
%
%We propose a ‘bilateral’ metric \emph{MIS}, which takes both listener and speaker's understanding of semantics into account. Compare to previous ‘unilateral’ metrics, \emph{MIS} can handle situations where the semantics of the listener and the speaker are not exactly the same (, we discuss the problem in next section).
%
%We analyse the relationship between compostionality and agent capacity theoretically.
%
%We verify the negative correlation between agent capacity and compostionality both in language teaching and language inducing.
%%\endsection
In this paper, we make the following contributions:
\begin{itemize}[topsep=0pt,itemsep=0cm]
\item To our best knowledge, we are the first work to successfully achieve
high compositional symbolic
language naturally, without any deliberately handcrafted induction.
\item We analyze the compositionality of emerged symbolic language
after removing deliberately handcrafted inductions.
\item We propose a novel mutual information-based metric to measure the
compositionality quantitatively, which is more reasonable.
\item We experimentally explored the relationship between agent
capacity and the compositionality of emergent language. Both theoretical analysis and
experimental results lead to a counter-intuitive conclusion that lower agent
capacity facilitates the emergence of symbolic language with higher
compositionality.
\end{itemize}
The rest of this paper is arranged as follows.
Section~\ref{sec:relatedwork} summarizes the related works.
Section~\ref{sec:thory}
introduces the experimental setup used in this study. Section~\ref{sec:mis}
describes our proposed novel mutual-information-based metric for measuring
the compositionality of symbolic language. Section~\ref{sec:exp} gives the
experimental results of the exploration for the relationship between agent
capacity and compositionality. Section~\ref{sec:con} concludes this paper.
\section{Conclusion}
\label{sec:con}
In this paper, we are the first work to achieve high compositional
symbolic language without any deliberately handcrafted induction.
We made the key observation that the internal \emph{agent capacity} plays a crucial role in the compositionality of symbolic language.
Together with the theoretical analysis, experimental results led to a
counter-intuitive conclusion that \emph{lower agent capacity facilitates the emergence of symbolic language with higher compositionality}.
Therefore, by only reducing the agent capacity in such a natural environment, we
generated a higher compositional symbolic language with a higher probability.
\section{Related Works}
\label{sec:relatedwork}
%external environmental factors
Previous works focus on the \emph{deliberately handcrafted} inductions that affect the
compositionality of emergent language.
Some significant works on studying the environmental inductions on the compositionality of emergent language are summarized in Table~\ref{tab:rel}.
For example, \citet{kirby2015compression} explored how the pressures for expressivity and compressibility lead to structured language.
\citet{kottur-etal-2017-natural} constrained the vocabulary size and whether the listener has memory to coax the compositionality of the emergent language.
\citet{lazaridou2018emergence} showed that the degree of structure found in the input data affects the emergence of the symbolic language.
\citet{li2019ease} studied how the pressure of ease of teaching impacts the iterated language of the population regime.
\citet{evtimova2018emergent} designed novel multi-modal scenarios, in which the speaker and the listener access different modalities of the input object, to explore language emergence.
These inductions are deliberately designed, which are too ideal to be true in
the real world.
In this paper, these handcrafted inductions above are all removed, and the high compositional language is learned only by the agent capacity.
%measure
To measure the compositionality of emergent language, metrics are
proposed~\cite{kottur-etal-2017-natural,choi2018compositional,lazaridou2018emergence,evtimova2018emergent,chaabouni2020compositionality}.
%Widely accepted metrics can be classified into two categories, measuring
%positive signaling~\cite{} and measuring positive listening~\cite{}. The former
%metrics measure the relationship between spoken symbols and received concepts
%\rmk{not clear}, from the perspective of \emph{speakers}.
%For example,.
%The latter metrics measure the relationship between received symbols and
%predicted concepts \rmk{not clear}, from the perspective of \emph{listeners}.
%For example,.
%However, these metrics are not appropriate, for they only measure
%compositionality of symbolic language in \emph{unilateral} role\rmk{not sure},
%either speakers or listeners. They can not measure the degree of \emph{bilateral}
%understanding between speakers and listeners, i.e., the concept-symbol mapping
%consistency between speakers and listeners.
At the initial stage, many studies only analyzed the language compositionality qualitatively (i.e., not quantitatively).
For example, \citet{choi2018compositional} printed the agent messages with the letters `abcd' at some training rounds, and directly analyzed the compositionality of these messages.
\citet{kottur-etal-2017-natural} introduced the dialog tree to show the evolution of language compositionality during the training process.
Later, some quantitative metrics were explored.
The topographic similarity~\cite{lazaridou2018emergence} is introduced to measure the distances between all the possible pairs of meanings and the corresponding pairs of signals.
\citet{chaabouni2020compositionality} proposed the positional disentanglement, which measures whether symbols in a specific position relate to the specific attribute of the input object.
From Table~\ref{tab:rel}, most metrics are proposed from the perspective of the speaker. In our view, human beings developed language based on bilateral communication between the speaker and the listener. One study~\cite{choi2018compositional} considered the metric bilaterally, but it is not a quantitative metric. In this paper, we propose a novel quantitative metric from both the speaker's and the listener's perspectives.
In conclusion, the previous works induced compositional language based on some deliberately handcrafted inductions,
and a quantitative metric from the perspective of both the speaker and the listener is still lacking.
In this paper, we remove all the handcrafted inductions as shown in Table~\ref{tab:rel} and obtain a high compositional language through the internal agent capacity alone.
Moreover, we propose a quantitative metric which takes both the speaker and the listener into account.
\section{Framework of Language Emergence}
\label{sec:thory}
\begin{figure}[t]
\centering \includegraphics[width=\columnwidth]{fig/Figure2_The_referential_game_environment.pdf}
\caption{The referential game in this paper.}
\label{fig:game}
\end{figure}
\begin{figure*}[t]
\centering
\includegraphics[width=1.8\columnwidth]{fig/Figure3_The_architecture_of_agents.pdf}
\caption{The architecture of agents. \emph{Left:} speaker. \emph{Right:} listener.}
\label{fig:agents}
\end{figure*}
\begin{algorithm}[t]
\caption{Learning Algorithm$(t,\hat{t})$}
\label{al:learning}
\small
\begin{algorithmic}[1]
\IF{Training the speaker agent S}
\FOR{Batch T randomly selected from $M_0\times M_1$}
\FOR{$t=(c_0,c_1)$ in T}
\STATE $P(s_0|t),P(s_1|t)=\pi_{old}^S(s=(s_0,s_1)|t)$
\STATE Sample $s_0$ with $P(s_0|t)$, $s_1$ with $P(s_1|t)$
\STATE $P(\hat{t}|s) = \pi^L(\hat{t}|s)$
\STATE Sample $\hat{t}$ with $P(\hat{t}|s)$
\STATE Get reward $r(\hat{t},t)$
\STATE $J(\theta^S,\theta^L)=E_{\pi_{old}^S,\pi^L}[r(\hat{t},t)\cdot\frac{\pi^S(s|t)}{\pi^S_{old}(s|t)}]$
\STATE Update $\theta^S$ by $\bigtriangledown_{\theta^S}J$
\ENDFOR
\STATE $\pi_{old}^S\leftarrow \pi^S$
\ENDFOR
\ENDIF
\IF{Training the listener agent L}
\FOR{Batch T randomly selected from $M_0\times M_1$}
\FOR{$t=(c_0,c_1)$ in T}
\STATE $P(s_0|t),P(s_1|t)=\pi^S(s=(s_0,s_1)|t)$
\STATE Sample $s_0$ with $P(s_0|t)$, $s_1$ with $P(s_1|t)$
\STATE $P(\hat{t}|s) = \pi^L_{old}(\hat{t}|s)$
\STATE Sample $\hat{t}$ with $P(\hat{t}|s)$
\STATE Get reward $r(\hat{t},t)$
\STATE $J(\theta^S,\theta^L)=E_{\pi^S,\pi_{old}^L}[r(\hat{t},t)\cdot\frac{\pi^L(s|t)}{\pi^L_{old}(s|t)}]$
\STATE Update $\theta^L$ by $\bigtriangledown_{\theta^L}J$
\ENDFOR
\STATE $\pi_{old}^L\leftarrow \pi^L$
\ENDFOR
\ENDIF
\end{algorithmic}
\end{algorithm}
Before going to the detail of the training algorithms, we first introduce the environment, gaming rules, and agent architecture for enabling the emergence of symbolic language.
\subsection{Environment setup}
\label{ssec:env}
Figure~\ref{fig:game} shows the entire environment used in this study,
i.e., a commonly used referential game. Roughly, the referential game requires the speaker and listener to work cooperatively to accomplish a certain task.
In this paper, the task is to have the listener agent reconstruct the object
what the speaker claims it has seen, only through their emerged communication protocol. The consistent success in this game indicates that language has emerged between speaker and listener.
\textbf{Game rules} In our referential game, agents follow the following rules to finish the game in a cooperative manner. In each round, upon receiving an input object $t$, Speaker $S$ speaks symbols $s$ to Listener $L$; Listener $L$ reconstructs the predicted result $\hat{t}$ based on the listened symbols $s$; if $t=\hat{t}$, agents win this game and receive positive rewards ($r(t,\hat{t})=1$); otherwise agents fail this game and receive negative rewards ($r(t,\hat{t})=-1$).
Precisely, during the game, Speaker $S$ receives an input object $t$, which is a concept-pair with two concepts
from the concept set $M_0$ and $M_1$, i.e., two one-hot vectors representing shape and color, respectively. Based on the $t$, Speaker $S$ speaks a symbol sequence $s$, which similarly contains two words from $V$.
The Listener $L$ receives $s$ and output predicted result $\hat{t}$, a single word (one-hot vector) corresponded with a concept-pair from the Cartesian product of $M_0\times M_1$, which represents all the meanings of two combined words
from $M_0$ and $M_1$. Please note that since $t$ and $\hat{t}$ have different length, we say $t=\hat{t}$ if $t$ expresses the same concept-pair as $\hat{t}$, e.g., ``red circle''.
\subsection{Agent architecture}
\label{ssec:agent}
Figure~\ref{fig:agents} shows the architecture of the constructed agents,
including the Speaker $S$ and Listener $L$.
\textbf{Speaker.} Regarding the Speaker $S$, it is constructed as a three-layer neural
network. The Speaker $S$ processes the input object $t$ with a fully-connected
layer to obtain the hidden layer $h^s$, which is further processed with fully-connected layers to obtain the output
layer. The output layer results indicate the probability distribution of symbols
with given input object $t$, i.e., $o_i^{s}=P(s_i|t)$ $i\in{0,1}$. \note{The final
readout symbols are sampled based on such probability distribution.}
\textbf{Listener.} Regarding the Listener $L$, it is constructed as a
three-layer neural network, too. Different from Speaker $S$ that tries to separate the input object into words, $L$ tries to concatenate words to understand the combined meaning. The output layer results are also the probability distribution of
symbols $\hat{t}$ with given input sequence $s$, i.e, $o^{L}=P(\hat{t}|s_0,s_1)$.
\subsection{Learning algorithm}
\label{ssec:training}
To remove all the handcrafted induction as well as for a more realistic
scenario, agents for this referential game are independent of each other,
with no shared model parameters or architectural connections. As shown in
Algorithm~\ref{al:learning}, we train the separate Speaker $S$ and Listener $L$ with
Stochastic Policy Gradient methodology in a tick-tock manner, i.e., training one
agent while keeping the other one fixed. Roughly, when training the Speaker, the
target is set to maximize the expected reward
$J(\theta_S, \theta_L)=E_{\pi_S,\pi_L}[r(t, \hat{t})]$ by adjusting the parameter
$\theta_S$, where $\theta_S$ is the neural network parameters of Speaker $S$
with learned output probability distribution $\pi_S$, and $\theta_L$ is the
neural network parameters of Listener with learned probability distribution $\pi_L$.
Similarly, when training the Listener, the target is set to maximize the
expected reward $J(\theta_S, \theta_L)$ by fixing the parameter $\theta_S$ and
adjusting the parameter $\theta_L$.
Additionally, to avoid the handcrafted induction on emergent language, we only
use the predicted result $\hat{t}$ of the listener agent as the
evidence of whether giving positive rewards. Then, the gradients of the
expected reward $ J(\theta_S, \theta_L)$ can be calculated as follows:
\begin{align}
\nabla_{\theta^S} J &= \mathbb{E}_{\pi^S, \pi^L} \left[ r(\hat{t}, t) \cdot
\frac{\nabla_{\theta^S}\pi^S(s_0, s_1 | t)}{\pi^S_{old}(s_0, s_1 | t)} \right] \\
\nabla_{\theta^L} J &= \mathbb{E}_{\pi^S, \pi^L} \left[ r(\hat{t}, t) \cdot
\frac{\nabla_{\theta^L} \pi^L(\hat{t} | s_0, s_1)}{\pi^L_{old}(\hat{t} | s_0, s_1)} \right]
\end{align}
\section{Mutual Information Similarity (MIS)}\label{sec:mis}
In this section, we propose the \emph{Mutual Information Similarity (MIS)} as a metric of compositionality and give a thorough theoretical analysis.
MIS is the similarity between an identity matrix and the mutual information matrix of concepts and symbols.
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{fig/Figure4_The_information_channel.pdf}
\caption{The information channel modeling of the agents in the referential game.}
\label{fig:modeling}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=0.8\columnwidth]{fig/Figure5_An_emergent_language.pdf}
\caption{An emergent language that the unilateral metrics cannot measure its non-compositionality. Notice that given $s_1 = \mathrm{a}$, the listener can neither determine the shape nor the color without the knowledge about $s_0$.}
\label{fig:unilateral}
\end{figure}
Before giving the definition of MIS, we first model the agents in the referential games. As shown in Figure~\ref{fig:modeling}, the listener and speaker in the referential game are connected in tandem. The speaker agent can be regarded as a channel, whose input is a concept $c = (c_0, c_1)$ and output is a symbol $s = (s_0, s_1)$. The listener agent can be regarded as another channel, whose input is a symbol $s = (s_0, s_1)$ and output is a predicted result $\hat{t} = (\hat{c}_0, \hat{c}_1)$. Since the output of the listener only depends on the symbol $s$, we can model the policy of the speaker agent and the listener agent by the probability distribution $P(s = (s_0, s_1) | t = (c_0, c_1))$ and $P(\hat{t} = (\hat{c}_0, \hat{c}_1) | s_0, s_1)$, respectively.
Now we can analyse the information of the concepts preserved in the transmission process given the symbol transmitted, i.e., the conditional mutual information $I\left(t,\hat{t}|s\right)$. Whenever a stable language emerges, the speaker and the listener consistently use a specific symbol $s$ to refer to a specific object $t$. Therefore we can safely say $I\left(t,\hat{t}|s\right) = I\left(t,\hat{t}|s_{t,\hat{t}}\right)$ where $s_{t,\hat{t}}=\arg\max_s\left\{P\left(\hat{t}|s\right)P\left(s|t\right)\right\}$. This conditional mutual information can be obtained by Equation~\ref{eq:cmi}.
\begin{equation}\label{eq:cmi}
I\left(t,\hat{t}|s_{t,\hat{t}}\right) = \sum_t\sum_{\hat{t}}P\left(t,\hat{t}|s_{t,\hat{t}}\right)\log\frac{P\left(t,\hat{t}|s_{t,\hat{t}}\right)}{P\left(t\right) P\left(\hat{t}|s_{t,\hat{t}}\right)}
\end{equation}
We define the ratio of preserved information $R(t, s)$ as Equation~\ref{eq:ri}, where $H(t)$ denotes the information entropy of $t$. $R(t,s)$ measures the degree of alignment between symbols and objects.
\begin{equation}\label{eq:ri}
R\left(t,s\right)=\frac{I\left(t,\hat{t}|s=s_{t,\hat{t}}\right)}{H\left(t\right)}
\end{equation}
Following the Equation~\ref{eq:ri} we can obtain the normalized mutual information matrix $M$ by collecting $R(c_i, s_j)$ for all $i, j$, as Equation~\ref{eq:mri}.
\begin{equation}\label{eq:mri}
M =
\begin{pmatrix}
R\left(c_0,s_0\right) & R\left(c_0,s_1\right)\\
R\left(c_1,s_0\right) & R\left(c_1,s_1\right)
\end{pmatrix}
\end{equation}
Each column of $M$ corresponds to the semantic information carried by one symbol. In a perfectly compositional language, each symbol represents one specific concept exclusively. Therefore, the similarity between the columns of $M$ and a one-hot vector is aligned with the compositionality of the emergent language.
\begin{figure}[t]
\centering \includegraphics[width=0.99\columnwidth]{fig/Figure6_Compostionality_of_symbolic_language.pdf}
\caption{Compositionality of symbolic language under different parameters
($[\mu-\sigma,\mu+\sigma]$, where $\mu$ is the mean value and $\sigma$ is
the standard deviation).}
\label{fig:exp1}
\end{figure}
Finally, we define \emph{raw mutual information similarity} ($\mathit{MIS}_0$)
as the average cosine similarity of $M$ columns and one-hot vectors, as
Equation~\ref{eq:mis2}. Furthermore, $\mathit{MIS}$ is $\mathit{MIS}_0$
normalized into the $[0,1]$ value range, which can be computed with the
following formula:
\begin{equation}\label{eq:mis2}\begin{aligned}
\mathit{MIS}_0 &= \frac{1}{2}\sum_{j=0}^1\frac{\max_{i=0,1}R\left(c_i,s_j\right)}{\epsilon + \sqrt{\sum_{i=0}^{1}R^2\left(c_i,s_j\right)}}, \epsilon > 0\\
\mathit{MIS} &= 2\mathit{MIS}_0 - 1
\end{aligned}\end{equation}
Generalized to $m$ symbols and $n$ concepts, MIS can be computed with the
following formula:
\begin{equation}\label{eq:mismn}\begin{aligned}
\mathit{MIS}_0 &= \frac{1}{m}\sum_{j=0}^{m-1}\frac{\max_{i\in[0,n-1]}R\left(c_i,s_j\right)}{\epsilon + \sqrt{\sum_{i=0}^{n-1}R^2\left(c_i,s_j\right)}}, \epsilon > 0\\
\mathit{MIS} &= \frac{n\cdot \mathit{MIS}_0 - 1}{n-1}
\end{aligned}\end{equation}
MIS is a bilateral metric. Unilateral metrics, e.g., \emph{topographic similarity (topo)}~\cite{lazaridou2018emergence} and \emph{posdis}~\cite{chaabouni2020compositionality}, only take the policy of the speaker into consideration. We provide an example to illustrate the inadequacy of unilateral metrics, shown in Figure~\ref{fig:unilateral}. In this example, the speaker only uses $s_1$ to represent the shape. From the perspective of the speaker, the language is perfectly compositional (i.e., both topo and posdis are 1). However, the listener cannot distinguish the shape based only on $s_1$, showing the non-compositionality of this language. The bilateral metric MIS addresses such defects by taking the policy of the listener into account, thus $\mathit{MIS} < 1$.
% NOTE(review): removed non-LaTeX residue (web-UI footer text: "Markdown is supported ... Please register or to comment") accidentally pasted into the source.