#! /bin/sh # This is a shell archive, meaning: # 1. Remove everything above the #! /bin/sh line. # 2. Save the resulting text in a file. # 3. Execute the file with /bin/sh (not csh) to create the files: # KWRmacs.tex # KWRdefs.sty # KeyComms.tex # KeyComms.aux # KeyComms.bbl # KeyComms.blg # This archive created: Thu Jul 14 11:36:20 1994 export PATH; PATH=/bin:$PATH if test -f 'KWRmacs.tex' then echo shar: will not over-write existing file "'KWRmacs.tex'" else cat << \SHAR_EOF > 'KWRmacs.tex' \newcommand{\newfontobj}[2]{ \newcommand{#1}[1]{ \expandafter\def\csname##1\endcsname{{#2 ##1}}}} \newfontobj{\class}{\rm} \class{P} \class{UP} \class{NP} \class{PH} \class{E} \class{NE} \class{EE} \class{NEE} \class{EXP} \class{NEXP} \class{PSPACE} \class{PSpace} \class{NPSPACE} \class{EXPTIME} \class{RE} \class{REC} \class{Ptime} \class{Pspace} \class{BPP} \class{BPPtime} \class{RP} \class{ZPP} \class{AM} \class{MA} \class{PP} \class{coNP} \class{coAM} \class{coRP} \class{MP} \class{MidBitP} % \class{CP} \class{FewP} \class{Few} \class{PF} \class{FP} \class{OptP} \class{SPP} \class{WPP} \class{LWPP} \class{CBP} \class{ECBP} \class{GECBP} \class{CBF} \class{GapCBF} \class{UBP} \class{GapUBP} \class{REG} \class{CFL} \class{TLIN} \class{NTLIN} \class{2DPDA} \class{AC} \class{NC} \class{BC} \class{SC} \class{DLOG} \class{NLOG} \class{DLOGTIME} \class{NLOGTIME} \class{ALOGTIME} \class{ATISP} \class{DLIN} \class{NLIN} \class{NLT} \class{DNLT} \class{NNLT} \class{NQL} \class{QL} \class{DQL} \class{BQL} \class{RQL} \class{PQL} \class{UQL} \class{QLH} \class{QLSPACE} \class{CFA} \class{CFR} \class{Con} \class{LOGSPACE} \class{L} \class{FL} \class{ACC} \class{NL} \class{SSM} \class{DGSM} \class{BM} \class{VM} \class{CREW} \class{EREW} \class{CRCW} \class{NPMV} \class{NPSV} \class{BPSV} \class{BPMV} \class{ZPMV} \class{ZPSV} \class{NNSV} \class{NNMV} \class{RPSV} \class{RPMV} % \class{PRSV} % \class{PRMV} % \class{PPMV} % \class{PPSV} \class{GapP} \class{Gap} \class{PrMV} \class{PrSV} 
\class{FO} \class{SF} \class{FOL} \class{LOGH} \class{LINH} \class{RUD} \class{LBR} \class{FIN} \class{COFIN} \class{RBM} \class{AH} \class{EQ} \class{RTLS} \newcommand{\PIMMUNE}{\mbox{\rm P-IMMUNE}} \newcommand{\PrtwoV}{\mbox{\rm PR2V}} \newcommand{\NPtwoV}{\mbox{\rm NP2V}} \newcommand{\BPtwoV}{\mbox{\rm BP2V}} % The following is a terrible hack, but it works---usually. \newcommand{\parityP}{{\mathchoice{\raisebox{1pt}{$\displaystyle\oplus$}{\rm P}}{\raisebox{1pt}{$\textstyle\oplus$}{\rm P}}{\raisebox{1pt}{$\scriptstyle\oplus$}{\rm P}}{\raisebox{1pt}{$\scriptscriptstyle\oplus$}{\rm P}}}} \newcommand{\parityQL}{{\mathchoice{\raisebox{1pt}{$\displaystyle\oplus$}{\rm QL}}{\raisebox{1pt}{$\textstyle\oplus$}{\rm QL}}{\raisebox{1pt}{$\scriptstyle\oplus$}{\rm QL}}{\raisebox{1pt}{$\scriptscriptstyle\oplus$}{\rm QL}}}} % Ditto for Number-P --KWR \newcommand{\numberP}{{\mathchoice{\raisebox{1pt} {$\displaystyle\#$}{\rm P}}{\raisebox{1pt} {$\textstyle\#$}{\rm P}}{\raisebox{1pt} {$\scriptstyle\#$}{\rm P}}{\raisebox{1pt} {$\scriptscriptstyle\#$}{\rm P}}}} % And C-parity-P: \newcommand{\CparityP}{{\rm C}\parityP} % And C=P: \newcommand{\cep}{\mbox{{\rm C}$_=${\rm P}}} % %\newcommand{\cep} {C\!\!\!\!=\!\!\!P} \newfontobj{\lang}{\it} \lang{SAT} \lang{CNF} \lang{QBF} \lang{HAM} \lang{CLIQUE} \lang{Clique} \lang{VSAT} \lang{HP} \lang{AP} \lang{VALCOMPS} \lang{padK} \lang{prefix} \lang{TRUTH} \lang{TOT} \lang{INF} \lang{TP} \lang{worst} % \newcommand{\HamCirc}{\mbox{H{\sc AMILTONIAN}} \mbox{C{\sc IRCUIT}}} % \newcommand{\IndSet}{\mbox{I{\sc NDEPENDENT}} \mbox{S{\sc ET}}} % \newcommand{\VertexCover}{\mbox{V{\sc ERTEX}} \mbox{C{\sc OVER}}} \newcommand{\threeSAT}{3\SAT} \newcommand{\CNFSAT}{\CNF\mbox{\it -}\SAT} \newcommand{\HamCirc}{\mbox{\it Hamiltonian } \mbox{\it Circuit\/}} \newcommand{\IndSet}{\mbox{\it Independent } \mbox{\it Set\/}} \newcommand{\VertexCover}{\mbox{\it Vertex } \mbox{\it Cover\/}} \newcommand{\TPi}{\mbox{{\rm T}$\Pi$}} \newfontobj{\func}{\it} \func{sat} 
\func{op} \func{Ran} \func{Dom} \func{Supp} \func{Graph} \func{Subgraph} \func{Prefs} \func{Code} \func{ran} \func{dom} \func{graph} \func{maj} \func{subgraph} \func{prefs} \func{wt} \func{Perm} \func{Det} \func{NAND} \func{NOR} \func{nand} \func{nor} \func{UE} \func{ue} \func{normal} \func{prim} \func{PLUS} \func{BIT} \func{Mask} \func{Eq} \func{BSUM} \func{Pos} \func{gap} \func{lag} \func{Enc} \func{Matrix} % \func{Accept} % \func{Halt} \func{double} \func{Divides} \func{Prime} \func{IsProof} \func{car} \func{cdr} \func{Bim} \func{str} \func{num} \func{BSC} \newcommand{\Accept}{\mbox{\it Acc\/}} \newcommand{\Halt}{\mbox{\it Halt\/}} \newfontobj{\type}{\tt} \type{String} \type{Strings} \type{Char} \type{Chars} \type{Bool} \type{Event} \type{Events} \type{Language} \type{Languages} \type{Class} \type{Classes} \type{Hierarchy} \type{Hierarchies} \newcommand{\cfatime}{\mbox{CFA-TIME}} % Hierarchy stuff \newcommand{\ql}{\mbox{\it ql\/}} % \newcommand{\Skp}{\Sigma_k^p} % \newcommand{\Pkp}{\Pi_k^p} % \newcommand{\Dkp}{\Delta_k^p} % \newcommand{\Skql}{\Sigma_k^{ql}} % \newcommand{\Pkql}{\Pi_k^{ql}} % \newcommand{\Dkql}{\Delta_k^{ql}} % \newcommand{\Sql}{\Sigma^{ql}} % \newcommand{\Pql}{\Pi^{ql}} % \newcommand{\Dql}{\Delta^{ql}} \newcommand{\Sig}{\protect\raisebox{.25ex}{$\textstyle\sum$}} \newcommand{\Pig}{\protect\raisebox{.25ex}{$\textstyle\prod$}} \newcommand{\Dig}{{\Delta}} \newcommand{\Soh}{\Sig^0} \newcommand{\Poh}{\Pig^0} \newcommand{\Doh}{\Dig^0} \newcommand{\Sp}{\Sig^p} \newcommand{\Pp}{\Pig^p} \newcommand{\Dp}{\Dig^p} \newcommand{\Sql}{\Sig^{ql}} \newcommand{\Pql}{\Pig^{ql}} \newcommand{\Dql}{\Dig^{ql}} \newcommand{\Slog}{\Sig^{\log}} \newcommand{\Plog}{\Pig^{\log}} \newcommand{\Dlog}{\Dig^{\log}} \newcommand{\Slin}{\Sig^{lin}} \newcommand{\Plin}{\Pig^{lin}} \newcommand{\Dlin}{\Dig^{lin}} \newcommand{\Skql}{\Sig_k^{ql}} \newcommand{\Pkql}{\Pig_k^{ql}} \newcommand{\Dkql}{\Dig_k^{ql}} \newcommand{\Skoh}{{\sum}_k^0} \newcommand{\Pkoh}{{\prod}_k^0} 
\newcommand{\Dkoh}{{\Delta}_k^0} \newcommand{\Sohoh}{{\sum}_0^0} \newcommand{\Pohoh}{{\prod}_0^0} \newcommand{\Dohoh}{{\Delta}_0^0} \newcommand{\Soneoh}{{\sum}_1^0} \newcommand{\Poneoh}{{\prod}_1^0} \newcommand{\Doneoh}{{\Delta}_1^0} \newcommand{\Stwooh}{{\sum}_2^0} \newcommand{\Ptwooh}{{\prod}_2^0} \newcommand{\Dtwooh}{{\Delta}_2^0} \newcommand{\Sthreeoh}{{\sum}_3^0} \newcommand{\Pthreeoh}{{\prod}_3^0} \newcommand{\Dthreeoh}{{\Delta}_3^0} % This is the old definition. % \newcommand{\parityP}{\raisebox{1pt}{$\oplus$}{\rm P}} \newcommand{\CA}{{\cal A}} \newcommand{\CB}{{\cal B}} \newcommand{\CC}{{\cal C}} \newcommand{\CD}{{\cal D}} \newcommand{\CE}{{\cal E}} \newcommand{\CF}{{\cal F}} \newcommand{\CG}{{\cal G}} \newcommand{\CH}{{\cal H}} \newcommand{\CI}{{\cal I}} \newcommand{\CJ}{{\cal J}} \newcommand{\CK}{{\cal K}} \newcommand{\CL}{{\cal L}} \newcommand{\CM}{{\cal M}} \newcommand{\CN}{{\cal N}} \newcommand{\CO}{{\cal O}} \newcommand{\CP}{{\cal P}} \newcommand{\CQ}{{\cal Q}} \newcommand{\CR}{{\cal R}} \newcommand{\CS}{{\cal S}} \newcommand{\CT}{{\cal T}} \newcommand{\CU}{{\cal U}} \newcommand{\CV}{{\cal V}} \newcommand{\CW}{{\cal W}} \newcommand{\CX}{{\cal X}} \newcommand{\CY}{{\cal Y}} \newcommand{\CZ}{{\cal Z}} \newcommand{\rma}{\mbox{a}} \newcommand{\rmb}{\mbox{b}} \newcommand{\rmD}{\mbox{D}} \newcommand{\rmR}{\mbox{R}} \newcommand{\rmS}{\mbox{S}} \newcommand{\co}{{\hbox{co-}}} % ENVIRONMENTS FOR RESULTS, DEFINITIONS, EXAMPLES, and OPEN PROBLEMS % Results, definitions, and examples are numbered /by section/. % Open problems are numbered for the entire paper. % Conventions are counted as definitions; % conjectures are counted as open problems. % "\newdefinition" is an add-on by Stuart Kurtz which behaves like % \newtheorem but doesn't italicize the text body. 
\newtheorem{theorem}{Theorem}[section] \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{mainrobustnesstheorem}[theorem]{Main Robustness Theorem} \newtheorem{robustnesstheorem}[theorem]{Robustness Theorem} \newtheorem{claim}[theorem]{Claim} \newdefinition{definition}{Definition}[section] % \newdefinition{definition}{Definition}[lecnum] \newdefinition{convention}[definition]{Convention} \newdefinition{example}{Example}[section] \newdefinition{openproblem}{Open Problem} \newdefinition{conjecture}[openproblem]{Conjecture} \newenvironment{theorem*}{\sl \begin{trivlist} \item[]{\bf Theorem\enspace}}{\end{trivlist}} \newenvironment{proposition*}{\sl \begin{trivlist} \item[]{\bf Proposition\enspace}}{\end{trivlist}} \newenvironment{counterexample*}{\sl \begin{trivlist} \item[]{\bf Counterexample\enspace}}{\end{trivlist}} \newenvironment{convention*}{\sl \begin{trivlist} \item[]{\bf Convention\enspace}}{\end{trivlist}} \newenvironment{conjecture*}{\sl \begin{trivlist} \item[]{\bf Conjecture\enspace}}{\end{trivlist}} \newcommand{\proofsketch}{\noindent{\bf Proof Sketch.}\enspace} \newcommand{\Proofsketch}[1]{\noindent{\bf Proof Sketch #1.}\enspace} \newcommand{\proof}{\noindent{\bf Proof.}\enspace} \newcommand{\Proof}[1]{\bigbreak\noindent{\bf Proof #1.}\enspace} \def\beginproof{{\noindent {\bf Proof~~}}} \def\beginproofofclaim{{\noindent {\bf Proof of Claim. 
}}} \newenvironment{myproof}{\proof}{\qed\bigskip} \newenvironment{problem}{\sl \begin{trivlist} \item[]{\bf Problem.\enspace}}{\end{trivlist}} \newenvironment{notation}{\it \begin{trivlist} \item[]{Notation.\enspace}}{\end{trivlist}} \newenvironment{note}{\unskip\footnote\bgroup\ignorespaces}{\egroup} \newenvironment{construction}{\bigbreak\begin{block}}{\end{block} \bigbreak} \newenvironment{block}{\begin{list}{\hbox{}}{\leftmargin 1em \itemindent -1em \topsep 0pt \itemsep 0pt \partopsep 0pt}}{\end{list}} \newenvironment{Adjust}[1]{\begin{list}{\hbox{}}{\leftmargin #1 \topsep 0pt \itemsep 0pt \partopsep 0pt}}{\end{list}} \newcommand{\thinbar}{\rule{\textwidth}{0.015in}} \newenvironment{Comment}{\vspace{-2.5ex}\begin{trivlist} \item[]\thinbar\vspace{-2ex} \item[]\sl}{\nopagebreak[4]\vspace{-2.5ex}\item[] \thinbar\end{trivlist}} \newcommand{\topic}[1]{\medskip\noindent{\bf #1}} \newcommand{\stub}[1]{\typeout{*** Stub! ***} $\langle${\bf Stub:} {\em #1}$\rangle$} \newcommand{\Strut}[1]{\rule{0pt}{#1}} \newcommand{\restrict}{\mathclose{\mid}} \newcommand{\restricted}[1]{\restrict_{#1}} \newcommand{\symdiff}{\bigtriangleup} % \newcommand{\diff}{\backslash} \newcommand{\eqq}{\stackrel{?}{=}} \newcommand{\neqq}{\stackrel{?}{{\neq}}} \newcommand{\range}[1]{{\rm range}(#1)} \newcommand{\domain}[1]{{\rm domain}(#1)} \newcommand{\supp}{\mbox{\it L\/}} \newcommand{\fnAND}{\mbox{\it AND\/}} \newcommand{\fnOR}{\mbox{\it OR\/}} \newcommand{\fnor}{\mbox{\it or\/}} \newcommand{\fnand}{\mbox{\it and\/}} \newcommand{\noteq}{\neq} \newcommand{\COMPL}[1]{\overline{#1}} \newcommand{\Compl}[1]{\widetilde{#1}} \newcommand{\compl}[1]{\mbox{$\sim$}{#1}} \newcommand{\set}[1]{\{\,#1\,\}} \newcommand{\setf}{\mbox{\it set-$f$}} \newcommand{\setg}{\mbox{\it set-$g$}} \newcommand{\pair}[1]{\mathopen{\langle}#1\mathclose{\rangle}} \newcommand{\tuple}[1]{\mathopen{\langle}#1\mathclose{\rangle}} \newcommand{\PAIR}[1]{{\langle}#1{\rangle}} \newcommand{\floor}[1]{\lfloor#1\rfloor} 
\newcommand{\ceiling}[1]{\lceil #1\rceil} \newcommand{\Ahat}{\widehat{A}} \newcommand{\Bhat}{\widehat{B}} \newcommand{\Rhat}{\widehat{R}} \newcommand{\prob}{{\rm Pr}} \newcommand{\Prob}{{\rm Prob}} \renewcommand{\Pr}{{\rm Pr}} \newcommand{\Cmpl}{\mathbin{\hbox{\rule[0.5ex]{0.6em}{0.25ex}}}} \newcommand{\predicate}[1]{\mathopen{\lbrack\!\lbrack} #1 \mathclose{\rbrack\!\rbrack}} \newcommand{\BPQ}[1]{\BP{\kern -2pt}_{#1}\,} \newcommand{\parity}{\oplus} \newcommand{\odd}{\oplus^+} \newcommand{\even}{\oplus^-} \newcommand{\oddeven}{\oplus^{\pm}} \newcommand{\evenodd}{\oplus^{\mp}} \newcommand{\BP}{{\rm BP}} \newcommand{\Count}{{\rm C}} \newcommand{\ZP}{{\rm ZP}} \newcommand{\Rplus}{{\rm R}^+} \newcommand{\Rminus}{{\rm R}^-} \newcommand{\Rpm}{{\rm R}^{\pm}} \newcommand{\Rmp}{{\rm R}^{\mp}} \newcommand{\sizedepth}{\hbox{SIZE-DEPTH}} \newcommand{\twodpda}{\hbox{2DPDA}} \newcommand{\twoHDFA}{\hbox{2HDFA}} \newcommand{\RAMLIN}{\mbox{{\rm RAM-LIN}}} \newcommand{\ramlin}{\RAMLIN} \newcommand{\DRAMLIN}{\mbox{{\rm DRAM-LIN}}} \newcommand{\dramlin}{\DRAMLIN} \newcommand{\NRAMLIN}{\mbox{{\rm NRAM-LIN}}} \newcommand{\nramlin}{\NRAMLIN} \newcommand{\RAMTIME}{\mbox{{\rm RAM-TIME}}} \newcommand{\BRAMTIME}{\mbox{{\rm BRAM-TIME}}} \newcommand{\SRAMTIME}{\mbox{{\rm SRAM-TIME}}} \newcommand{\ramtime}{\RAMTIME} \newcommand{\ramtmtime}{\mbox{{\rm RAM-TM-TIME}}} \newcommand{\ramlogtime}{\RAMTIME^{\log}} \newcommand{\TC}{\mbox{\rm TC}} \newcommand{\TCTIME}{\mbox{\rm TC-TIME}} \newcommand{\DSPACE}{\hbox{\rm DSPACE}} \newcommand{\NSPACE}{\hbox{\rm NSPACE}} \newcommand{\TIME}{\hbox{\rm TIME}} \newcommand{\DTIME}{\hbox{\rm DTIME}} \newcommand{\NTIME}{\hbox{\rm NTIME}} \newcommand{\dmutime}{\mbox{{\rm D}$\mu${\rm TIME}}} \newcommand{\nmutime}{\mbox{{\rm N}$\mu${\rm TIME}}} \newcommand{\dmudtime}{\mbox{{\rm D}$\mu_d${\rm TIME}}} \newcommand{\nmudtime}{\mbox{{\rm N}$\mu_d${\rm TIME}}} \newcommand{\dmulogtime}{\mbox{{\rm D}$\mu_{\log}${\rm TIME}}} \newcommand{\nmulogtime}{\mbox{{\rm 
N}$\mu_{\log}${\rm TIME}}} \newcommand{\dmuzerotime}{\mbox{{\rm D}$\mu_0${\rm TIME}}} \newcommand{\nmuzerotime}{\mbox{{\rm N}$\mu_0${\rm TIME}}} \newcommand{\dmuonetime}{\mbox{{\rm D}$\mu_1${\rm TIME}}} \newcommand{\nmuonetime}{\mbox{{\rm N}$\mu_1${\rm TIME}}} \newcommand{\dmutwotime}{\mbox{{\rm D}$\mu_2${\rm TIME}}} \newcommand{\dmuthreetime}{\mbox{{\rm D}$\mu_3${\rm TIME}}} \newcommand{\mulog}{\mbox{$\mu_{\log}$}} \newcommand{\mutime}{\mbox{$\mu$-{\it time\/}}} \newcommand{\mudtime}{\mbox{$\mu_d$-{\it time\/}}} \newcommand{\mulogtime}{\mbox{$\mu_{\log}$-{\it time\/}}} \newcommand{\muacc}{\mbox{$\mu$-{\it acc\/}}} \newcommand{\mudacc}{\mbox{$\mu_d$-{\it acc\/}}} \newcommand{\mulogacc}{\mbox{$\mu_{\log}$-{\it acc\/}}} \newcommand{\muoneacc}{\mbox{$\mu_1$-{\it acc\/}}} \newcommand{\code}{\mbox{\it code\/}} \newcommand{\qlin}{\mbox{\it qlin\/}} \newcommand{\lin}{\mbox{\it lin\/}} \newcommand{\difce}{\mbox{\it diff\/}} \newcommand{\loglog}{\log\!\log} \newcommand{\thyme}{\mbox{\it time\/}} % \newcommand{\ACCEPT}{\mbox{A{\footnotesize CCEPT}}} % \newcommand{\REJECT}{\mbox{R{\footnotesize EJECT}}} % \newcommand{\HALT}{\mbox{H{\footnotesize ALT}}} % \newcommand{\goUP}{\mbox{U{\footnotesize P}}} % \newcommand{\DOWNRIGHT}{\mbox{D{\footnotesize OWN} R{\footnotesize IGHT}}} % \newcommand{\DOWNLEFT}{\mbox{D{\footnotesize OWN} L{\footnotesize EFT}}} % \newcommand{\PULL}{\mbox{P{\footnotesize ULL}}} % \newcommand{\PUT}{\mbox{P{\footnotesize UT}}} % \newcommand{\DOWN}{\mbox{D{\footnotesize OWN}}} % \newcommand{\RIGHT}{\mbox{R{\footnotesize IGHT}}} % \newcommand{\LEFT}{\mbox{L{\footnotesize EFT}}} \newcommand{\ACCEPT}{{\mbox{\sc Accept}}} \newcommand{\REJECT}{{\mbox{\sc Reject}}} \newcommand{\HALT}{{\mbox{\sc Halt}}} \newcommand{\goUP}{{\mbox{\sc Up}}} \newcommand{\DOWNRIGHT}{{\mbox{\sc Down Right}}} \newcommand{\DOWNLEFT}{{\mbox{\sc Down Left}}} \newcommand{\PULL}{{\mbox{\sc Pull}}} \newcommand{\PUT}{{\mbox{\sc Put}}} \newcommand{\DOWN}{{\mbox{\sc Down}}} 
\newcommand{\RIGHT}{{\mbox{\sc Right}}} \newcommand{\LEFT}{{\mbox{\sc Left}}} \newcommand{\LOAD}{{\mbox{\sc Load}}} \newcommand{\WRITE}{{\mbox{\sc Write}}} \newcommand{\READ}{{\mbox{\sc Read}}} \newcommand{\ONLY}{{\mbox{\sc Only}}} \newcommand{\POP}{{\mbox{\sc Pop}}} \newcommand{\PUSH}{{\mbox{\sc Push}}} \newcommand{\ctt}{\mbox{ctt}} \newcommand{\dtt}{\mbox{dtt}} \newcommand{\teetee}{\mbox{tt}} \newcommand{\Ptt}{\P_{\mbox{\scriptsize tt}}} \newcommand{\Pctt}{\P_{\mbox{\scriptsize ctt}}} \newcommand{\Pdtt}{\P_{\mbox{\scriptsize dtt}}} % The following are only logical --KWR \newcommand{\eqdef}{{\mbox{$=_{\rm def}$}}} \newcommand{\eqdf}{\stackrel{\rm df}{=}} \newcommand{\AND}{\; \wedge \;} \newcommand{\OR}{\; \vee \;} \newcommand{\XOR}{\; \oplus \;} \newcommand{\myor}{\;\;\vee\;\;} \newcommand{\bigAND}{\bigwedge} \newcommand{\bigOR}{\bigvee} \newcommand{\And}{\;\&\;} \newcommand{\Or}{\;\hbox{\rm or}\;} \newcommand{\implies}{\Longrightarrow} % \newcommand{\iff}{\Longleftrightarrow} \newcommand{\eqv}{\leftrightarrow} \newcommand{\imp}{\rightarrow} \newcommand{\TRUE}{\hbox{\rm TRUE}} \newcommand{\FALSE}{\hbox{\rm FALSE}} \newcommand{\into}{\rightarrow} \newcommand{\union}{\cup} \newcommand{\bigunion}{\bigcup} \newcommand{\intersect}{\cap} \newcommand{\bigintersect}{\bigcap} \newcommand{\contains}{\supseteq} % \def\union{\,\bigcup\limits\,} % \def\inter{\,\bigcap\limits\,} \newcommand{\bo}{{\bf 0}} \newcommand{\bl}{{\bf 1}} % when there is time, fix the following mess with a nice macro \newcommand{\dash}{\hbox{-}} \newcommand{\redur}{\mathrel{\hbox{$\leq_{\it r}$}}} \newcommand{\rredu}{\redur} \newcommand{\pmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it p}$}}} \newcommand{\rpmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it rp}$}}} \newcommand{\corpmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it co-rp}$}}} \newcommand{\bppmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it bpp}$}}} \newcommand{\vvmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it vv}$}}} \newcommand{\Rredu}{\mathrel{\hbox{$\leq_{\it R}$}}} 
\newcommand{\URredu}{\mathrel{\hbox{$\leq_{\it UR}$}}} \newcommand{\mredu}{\mathrel{\hbox{$\leq_{\it m}$}}} \newcommand{\Tredu}{\mathrel{\hbox{$\leq_{\it T}$}}} \newcommand{\ttredu}{\mathrel{\hbox{$\leq_{\it tt}$}}} \newcommand{\onettredu}{\mathrel{\hbox{$\leq_{\it 1tt}$}}} \newcommand{\ptredu}{\mathrel{\hbox{$\leq_{\it T}^{\it p}$}}} \newcommand{\pttredu}{\mathrel{\hbox{$\leq_{\it tt}^{\it p}$}}} \newcommand{\pcttredu}{\mathrel{\hbox{$\leq_{\it ctt}^{\it p}$}}} \newcommand{\pdttredu}{\mathrel{\hbox{$\leq_{\it dtt}^{\it p}$}}} \newcommand{\pIttredu}{\mathrel{\hbox{$\leq_{\it 1tt}^{\it p}$}}} \newcommand{\pIIttredu}{\mathrel{\hbox{$\leq_{\it 2tt}^{\it p}$}}} \newcommand{\pmsredu}{\mathrel{<_{\it m}^{\it p}}} \newcommand{\pmequiv}{\mathop{\equiv_{\it m}^{\it p}}} \newcommand{\notpmredu}{\mathop{\not\leq_{\it m}^{\it p}}} \newcommand{\plredu}{\mathop{\leq_1^{\it p}}} \newcommand{\notplredu}{\mathop{\not\leq_1^{\it p}}} \newcommand{\liredu}{\mathop{\leq_{1\dash{\it li}}^{\it p}}} \newcommand{\pmttredu}{\mathrel{\hbox{$\leq_{\it mtt}^{\it p}$}}} \newcommand{\pposredu}{\mathrel{\hbox{$\leq_{\it pos}^{\it p}$}}} \newcommand{\npposredu}{\mathrel{\hbox{$\leq_{\it pos}^{\it np}$}}} \newcommand{\snpmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it snp}$}}} \newcommand{\logmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it log}$}}} \newcommand{\cfrmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it cfr}$}}} \newcommand{\qlmredu}{\mathrel{\hbox{$\leq_{\it m}^{\it ql}$}}} \newcommand{\qltredu}{\mathrel{\hbox{$\leq_{\it T}^{\it ql}$}}} \newcommand{\qlmequiv}{\mathop{\equiv_{\it m}^{\it ql}}} \newcommand{\Dpt}{\CD^{\it p}_{\it T}} \newcommand{\Dppos}{\CD^{\it p}_{\it pos}} \newcommand{\Dnppos}{\CD^{\it np}_{\it pos}} \newcommand{\Rpequiv}{\equiv_{\Rplus}} \newcommand{\Rmequiv}{\equiv_{\Rminus}} \newcommand{\BPequiv}{\equiv_{\BP}} % \newcommand{\plusone}{\hspace{-.1ex}+\hspace{-.3ex} 1} % \newcommand{\minusone}{\hspace{-.1ex}-\hspace{-.3ex} 1} \newcommand{\plusone}{{\mathchoice{\hspace{-.15ex}+\hspace{-.33ex} 
1}{\hspace{-.15ex}+\hspace{-.33ex} 1}{\hspace{-0ex}+\hspace{-.13ex} 1}{+1}}} \newcommand{\minusone}{{\mathchoice{\hspace{-.2ex}-\hspace{-.33ex} 1}{\hspace{-.2ex}-\hspace{-.33ex} 1}{\hspace{-0ex}-\hspace{-.13ex} 1}{-1}}} \newcommand{\sqdot}{\rule{0.5mm}{0.5mm}} \newcommand{\mdot}{\!\cdot\!} \newcommand{\lam}[1]{\lambda #1\,\sqdot\,} \newcommand{\lamp}[1]{\lam{\pair{#1}}} \newcommand{\SO}{{\cal O}} \newcommand{\Card}[1]{\Vert #1 \Vert} \newcommand{\card}[1]{| #1 |} \newcommand{\norm}[1]{\mbox{\it norm\/}(#1)} \newcommand{\Quad}[1]{\hspace*{#1em}} \newcommand{\qqquad}{\qquad\quad} % the following makes a nice fat QED box \newcommand{\sqr}[2]{{\vcenter{\hrule height.#2pt \hbox{\vrule width.#2pt height#1pt \kern#1pt \vrule width.#2pt} \hrule height.#2pt}}} \newcommand{\qed}{{\unskip\nobreak\hfil\penalty50 \hskip1em\hbox{}\nobreak\hfil$\sqr{8}{8}$ \parfillskip=0pt \finalhyphendemerits=0 \par} \bigskip} \newcommand{\Qed}[1]{{\unskip\nobreak\hfil\penalty50 \hskip1em\hbox{}\nobreak\hfil$\sqr{8}{8}$\enspace\bf #1 \parfillskip=0pt \finalhyphendemerits=0 \par} \bigskip} \newcommand{\eop}{\hspace*{\fill}$\Box$\vspace{24truept}} \newcommand{\refeq}[1]{(\ref{#1})} \newcommand{\re}{r.e.\ } \newcommand{\Nat}{{\rm \bf N}} \newcommand{\Zed}{{\rm \bf Z}} \newcommand{\Int}{\Zed} \newcommand{\Reals}{{\rm \bf R}} \newcommand{\ints}{{\bf Z}} \newcommand{\reals}{{\bf R}} \newcommand{\sep}{{\,;\,}} \newcommand{\hep}{{\,\#\,}} \newcommand{\emptystring}{\lambda} \newcommand{\emptyclass}{\mbox{\bf\O}} \newcommand{\Frakla}{\CL_A} \newcommand{\Eff}{\CF} \newcommand{\QProof}{\mbox{$\CQ${\it Proof\/}}} %% Improve the following hack!!! 
\newenvironment{header}{ \newcommand{\From}{\item[{\bf From:}]} \newcommand{\To}{\item[{\bf To:}]} \newcommand{\Subject}{\item[{\bf Subject:}]} \newcommand{\Date}{\item[{\bf Date:}]} \newcommand{\Revised}{\item[{\bf Revised:}]} \newcommand{\Bar}{\item[\rule{0.986\textwidth}{.03in}]} \begin{list}{\hbox{}}{ \leftmargin 1.5em \itemindent -1em \topsep 0pt \labelwidth 0pt \itemsep -5pt }\Bar}{\Bar\end{list} \medskip} \newcommand {\Choose} [2] {(\,^{#1}_{#2}\,)} \newcommand {\worddef}[1] {\boldmath{\bf #1}\unboldmath} \newcommand{\GF}{\mbox{GF}} \newcommand{\gh}{\mbox{gh}} \newcommand{\ap}{\mbox{ap}} \newcommand{\proves}{\vdash} \newcommand{\negproves}{\not\vdash} % this is the macro-file for ampmp.tex. I called it tickmacro.tex \newenvironment{beweis} {\proof}{\qed} \class{AmpMP} \class{MaskP} \newcommand {\ul} [1] {\underline{#1}} \newcommand {\ol} [1] {\overline{#1}} \newcommand {\subs} {\subseteq} \newcommand {\parp} {\parityP} \newcommand {\cparp} {\CparityP} \newcommand {\bpparp} {\BP[\parityP]} \newcommand {\mb} {\MP} \newcommand {\nump} {\numberP} \newcommand {\numP} {\numberP} \newcommand {\NumP} {\numberP} \newcommand {\ampmp} {\AmpMP} \newcommand {\con} {\cdot} \newcommand {\acc} {\mbox{\it acc\/}} \newcommand{\qacc}{q_{\mbox{\it acc}}} \newcommand{\qrej}{q_{\mbox{\it rej}}} \newcommand{\numacc}{\#\acc} \newcommand{\polylog}{\mathop{\rm polylog}\nolimits} \newcommand {\nule} {\{0,1\}} \newcommand{\aat}{\mbox{@}} \newcommand{\aats}{\mbox{@s}} \newcommand{\ats}{\mbox{\small @}} \newcommand{\pct}{{\%}} \newcommand{\dol}{{\$}} \newcommand{\propcont}{\;\subset\;} %%%Macros from Lide Li \newcommand{\ch}{\raisebox{.5ex}{$\chi$}} \newcommand{\Edot}{\mbox{\boldmath$\exists$}\cdot} \newcommand{\Pdot}[1]{\mbox{{\bf P}$\cdot${#1}}} \newcommand{\Liff}{\Longleftrightarrow} \newcommand{\qedright}{\begin{flushright}\qed\end{flushright}} \def\monus{\buildrel\textstyle.\over{\smash{-}\vrule height .5ex width 0pt}} \def\concat{\widehat{\phantom\alpha}} \newcommand{\Poly}{{\rm 
Poly}} \newcommand{\poly}{\mbox{\it poly\/}} \def\oper{\circ} \def\ZON{\{0,1\}^n} \def\OneN{\{1,\cdots,n\}} \def\OneS{\{1,\cdots,s\}} \newcommand{\myneg}{\overline} \newcommand{\CeqP}{\mbox{${\rm C}_=\P$}} \newcommand{\coCeqP}{\mbox{${\bf co}{\rm C}_=\P$}} \newcommand{\fmaxsat}{\mbox{$f_{\mbox{\scriptsize\it MAX3SAT}}$}} %%%%%% \newcommand{\otherwise}{\mbox{\it otherwise\/}} \newcommand{\same}{\mbox{\it same\/}} \newcommand{\order}{O} \newcommand {\onehelp} {\mathop{{\rm 1}\dash{\rm help}}} SHAR_EOF fi # end of overwriting check if test -f 'KWRdefs.sty' then echo shar: will not over-write existing file "'KWRdefs.sty'" else cat << \SHAR_EOF > 'KWRdefs.sty' % Stuart's defs.sty. [This is the most important part --KWR] % % Construct a "newdefinition" command analogous to the newtheorem % command. The primary difference is that the newdefinition widget % does not cause its body to be italicized. % % This is all cribbed from latex.tex. % Contact Stuart A. Kurtz if there are any difficulties. \def\newdefinition#1{\@ifnextchar[{\@odefn{#1}}{\@ndefn{#1}}} \def\@ndefn#1#2{% \@ifnextchar[{\@xndefn{#1}{#2}}{\@yndefn{#1}{#2}}} \def\@xndefn#1#2[#3]{\expandafter\@ifdefinable\csname #1\endcsname {\@definecounter{#1}\@addtoreset{#1}{#3}% \expandafter\xdef\csname the#1\endcsname{\expandafter\noexpand \csname the#3\endcsname \@thmcountersep \@thmcounter{#1}}% \global\@namedef{#1}{\@defn{#1}{#2}}\global\@namedef{end#1}{\@enddefinition}}} \def\@yndefn#1#2{\expandafter\@ifdefinable\csname #1\endcsname {\@definecounter{#1}% \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}% \global\@namedef{#1}{\@defn{#1}{#2}}\global\@namedef{end#1}{\@enddefinition}}} \def\@odefn#1[#2]#3{\expandafter\@ifdefinable\csname #1\endcsname {\global\@namedef{the#1}{\@nameuse{the#2}}% \global\@namedef{#1}{\@defn{#2}{#3}}% \global\@namedef{end#1}{\@enddefinition}}} \def\@defn#1#2{\refstepcounter {#1}\@ifnextchar[{\@ydefn{#1}{#2}}{\@xdefn{#1}{#2}}} \def\@xdefn#1#2{\@begindefinition{#2}{\csname 
the#1\endcsname}\ignorespaces} \def\@ydefn#1#2[#3]{\@opargbegindefinition{#2}{\csname the#1\endcsname}{#3}\ignorespaces} \def\@begindefinition#1#2{ \trivlist \item[\hskip \labelsep{\bf #1\ #2.}]} \def\@opargbegindefinition#1#2#3{\trivlist \item[\hskip \labelsep{\bf #1\ #2\ (#3).}]} \def\@enddefinition{\endtrivlist} % Construct a "newscholium" command analogous to the newtheorem % command. The primary difference is that the newscholium widget % does not cause its body to be italicized. % % This is all cribbed from latex.tex. % Contact Stuart A. Kurtz if there are any difficulties. \def\newscholium#1{\@ifnextchar[{\@oschm{#1}}{\@nschm{#1}}} \def\@nschm#1#2{% \@ifnextchar[{\@xnschm{#1}{#2}}{\@ynschm{#1}{#2}}} \def\@xnschm#1#2[#3]{\expandafter\@ifdefinable\csname #1\endcsname {\@definecounter{#1}\@addtoreset{#1}{#3}% \expandafter\xdef\csname the#1\endcsname{\expandafter\noexpand \csname the#3\endcsname \@thmcountersep \@thmcounter{#1}}% \global\@namedef{#1}{\@schm{#1}{#2}}\global\@namedef{end#1}{\@endscholium}}} \def\@ynschm#1#2{\expandafter\@ifdefinable\csname #1\endcsname {\@definecounter{#1}% \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}% \global\@namedef{#1}{\@schm{#1}{#2}}\global\@namedef{end#1}{\@endscholium}}} \def\@oschm#1[#2]#3{\expandafter\@ifdefinable\csname #1\endcsname {\global\@namedef{the#1}{\@nameuse{the#2}}% \global\@namedef{#1}{\@schm{#2}{#3}}% \global\@namedef{end#1}{\@endscholium}}} \def\@schm#1#2{\refstepcounter {#1}\@ifnextchar[{\@yschm{#1}{#2}}{\@xschm{#1}{#2}}} \def\@xschm#1#2{\@beginscholium{#2}{\csname the#1\endcsname}\ignorespaces} \def\@yschm#1#2[#3]{\@opargbeginscholium{#2}{\csname the#1\endcsname}{#3}\ignorespaces} \def\@beginscholium#1#2{\footnotesize \trivlist % \item[]{\bf #1\ #2.\hskip \labelsep}} \def\@opargbeginscholium#1#2#3{\footnote \trivlist % \item[]{\bf #1\ #2\ (#3).\hskip \labelsep}} \def\@endscholium{\endtrivlist} %%% The \text command stolen from AMSLaTeX \def\RIfM@{\relax\ifmmode} 
\def\RIfMIfI@{\relax\ifmmode\ifinner} \def\Invalid@#1{\def#1{\Err@{\Invalid@@\string#1}}} \def\Invalid@@{Invalid use of } \def\DN@{\def\next@} \def\eat@#1{} \def\text{\RIfM@\expandafter\text@\else\expandafter\text@@\fi} \def\text@@#1{\leavevmode\hbox{#1}} \newif\iffirstchoice@ \firstchoice@true \def\text@#1{\mathchoice {\hbox{\everymath{\displaystyle}\def\textfonti{\the\textfont\@ne}% \def\textfontii{\the\textfont\tw@}\textdef@@ T#1}} {\hbox{\firstchoice@false \everymath{\textstyle}\def\textfonti{\the\textfont\@ne}% \def\textfontii{\the\textfont\tw@}\textdef@@ T#1}} {\hbox{\firstchoice@false \everymath{\scriptstyle}\def\textfonti{\the\scriptfont\@ne}% \def\textfontii{\the\scriptfont\tw@}\textdef@@ S\rm#1}} {\hbox{\firstchoice@false \everymath{\scriptscriptstyle}\def\textfonti {\the\scriptscriptfont\@ne}% \def\textfontii{\the\scriptscriptfont\tw@}\textdef@@ s\rm#1}}} \def\textdef@@#1{\textdef@#1\rm\textdef@#1\bf\textdef@#1\sl\textdef@#1\it} \def\rmfam{0} \def\textdef@#1#2{% \DN@{\csname\expandafter\eat@\string#2fam\endcsname}% \if S#1\edef#2{\the\scriptfont\next@\relax}% \else\if s#1\edef#2{\the\scriptscriptfont\next@\relax}% \else\edef#2{\the\textfont\next@\relax}\fi\fi} SHAR_EOF fi # end of overwriting check if test -f 'KeyComms.tex' then echo shar: will not over-write existing file "'KeyComms.tex'" else cat << \SHAR_EOF > 'KeyComms.tex' \documentstyle[titlepage,KWRdefs,11pt]{article} % \renewcommand{\baselinestretch}{1.2} \setlength{\textwidth}{6.35in} %6.25in or 6.4in \setlength{\textheight}{9in} \setlength{\topmargin}{0in} \setlength{\headheight}{0in} \setlength{\headsep}{0in} \setlength{\footheight}{.5in} \setlength{\footskip}{.5in} \setlength{\oddsidemargin}{.1in} %.2in or 0in \setlength{\evensidemargin}{.1in} \setlength{\parindent}{0.3in} %.5in \setlength{\parskip}{0in} %.1 in or .2 in or 0in \setlength{\partopsep}{2ex} \setlength{\topsep}{1ex} \setlength{\itemsep}{.5ex} \input{KWRmacs} \newcommand{\myemptyset}{\mbox \O} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
\hfuzz=3pt \vfuzz=3pt \title{{\bf Communication Complexity of Key Agreement on Limited Ranges}\\ (extended abstract)} \author{ Jin-Yi Cai\thanks{Department of Computer Science, State Univ. of NY at Buffalo, 226 Bell Hall, Buffalo NY 14260-2000. Email: {\tt cai@cs.buffalo.edu}. Supported in part by NSF Grants CCR-9057486 and CCR-9319093, and by an Alfred P. Sloan Fellowship. } \and Richard J. Lipton\thanks{Department of Computer Science, Princeton University, Princeton, NJ 08544. E-mail: {\tt rjl@cs.princeton.edu}. Supported in part by NSF Grant CCR-9304718. } \and Luc Longpr\'e \thanks{ College of Computer Science, Cullinane Hall, Northeastern University, Boston, MA 02115. Email: {\tt luc@ccs.northeastern.edu}. Research supported in part by NSF Grant CCR-9211174. } \and Mitsunori Ogihara\thanks{ Department of Computer Science, University of Rochester, Room 620, Computer Science Building, Rochester, NY 14627. E-mail: {\tt ogihara@cs.rochester.edu}. Supported in part by the NSF under grant CCR-9002292 and the JSPS under grant NSF-INT-9116781/JSPS-ENG-207. } \and Kenneth W. Regan\thanks{Department of Computer Science, State Univ. of NY at Buffalo, 226 Bell Hall, Buffalo NY 14260-2000. Email: {\tt regan@cs.buffalo.edu}. } \and D. Sivakumar\thanks{Department of Computer Science, State Univ. of NY at Buffalo, 226 Bell Hall, Buffalo NY 14260-2000. Email: {\tt sivak-d@cs.buffalo.edu}. } } \date{May 1994} \begin{document} %\large \bibliographystyle{alpha} \maketitle \sloppy \begin{abstract} This paper studies a variation on classical key-agreement and consensus problems in which the set $S$ of possible keys is the range of a random variable that can be sampled. We give tight upper and lower bounds of $\ceiling{\log_2 k}$ bits on the communication complexity of agreement on some key in $S$, using a form of Sperner's Lemma, and give bounds on other problems. 
In the case where keys are generated by a probabilistic polynomial-time Turing machine, agreement is shown to be possible with zero communication if every fully polynomial-time approximation scheme (fpras) has a certain symmetry-breaking property. \end{abstract} \section{Introduction}\label{intro} A fundamental problem in key agreement between two parties, commonly called ``Alice'' and ``Bob,'' is for Alice to communicate some string $w$ to Bob over an expensive, noisy, and/or insecure channel. Most work allows $w$ to be any given string, making no assumptions about its source, or considers $w$ to be uniformly generated among strings of some length $n$. We study cases in which $w$ comes from a relatively small set $S \subseteq \set{0,1}^n$, namely the set of possible outputs of a random variable (figuratively, a key generator) $G$. Alice and Bob can gain information about $S$ by interacting with $G$ with the help of a private coin, and each can send messages to the other. For instance, $G$ may generate large primes $p$ satisfying certain requirements, and Alice and Bob wish to agree on some suitable $p$, while exchanging much fewer than $\log_2 p$ bits. Several kinds of problems we consider are: \begin{enumerate} \item {\em Any-key agreement.\/} Upon the end of their conversation, Alice commits to a string $s_A \in S$, Bob commits to $s_B \in S$, and they succeed if $s_A = s_B$. \item {\em Selected-key agreement.\/} Alice commits to a designated key $w \in S$ before the conversation, and they succeed if after the conversation, Bob also commits to $w$. \item {\em Subset agreement.\/} Upon the end of their conversation, Alice commits to a subset $S_A$ of $S$ of some pre-determined size $m$, Bob commits to $S_B$, and they succeed if $S_A = S_B$. \item {\em Weak subset agreement.\/} Same as last item, but with success if $S_A \intersect S_B \neq \emptyset$. 
% \item % {\em General\/.} Given some binary string relation $R$ (which is % known to them), Alice and Bob converse, commit to strings $w_A,w_B$ % (which need not belong to $S$), and succeed if $R(w_A,w_B)$ holds. \end{enumerate} \noindent For a first motivation, suppose Alice and Bob are users at remote sites. There are many applications that call for generating one or a few primes of length $n$ having special properties, and they want to make sure that with high probability they generate the same primes. The simple method where Alice generates a prime $p$ and sends it to Bob costs $n$ bits. Somewhat more economical is for Alice to send the {\em rank\/} of $p$ in the set $S$ of admissible primes, costing $\log_2\Card{S} < n$ bits. However, this requires both computing and inverting the {\em ranking function\/} of $S$ as defined on all of $\set{0,1}^n$, which can be ($\NP$-) hard even in cases where $S$ has cardinality two (see \cite{GoSi91}). Suppose, however, that the generator $G$ makes $S$ relatively small in size. This is not unreasonable: consider either the effect of the small seed space of a good pseudorandom number generator used as input to a prime-number generator, or the possibility that a third party supplying the generator to Alice and Bob could also supply a secret that restricts the range. Then Alice and Bob can gain enough useful information about $S$ by interacting with $G$ to send ranking information based on their samples of $S$. This has the advantage that so long as $G$ itself does not fall into enemy hands, the information about rank within $S$ itself gives away little information about the actual values in $S$. When it is important to select one or $m$ keys from $S$ ``at random,'' our intent is that Alice would build the designated key or keys, and then assist Bob in building the same ones, or at least one of them. This leads to the latter three problems. The first problem arises when any key will serve. 
Second, our setting gives what appears to be a new twist on much-studied distributed-consensus problems. In a typical consensus problem involving $k$ processors, each processor is given some value, and the goal is for the processors to agree on one of the values that were initially input. We can extend our setting from Alice and Bob to $k$ parties, and the difference is that now the keys are given as outcomes of a random variable, rather than being pre-set. In this abstract we concentrate on the two-party case, without faulty parties, and state some results about the extension at the end. Our work shows that the problems are interesting even with only Alice and Bob, with some open questions. Like the methods for distributed-consensus lower bounds in \cite{CHLT93,BoGa93,SaZa93,HeSh93}, our lower bounds use a form of {\em Sperner's Lemma\/}, but with the difference that the nodes in the simplicial complex used are labeled by random variables, rather than by processor IDs and features of local communication graphs (see \cite{CHLT93} or ``views'' in \cite{BoGa93}). Our work also differs from that of Maurer \cite{Mau91,Mau93} and Ahlswede and Csisz{\'a}r \cite{AhCs93} in its emphasis on the {\em communication complexity\/} of the problems: Do restrictions on the size or structure of the key space help Alice and Bob to agree while exchanging substantially fewer than $n$ bits? We treat questions of secrecy and channel noise as secondary, regarding them as factors that can make communication expensive. Orlitsky and others \cite{% % % OrGa84,ElGOr84, % OrGa90,Orl90,Orl91,Orl91b,Orl92,NOS93b} have studied communication complexity in settings where Alice observes a random variable $X$, Bob observes a random variable $Y$ (often dependent on $X$), and the object is for them to exchange single outcomes each has seen. For example, $Y$ may select two ``teams'' $i$ and $j$ from a ``league'' $S = \set{1,\dots,k}$, while $X$ reveals the winner to Alice. 
In \cite{Orl90} it is shown that $\ceiling{\log_2 k}$ bits are necessary and sufficient for a one-message protocol in which Alice tells Bob the winner. If two messages beginning with Bob are allowed, however, Bob can tell Alice the index $l$ of the first bit where the binary representations of $i$ and $j$ differ, and Alice sends the $l$th bit of her number, making at most $\ceiling{\loglog k} + 1$ bits; both of these protocols are error-free. Our setting has several differences: (i)~the ability for Alice and Bob to take repeated samples of the random variable $G$, (ii)~the lack of advance knowledge by Alice and Bob of the universe $S \subseteq \set{0,1}^n$, and (iii)~the inherent role of randomness and error. The selected-key problem can be represented in their framework, with $Y$ null, but even here there are differences in the representation and non-rankability of $S$. We show: \begin{theorem}\label{logktight} For any-key agreement with exponentially vanishing error, $\ceiling{\log_2 k}$ bits are both sufficient and necessary, where $k = \Card{S}$. The upper bound is obtained by a one-message protocol with runtime polynomial in $n$ and $k$, while the identical lower bound holds for arbitrary protocols, even when Alice is computationally omnipotent and knows the distribution of $G$. \end{theorem} This lower bound holds under the assumption that the source $G$ is a ``black box'' random variable, one that Alice and Bob can interact with only by sampling. When $G$ is a {\em feasible generator\/}, namely one computed by a probabilistic polynomial time Turing machine (PPTM), the question of lower bounds leads to the problem of whether every feasible generator has a ``monic refinement,'' as defined in Section~\ref{BPMV}. This is the same as asking whether the multi-valued mapping from seeds to valid keys has a {\em single-valued refinement\/} (see \cite{Sel91}) in the class called ``BPPSV'' by Grollmann and Selman \cite{GrSe88}. 
In Section~\ref{BPMV} we relate this further to questions about {\em fully polynomial randomized approximation schemes\/} (fpras) for functions in $\numberP$ (see \cite{KaLu83,JVV86,JeSi89}), noting that an fpras can always be ``rounded'' to take at most two values with high probability. \begin{theorem} If ties between two values in an fpras can be broken, then every feasible generator with polynomial-time decidable key set has a monic refinement, and then any-key agreement for a PPTM can be done with no communication, regardless of the size of $S$. \end{theorem} \noindent This sets up an interesting contrast between the sharp lower bounds in the case of arbitrary $G$, versus the problem that proving even a nonzero lower bound in the case of feasible $G$ requires proving that $\P \neq \numberP$. Lipton \cite{Lip94a} observed a related contrast between arbitrary and feasible {\em channels\/} (formally, these are virtually the same as our generators) in coding theory, and noted that if there were a usable source in nature that is not feasible, then it could be used to violate the random-polynomial-time analogue of Church's Thesis, which is commonly believed. \section{Main Results}\label{main} In this abstract we assume familiarity with interactive protocols, the formalization of an $r$-round {\em conversation\/} $(\alpha_1,\beta_1,\dots,\alpha_r,\beta_r)$ between Alice and Bob (beginning with Alice, and with $\beta_r$ possibly null), and unambiguous encodings of conversations by binary strings $\alpha$. (Details may be found in \cite{GMR89}.) The case $r=1$, $\beta_r$ null is a {\em one-message protocol\/}. Each of Alice and Bob is allowed to interact privately with the random variable $G$ by making ``sample requests.'' Each sample request returns some string in $\set{0,1}^n$ according to the distribution of $G$, and costs $n$ time units. Alice and Bob may also use their private coin in computations. 
Neither is allowed to see the other's coinflips or sample strings; i.e., there is no ``common randomness.'' When $G$ is fixed we write $p_y$ as short for $\Prob[G=y]$. The set $S = \set{y : p_y > 0}$ is called the {\em support set\/} of $G$, and the number $k$ stands for an upper bound on its cardinality. We express time bounds in terms of $n$, $k$, and the {\em error tolerance\/} $\epsilon$ of the protocol on the problem at hand. The numbers $n$, $k$, and $\epsilon$ are known by both Alice and Bob. A function $\epsilon(n)$ that is $o(n^{-c})$ for every fixed $c > 0$ is said to be {\em negligible\/}, and if it is $o(2^{-cn})$ for some $c > 0$, then it is {\em exponentially vanishing\/}. The basic method for the upper bounds is to form a ``histogram'' of the strings received in sample requests, and look for reproducible ``gaps'' in the observed frequencies. \begin{theorem}\label{anykey:upper} Any-key agreement, for any $G$ with range $S \subseteq \set{0,1}^n$ of size $k$, can be achieved with exponentially-vanishing error by a protocol in which Alice sends one message of at most $\ceiling{\log_2 k}$ bits, and in which Alice and Bob run in time polynomial in $n$, $k$, and $\log(1/\epsilon)$. \end{theorem} \proofsketch Let $N$ be large enough so that for each element $y \in S$, the probability that the observed frequency of $y$ in $N$ independent trials belongs to the interval $[p_y - 1/k^2\dots p_y + 1/k^2]$ is greater than $1-\epsilon/2$. $N$ is bounded by a polynomial in $k$ and $\log(1/\epsilon)$ independent of $p_y$. Alice makes $N$ sample requests and observes some number $k' \leq k$ of distinct strings returned by $G$. Let $f_1,\dots,f_{k'}$, in nonincreasing order, stand for the observed frequencies of the strings in her histogram, and for convenience put $f_{k'+1} = 0$. Then there exists some $i$, $1 \leq i \leq k'$, such that $f_i - f_{i+1} > 2/k^2$. This represents a ``non-negligible gap'' in her histogram. 
Alice chooses any such $i$ (e.g., the least one) and sends $i$ to Bob. Then Bob makes $N$ sample requests, and forms his own histogram with frequencies $f'_1,\dots,f'_l$; here $l$ may differ from $k'$. Then with error at most $\epsilon$, the set $S_A$ of the $i$ most-frequent strings observed by Alice is the same as the set $S_B$ observed by Bob. Thus if Alice commits to the {\em lexicographically\/} greatest member of $S_A$, and Bob likewise with $S_B$, they succeed with probability at least $1-\epsilon$.\qed \medskip The case where all elements of $S$ are equally likely makes $i = k$ and meets the stated upper bound $\ceiling{\log_2 k}$ in the protocol. Note that on the promise that this is the case, or even that all elements of $S$ occur with probability that is non-negligible in $k$, Alice and Bob can agree with {\em zero\/} communication by doing polynomially-many samples until, with high probability, each sees all $k$ distinct elements. In general, Alice and Bob can try the strategy of doing some pre-determined (or communicated) numbers of trials and committing to the lex greatest string each sees. This may fail when $S$ has elements of probability $1/k^c$ for many different values of $c$. A different strategy is for Alice and Bob to choose their most-frequent elements, and this can be augmented with communication about lex high or low or otherwise ``distinctive'' elements among the frequent ones. With all of this latitude even for 0-bit and 1-bit protocols, it seems surprising that the upper bound in Theorem~\ref{anykey:upper} cannot be lowered even by 1 bit. This is so even when Alice knows the {\em true\/} distribution of $G$ and is computationally omnipotent! We first prove this in the case of a 1-message protocol. \begin{theorem}\label{anykey:lower} % For all time-bounded machines representing Bob and $\delta > 0$, % % Every 1-message protocol for any-key agreement that achieves % success probability $\delta$ requires $\ceiling{\log_2(k\delta)}$ bits. 
The best success probability achieved by a one-message protocol, where Alice has $u$ distinct messages available to her and Bob is polynomial-time, is bounded above by $u/k$. \end{theorem} \proof Let a PPTM $B$ representing ``Bob'' be fixed. It is enough to consider random variables $G$ whose range $S$ is a subset of $k$ elements $e_1,\dots,e_k$ whose identities are known to both Alice and Bob in advance. The one important point is that if $e_i \notin S$, i.e.\ if $\Prob[G = e_i] = 0$, and Alice and Bob commit to $e_i$, they are considered {\em not\/} to succeed, in accordance with the stipulation that they agree on a member of $S$. We furthermore consider random variables $G$ with the property that for some $m > 0$, and all $i$, $\Prob[G = e_i]$ is a multiple of $1/2^m$. (We use $m$ and $2^m$ this way for notational uniformity with the next section, and to relate Bob's time bound to $m$.) The space of all such random variables forms a {\em simplicial complex\/} $S_k$ embedded in the nonnegative orthant of the $(k-1)$-dimensional hyperplane of points in $\Reals^k$ whose coordinates sum to 1. Two nodes in $S_k$ are adjacent iff they differ by $1/2^m$ in two coordinates, and agree in all the others. The maximum clique size in this graph is $k$, and a $k$-clique is called a {\em simplex\/} in $S_k$. $S_k$ has $k$-many extreme nodes $G_i$ defined by $\Prob[G_i = e_i] = 1$, and the nodes $G$ where $\Prob[G = e_i] = 0$ are said to form the {\em opposite facet\/} of $G_i$. Every interior node, i.e. where all elements have nonzero probability, has $k^2 - k$ neighbors, and belongs to $2k$-many simplexes. Now we define a ``coloring function'' $C: S_k \into \set{1,\dots,k}$ for all nodes $G$ by: Take some $i \in S$ and message $x_t$ ($1 \leq t \leq u$) that maximizes the probability that Bob, given message $x_t$ and sampling $G$, commits to $i$. (If $i$ is not unique, any such $i$ will do.) 
This probability is well-defined since there is no further interaction after $x_t$ is transmitted, and a computationally omnipotent Alice can do no better than committing to $i$. Then define $C(G) = i$. This coloring satisfies the hypotheses of {\em Sperner's Lemma\/}, using the statement in \cite{CHLT93}, namely: % \begin{quote} Suppose for each $i$, $C(G_i) = i$, and for every node $G$ in the opposite facet to $i$, $C(G) \neq i$. Then there exists at least one simplex whose $k$ mutually-adjacent nodes are given $k$ distinct colors by $C$. \end{quote} % Since there are only $u$ different messages Alice can send, at least $k/u$ of these nodes are optimized by sending the same message $x_t$. Since these nodes represent random variables whose component probabilities differ by $1/2^m$ at most, Bob cannot statistically distinguish them in the polynomially-many trials he is allowed. Hence for any element $e_j$, the differences among these nodes $G$ in the probability that Bob receiving $x_t$ commits to $e_j$ are at most $\delta(m)$, where $\delta(m)$ is exponentially vanishing in $m$. Since an optimal Alice commits to a different element at each node, there is one at which Bob is correct with probability at most $u/k + \delta(m)$. Letting $m \into \infty$ and exploiting metric completeness and closure yields the conclusion.\qed \medskip \noindent If we restricted attention to r.v.'s $G$ with range $S$ of cardinality exactly $k$, then the argument trivially fails if Alice and Bob know $S$ in advance, but it goes through for the case of arbitrary $S \subseteq \set{0,1}^n$ if $k \ll 2^n$. The proof works even if Bob runs in sub-exponential time, so long as $m$ can be chosen large enough (e.g.\ $m = \Theta(n)$) that Bob cannot distinguish the adjacent nodes in his time bound. The reduction from multi-round protocols to one-message protocols can lower the success probability and blow up the running time by moderate amounts. 
\begin{theorem} For every $b$-bit protocol $(A,B)$ for any-key agreement that runs in time $t$ and succeeds with probability $p$, and $\delta > 0$, there is a one-message protocol $(A',B')$ that succeeds with probability $p - \delta$, and that runs in time linear in $t$, nearly-linear in $2^b$, and polynomial in $1/\delta$. \end{theorem} \proofsketch There are at most $N = 2^b$ possible conversations under the unambiguous encoding. For each $i$, $1 \leq i \leq N$, let $q_i$ denote the probability that Alice and Bob have conversation $\alpha_i$, and let $p_i$ be the success probability conditioned on $\alpha_i$ occurring. Then $p = \sum_{i} p_i q_i$. Elementary calculation shows that there exists some $i$ such that $p_i \geq p-\delta$ and $q_i \geq Q = \delta/N(1-p+\delta)$. Both $A'$ and $B'$ are given copies of the old Alice $A$ and the old Bob $B$ to simulate. A computationally-omnipotent $A'$ who knows the distribution of $G$ could calculate $i$ herself and send $\alpha_i$ to $B'$. We observe that a polynomial-time $A'$ can do almost as well: she can simulate enough runs of the $(A,B)$ protocol so that with confidence at least $1-\delta/2$, she finds a conversation $\alpha_i$ that gives $q_i > Q/2$ and $p_i > p-2\delta$. The number of runs needed is on the order of: \begin{equation}\label{mess} N\log N (1-p+\delta)(p-\delta)(1-p+\delta)\log^3(1/\delta)/\delta^3. \end{equation} When $B'$ receives $\alpha_i$ from $A'$, he does multiple runs of the old $(A,B)$ protocol until conversation $\alpha_i$ occurs, and chooses the same value $B$ did in the first such run. With probability at least $1-\delta/2$, this happens in the first $(1/q_i)\log_e (2/\delta)$ runs, so the time taken by the new Bob upon receiving a message from Alice can be bounded by a constant times $tN\log_2(1/\delta)\mdot(1-p)/\delta$, which is less than the time for $A'$. The conclusions follow.\qed % More interesting is that the reduction works even if $A'$ is % time-bounded. 
$A'$ simulates enough runs of the $(A,B)$ protocol % so that she sees enough occurrences of some conversation $\alpha_i$ % such that with confidence at least $1-\delta$ she knows both that % $q_i \geq Q/2$ and that $p_i \geq p-2\delta$. % The number of occurrences of $\alpha_i$ she needs for the latter is % approximately $m = (p-\delta)(1-(p-\delta))[(1/\delta)\log(1/\delta)]^2$. % With probability at least $1-\delta$, she will obtain $m$ occurrences % of $\alpha_i$ within $m(1/q_i)\log(1/\delta)$ trials. The number of trials % she needs to be equally confident that $q_i > Q/2$ is bounded by % $4\log^2(1/Q)\mdot (1-Q)/Q$, which is on the order of % $[N(1-p+\delta)/\delta] \mdot \log[N(1-p+\delta)/\delta]$. % A rough upper bound is had by multiplying % $m(1/q_i)\log(1/\delta)$ by $\log N$, % giving on the order of % \[ % N\log N (1-p+\delta)(p-\delta)(1-p+\delta)\log^3(1/\delta)/\delta^3 % \] % trials. The success probability of the one-message protocol with the % above description is at least $p - 4\delta$, and the obvious adjustment % fulfils the stated conditions.\qed \medskip Note that if $p$ is close to 1, and $\delta$ is about $(1-p)/2$ in Equation~\ref{mess}, then the number of trials needed works out roughly to $N\mdot (1/\delta)$. In any event, if we tolerate a falloff in the success probability of the form 1/polynomial, then $A'$ and $B'$ still run in polynomial time. When the new Alice is computationally omnipotent and knows the distribution, however, the hit on the running time is only a factor of $\log(1/\delta)$, which is polynomial even when $1-p$ is exponentially vanishing. This suffices to prove the lower bound in the following stronger form of Theorem~\ref{logktight}, while the proof of the upper bound is deferred to the next subsection. 
\begin{theorem}\label{anykey:realtight} For any integer $a \geq 0$, in order to attain success probability $1/2^a - 1/2^n$ for any-key agreement with a polynomial-time Bob, $\ceiling{\log_2 k} - a$ bits are both necessary and sufficient.\qed \end{theorem} \subsection{Other Agreement Problems} Now we study the communication complexity of the other three problems in the Introduction, namely selected-key agreement, subset agreement, and weak subset agreement. Where the allowed error probability $\epsilon$ on the protocol is unstated, it is assumed to be exponentially vanishing. \begin{theorem}\label{selkey} Agreement on a selected key $w$ can be achieved in expected polynomial time (in $n$, $k$, and $1/p_w$) by a one-message protocol that communicates at most $2\ceiling{\log_2 k}$ bits. \end{theorem} \proofsketch Let $w$ denote the element that Alice wishes to communicate. Let $c$ be such that $p_w > 1/k^c$. Assume further that Alice knows the value of $c$ (if not, she can sample in increasing powers of $k$ until she obtains a good estimate of $p_w$). Let $N$ be large enough so that the probability that the observed frequency of $w$ in $N$ independent trials is in the interval $[p_w - 1/4k^2 \dots p_w + 1/4k^2]$ is greater than $1-\epsilon/2$. Alice makes $N$ sample requests and chooses a ``gap'' of at least $1/k^d$ in her histogram, for some $d > c$. Note that such a gap must exist since $p_w > 1/k^d$. Let $f_i$ and $f_{i+1}$ denote the observed frequencies on either side of the gap, so that $f_i - f_{i+1} > 1/k^d$. Let $S_A$ denote the elements whose observed frequencies are at least $f_i$. As before, Alice sends to Bob the index $i$ of the gap. In addition, Alice sends the lexicographic rank of $w$ in the set $S_A$. Bob samples until he sees all $i$ elements promised by Alice and knows their frequencies with enough confidence to perceive the gap, and then deduces $w$ from the rank information in Alice's message. 
\qed \medskip \noindent ({\em Remarks:\/} If we want Bob to shut himself off in polynomial time in all possible computations, it seems we need Alice also to communicate $c$ to Bob, taking an extra $\log c = \loglog(1/p_w)$ bits. When $c$ is fixed; i.e., when the probability of the selected key is non-negligible, the time bounds are polynomial in $k$.) This leaves an interesting question about the gulf between $2\ceiling{\log_2 k}$ bits in the upper bound and the lower bound of $\ceiling{\log_2 k}$ bits that carries over from Theorem~\ref{logktight}. If Bob were able to compute the ranking function of $S$, as obtains in other cases of key-transfer we know in the literature, then clearly $\ceiling{\log_2 k}$ bits would suffice. Our {\em point\/} is that when $S$ is an arbitrary subset of $\set{0,1}^n$ of moderate size ($\log_2 k < n/2$), we see no way for Alice to tell Bob what to look for without taking samples and communicating some robust feature of the results, in addition to the ranking information. We suspect that our upper bound is tight, but have not been able to prove it. \medskip For the problem of subset agreement, let $m$ denote the sizes of sets $S_A$ and $S_B$ that Alice and Bob must commit to. We first observe that $m$ can be at most $k'$, where $k' \leq k$ denotes the number of elements that occur with non-negligible probability. \begin{corollary} For subset agreement in expected polynomial time, $\ceiling{\log_2 k}$ bits are necessary and sufficient. \end{corollary} \proofsketch The lower bound follows immediately from Theorem \ref{anykey:lower}. The protocol used in the proof of Theorem \ref{anykey:upper} can be modified to work for the subset agreement case. Alice samples sufficiently many times, and chooses a gap $i$ such that there are at least $m$ elements with observed frequencies at least $f_i$. Such a gap must exist, since there are at least $m$ elements with non-negligible probability. 
Alice then sends the number of elements above the ``chosen gap.'' % % In addition, as in the upper bound % for selected-key agreement, Alice sends the parameter $c$ that governs % the number of trials, resulting in the $O(1)$ additive overhead. % Finally Alice and Bob commit to the lexicographically largest $m$ strings from this set.\qed \medskip ({\em Remarks:\/} Again, if Bob is required always to shut himself off in a given time bound, we have Alice send an additional $\loglog(1/p_m)$ bits, where $p_m$ is the frequency of the $m$th most likely element.) Before proceeding to the problem of weak subset agreement, we prove the upper bound in Theorem~\ref{anykey:realtight}, re-stated in the following way. Let $\ell = \ceiling{\log_2 k}$. \begin{proposition} With $b$ bits of communication, Alice and Bob can achieve any-key agreement with success probability at least $1/2^{\ell-b} - 1/2^n$, in polynomial time. \end{proposition} \proofsketch As in the proof of Theorem \ref{anykey:upper}, Alice finds an index $i$, $1 \leq i \leq k'$, such that the observed frequencies $f_i$ and $f_{i+1}$ satisfy the ``gap'' requirement $f_i - f_{i+1} > 1/k^2$. Instead of sending $i$ to Bob, Alice sends the {\em most significant\/} $b$ bits of the binary representation of $i$. Bob ``fills in'' the least significant $\ell - b$ bits of the index $i$ randomly. Clearly, the probability that Bob hits the index that Alice intended is at least $1/2^{\ell - b}$. \qed \medskip Using similar ideas, we provide an upper bound for weak subset agreement. \begin{corollary} Weak subset agreement can be achieved with $\ceiling{\log_2 k - \log_2 m}$ bits. \end{corollary} \proof Alice chooses a ``gap index'' $i$ and sends the binary string $x$ that represents the most significant $\ceiling{\log_2 k - \log_2 m}$ bits of $i$. Alice and Bob, respectively, initialize $S_A$ and $S_B$ to $\myemptyset$. 
For each possible binary string $y$ of $\ceiling{\log_2 m}$ bits, Alice and Bob consider the index $j = xy$ obtained by concatenation. Let $e_A^j$ and $e_B^j$, respectively, denote the lexicographically largest members of the set of elements that Alice and Bob observe to have frequencies at least $f_j$. Alice and Bob add $e_A^j$ and $e_B^j$, respectively, to $S_A$ and $S_B$. Clearly, $S_A$ and $S_B$ have $m$ elements each. Moreover, both Alice and Bob must consider the index $i$ that Alice picked initially; by the proof of Theorem \ref{anykey:upper}, $e_A^i = e_B^i$ with very high probability, so $S_A \cap S_B \neq \myemptyset$ with high probability. The running times are similar to those in Theorem~\ref{anykey:upper}.\qed Next we prove that $\ceiling{\log_2 k - 2 \log_2 m}$ bits are necessary for weak-subset agreement. We leave open whether either of these bounds can be improved to meet the other. \begin{corollary} Weak subset agreement requires at least $\ceiling{\log_2 k - 2 \log_2 m}$ bits. \end{corollary} \proof Let $\epsilon$ be an exponentially vanishing quantity. Suppose to the contrary that there is a protocol $P$ that uses $\log_2 k - 2 \log_2 m - 1$ bits of communication to succeed with probability $1 - \epsilon$ on any random variable $G$. We show that Alice and Bob can use the protocol $P$ to beat the lower bound of Theorem \ref{anykey:lower}: Alice and Bob simply run protocol $P$ to commit to sets $S_A$ and $S_B$ of size $m$, and then pick elements $s_A \in S_A$ and $s_B \in S_B$ uniformly at random. The probability that $s_A = s_B$ equals $1/m^2$, which is greater than the upper bound of $1/2m^2$ % % on the success probability % implied by Theorem \ref{anykey:lower}, by a non-negligible quantity. 
\qed %% Upper and Lower Bounds for the other problems (END) %% \section{Feasible Generators}\label{BPMV} In this section we remove the assumption that the generator $G$ is a ``black box,'' and instead suppose that it is modeled by a probabilistic polynomial-time Turing machine (PPTM) $M_G$. Without much loss of generality we may suppose that $M_G$ has a binary fair coin, and makes $m$ coinflips in any computation. Then $M_G$ can be regarded as an ordinary deterministic Turing machine computing a function from $\set{0,1}^m$ to $\set{0,1}^n$, with uniform distribution over strings $u \in \set{0,1}^m$. In order to talk about asymptotic complexity in a uniform manner, we give $n$ to $M_G$ on its input tape, and we also suppose that $m(n) = n^{O(1)}$. For a useful extension of generality, we allow the input tape of $M_G$ to hold an {\em argument string\/} $x \in \set{0,1}^n$. Then $M_G$ represents an {\em ensemble\/} of random variables $G_x$, with valid key-sets $S_x$. Formally, for any language $S$, let $S_x := \set{y: \pair{x,y} \in S}$, where $\pair{,}$ is some fixed feasible pairing function. \begin{definition}\label{feasgen} A {\em feasible generator\/} consists of a % % polynomial-time decidable % set $S$ and a PPTM $M$, such that for all arguments $x$, $M$ makes $m(|x|)$ coinflips and $\Prob_u[M(x,u) \in S_x] > 3/4$. \end{definition} \noindent % % If $S \in \BPP$, then we can make an essentially equivalent generator % $M'$ with $S' \in \P$, and if $m \geq n+1$ then we can also % arrange that every element in $S'$ occurs with non-zero probability. % Note that it is not the case that every random seed $u$ leads to a valid key, but the probability of failure is not too large. If $S$ is polynomial-time decidable, we can equivalently suppose $M(x,u) = \bot$ if $M(x,u) \notin S_x$. 
Two examples where as yet no deterministic polynomial-time algorithm is known are generating certified primes or normal bases for finite fields (the latter is known for fixed characteristic \cite{Len91}). The existing generators have the above properties: not every run gives a certified prime or a normal basis, though since the certificates or normality can be checked in polynomial time, invalid outputs can be discarded. In general we allow that $S$ may not admit deterministic polynomial-time generation in the sense of Sanchis and Fulk \cite{SaFu90}. In the primes and $\GF(2)$ cases, the argument $x$ is used only for its length $n$. Jerrum, Valiant, and Vazirani \cite{JVV86} considered generators in which $x$ stands for a graph, and $S_x$ is e.g.\ the set of perfect matchings in $x$. They were interested in generating elements with uniform or nearly-uniform distribution on $S_x$, as holds for normal bases \cite{vzGGi90}. We pose intuitively the opposite question: can the distribution on $S_x$ be heavily biased in favor of one element, or a small number of elements? \begin{definition} A feasible generator $(M,S)$ has a {\em monic refinement\/} $M'$ if for all arguments $x$, there exists a single $y \in S_x$ such that $\Prob_u[M'(x,u) = y] > 3/4$. \end{definition} \noindent Here the ``3/4'' is amplifiable by majority vote to give exponentially-vanishing error in polynomially-many trials. The function mapping $x$ to $y$ then belongs to the class BPPSV defined by Grollmann and Selman \cite{GrSe88}. Having a monic refinement is different from the notion of probabilistically ``isolating a unique element'' in Chari, Rohatgi, and Srinivasan \cite{CRS93}. They use the method from \cite{MVV87} of assigning random weights to edges so that with high probability, there is a unique minimum-weight perfect matching (when one exists at all), but different random weightings can yield different matchings. 
Now we reconsider the problems of Section~\ref{intro} when the generator is feasible, and when Alice and Bob {\em share\/} the argument string $x$. This models the situation of two remote users working on the same problem who want to generate the same primes or normal bases without common randomness or heavy communication. In these examples the size $k$ of the key sets $S$ is exponential in $n$, and so the sampling methods for the upper bounds in the last section take too long. Instead: \begin{proposition} Alice and Bob can solve any-key agreement with no communication (and success probability 3/4) for a feasible generator iff it has a monic refinement.\qed \end{proposition} We show, however, that the question of monic refinements is hard even when $k = 2$, using a natural class of PPTMs. A function $f : \Sigma^* \into \Nat$ is said to have a {\em fully polynomial time randomized approximation scheme\/} (fpras) \cite{KaLu83,JVV86} if there is a PPTM $M$ such that for all $x \in \Sigma^*$ and $\epsilon > 0$ (where we suppose $\epsilon = 1/c$ for some integer $c$): % \begin{equation}\label{fpraseq} \prob_u\left[\frac{f(x)}{(1+\epsilon)} \;\leq\; M(\pair{x,0^c},u) \;\leq\; f(x)(1+\epsilon)\right] \;\; > \;\; 3/4. \end{equation} % Jerrum and Sinclair \cite{JeSi89} showed that the permanent function for ``dense'' 0-1 matrices, which is still $\numberP$-complete, has an fpras. Note that $M$ is multi-valued. We observe that the approximation can be done by a total function which is at most 2-valued. The ``$3/4$'' here and in (\ref{fpraseq}) can be amplified to give exponentially vanishing error. \begin{proposition}\label{fpras-to-BP2V} Let $f$ have an fpras. Then there is a $p$-machine $M'$ such that for all $x \in \Sigma^*$ and $c > 0$, there are two values $y_1,y_2$ such that $f(x)/(1+\epsilon) \leq y_1 \leq y_2 \leq f(x)(1+\epsilon)$ and $\prob_r[M'(x,0^c) \in \set{y_1,y_2}] > 3/4$. 
\end{proposition} \noindent The proof idea is to let $a = M(x,u)$ and round $a$ off to the nearest of appropriately-chosen gridpoints. However, if the true value of $f(x)$ is midway between gridpoints, then we may expect ``equal scatter'' between the two values, with no non-negligible advantage for either. If instead we always ``round down,'' then we have a similar situation when $f(x)$ is close to a gridpoint. We call the problem of whether $M'$ can be made single-valued the ``symmetry-breaking problem for fpras.'' We first show: \begin{theorem} If every 2-valued feasible generator has a monic refinement, then all feasible generators with $S \in \P$ have monic refinements. \end{theorem} \noindent The proof is not so simple as for analogous results about $\NP$-machines in \cite{Sel91,HNOS93b}. One attempt is to let $M$ be given, and by analogy with the next-ID function of an $\NP$-machine, define $g(x,v) \mapsto b$ if $(\exists u \sqsupseteq vb)\,M(x,u) \in S$. (Here $b \in \set{0,1}$.) However, $v$ might be a node in the tree of $M$ with very few valid outputs below it, and hence the valid outputs of $g$ may not have high enough probability. A second attempt is to define $g(x,v) \mapsto 1$ if $\prob_{u \sqsupseteq v}[M(x,u) \in S_x] \geq 1/4$, and $g(x,v) \mapsto 0$ if $\prob_{u \sqsupseteq v}[M(x,u) \in S_x] \leq 3/4$. Then $g$ does meet the requirements of two-valuedness and high probability, so by hypothesis there is a total single-valued restriction $g'$ and an $M'$ which computes it with high probability. However, depth-first backtrack search on `1' values of $g'$ might take exponential time. Our proof modifies the second attempt to make the search halt in expected polynomial time, using a trick analogous to the ``method of conditional probabilities'' in \cite{AS92}. \bigskip \proofsketch Given $f$ and the PPTM $M$, let $q(n) = 2p(n) + 5$. For all $a$, $0 \leq a \leq p(n)+1$, and all $v \in \set{0,1}^{
'KeyComms.aux' \relax \bibstyle{alpha} \citation{GoSi91} \citation{CHLT93,BoGa93,SaZa93,HeSh93} \citation{CHLT93} \citation{BoGa93} \@writefile{toc}{\string\contentsline\space {section}{\string\numberline\space {1}Introduction}{1}} \newlabel{intro}{{1}{1}} \citation{Mau91,Mau93} \citation{AhCs93} \citation{OrGa90,Orl90,Orl91,Orl91b,Orl92,NOS93b} \citation{Orl90} \citation{Sel91} \citation{GrSe88} \citation{KaLu83,JVV86,JeSi89} \citation{Lip94a} \newlabel{logktight}{{1.1}{2}} \citation{GMR89} \@writefile{toc}{\string\contentsline\space {section}{\string\numberline\space {2}Main Results}{3}} \newlabel{main}{{2}{3}} \newlabel{anykey:upper}{{2.1}{3}} \citation{CHLT93} \newlabel{anykey:lower}{{2.2}{4}} \newlabel{mess}{{1}{5}} \newlabel{anykey:realtight}{{2.4}{5}} \@writefile{toc}{\string\contentsline\space {subsection}{\string\numberline\space {2.1}Other Agreement Problems}{5}} \newlabel{selkey}{{2.5}{5}} \@writefile{toc}{\string\contentsline\space {section}{\string\numberline\space {3}Feasible Generators}{7}} \newlabel{BPMV}{{3}{7}} \citation{Len91} \citation{SaFu90} \citation{JVV86} \citation{vzGGi90} \citation{GrSe88} \citation{CRS93} \citation{MVV87} \citation{KaLu83,JVV86} \citation{JeSi89} \newlabel{feasgen}{{3.1}{8}} \newlabel{fpraseq}{{2}{8}} \citation{Sel91,HNOS93b} \citation{AS92} \newlabel{fpras-to-BP2V}{{3.2}{9}} \citation{GOP94} \bibdata{newpapers} \bibcite{AhCs93}{AC93} \@writefile{toc}{\string\contentsline\space {section}{\string\numberline\space {4}Conclusion}{10}} \bibcite{AS92}{AS92} \bibcite{BoGa93}{BG93} \bibcite{CHLT93}{CHLT93} \bibcite{CRS93}{CRS93} \bibcite{GMR89}{GMR89} \bibcite{GOP94}{GOP94} \bibcite{GrSe88}{GS88} \bibcite{GoSi91}{GS91} \bibcite{HNOS93b}{HNOS93} \bibcite{HeSh93}{HS93} \bibcite{JeSi89}{JS89} \bibcite{JVV86}{JVV86} \bibcite{KaLu83}{KL83} \bibcite{Len91}{Len91} \bibcite{Lip94a}{Lip94} \bibcite{Mau91}{Mau91} \bibcite{Mau93}{Mau93} \bibcite{MVV87}{MVV87} \bibcite{NOS93b}{NOS93} \bibcite{OrGa90}{OG90} \bibcite{Orl90}{Orl90} 
\bibcite{Orl91b}{Orl91a} \bibcite{Orl91}{Orl91b} \bibcite{Orl92}{Orl92} \bibcite{Sel91}{Sel91} \bibcite{SaFu90}{SF90} \bibcite{SaZa93}{SZ93} \bibcite{vzGGi90}{vzGG90} SHAR_EOF fi # end of overwriting check if test -f 'KeyComms.bbl' then echo shar: will not over-write existing file "'KeyComms.bbl'" else cat << \SHAR_EOF > 'KeyComms.bbl' \begin{thebibliography}{HNOS93} \bibitem[AC93]{AhCs93} R.~Ahlswede and I.~Csisz{\`a}r. \newblock Common randomness in information theory and cryptography---part {I}: Secret sharing. \newblock {\em IEEE Trans. Info. Thy.}, 39:1121--1132, 1993. \bibitem[AS92]{AS92} N.~Alon and J.~Spencer. \newblock {\em The Probabilistic Method}. \newblock Wiley, 1992. \newblock With an appendix by P. Erd{\H{o}}s. \bibitem[BG93]{BoGa93} E.~Borowsky and E.~Gafni. \newblock Generalized {FLP} impossibility result for {$t$}-resilient asynchronous computations. \newblock In {\em Proc. 25th Annual ACM Symposium on the Theory of Computing}, pages 91--100, 1993. \bibitem[CHLT93]{CHLT93} S.~Chaudhuri, M.~Herlihy, N.~Lynch, and M.~Tuttle. \newblock A tight lower bound for {$k$}-set agreement. \newblock In {\em Proc. 34th Annual IEEE Symposium on Foundations of Computer Science}, pages 206--215, 1993. \bibitem[CRS93]{CRS93} S.~Chari, P.~Rohatgi, and A.~Srinivasan. \newblock Randomness-optimal unique element isolation, with applications to perfect matching and related problems. \newblock In {\em Proc. 25th Annual ACM Symposium on the Theory of Computing}, pages 458--467, 1993. \bibitem[GMR89]{GMR89} S.~Goldwasser, S.~Micali, and C.~Rackoff. \newblock The knowledge complexity of interactive proof systems. \newblock {\em SIAM J. Comput.}, 18:186--208, 1989. \bibitem[GOP94]{GOP94} O.~Goldreich, R.~Ostrovsky, and E.~Petrank. \newblock Computational complexity and knowledge complexity. \newblock In {\em Proc. 26th Annual ACM Symposium on the Theory of Computing}, 1994. \newblock to appear. \bibitem[GS88]{GrSe88} J.~Grollmann and A.~Selman. 
\newblock Complexity measures for public-key cryptosystems. \newblock {\em SIAM J. Comput.}, 17:309--335, 1988. \bibitem[GS91]{GoSi91} A.~Goldberg and M.~Sipser. \newblock Compression and ranking. \newblock {\em SIAM J. Comput.}, 20, 1991. \bibitem[HNOS93]{HNOS93b} L.~Hemaspaandra, A.~Naik, M.~Ogiwara, and A.~Selman. \newblock Computing solutions uniquely collapses the polynomial hierarchy. \newblock Technical Report CS-TR 93-28, Computer Science Dept., SUNY at Buffalo, August 1993. \bibitem[HS93]{HeSh93} M.~Herlihy and N.~Shavit. \newblock The asynchronous computability theorem for {$t$}-resilient tasks. \newblock In {\em Proc. 25th Annual ACM Symposium on the Theory of Computing}, pages 111--120, 1993. \bibitem[JS89]{JeSi89} M.~Jerrum and A.~Sinclair. \newblock Approximating the permanent. \newblock {\em SIAM J. Comput.}, 18:1149--1178, 1989. \bibitem[JVV86]{JVV86} M.~Jerrum, L.~Valiant, and V.~Vazirani. \newblock Random generation of combinatorial structures from a uniform distribution. \newblock {\em Theor. Comp. Sci.}, 43:169--188, 1986. \bibitem[KL83]{KaLu83} R.~Karp and M.~Luby. \newblock Monte-{C}arlo algorithms for enumeration and reliability problems. \newblock In {\em Proc. 24th Annual IEEE Symposium on Foundations of Computer Science}, pages 56--64, 1983. \bibitem[Len91]{Len91} H.W. Lenstra. \newblock Finding isomorphisms between finite fields. \newblock {\em Mathematics of Computation}, 56:329--347, 1991. \bibitem[Lip94]{Lip94a} R.~Lipton. \newblock A new approach to information theory. \newblock In {\em Proc. 11th Annual Symposium on Theoretical Aspects of Computer Science}, volume 775 of {\em Lect. Notes in Comp. Sci.}, pages 699--708. Springer Verlag, 1994. \bibitem[Mau91]{Mau91} U.~Maurer. \newblock Perfect cryptographic security from partially independent channels. \newblock In {\em Proc. 23rd Annual ACM Symposium on the Theory of Computing}, pages 561--572, 1991. \bibitem[Mau93]{Mau93} U.~Maurer. 
\newblock Secret key agreement by public discussion from common information. \newblock {\em IEEE Trans. Info. Thy.}, 39:733--742, 1993. \bibitem[MVV87]{MVV87} K.~Mulmuley, U.~Vazirani, and V.~Vazirani. \newblock Matching is as easy as matrix inversion. \newblock {\em Combinatorica}, 7:105--113, 1987. \bibitem[NOS93]{NOS93b} M.~Naor, A.~Orlitsky, and P.~Shor. \newblock Three results on interactive communication. \newblock {\em IEEE Trans. Info. Thy.}, 39:1608--1615, 1993. \bibitem[OG90]{OrGa90} A.~Orlitsky and A.~El Gamal. \newblock Average and randomized communication complexity. \newblock {\em IEEE Trans. Info. Thy.}, 36:3--16, 1990. \bibitem[Orl90]{Orl90} A.~Orlitsky. \newblock Worst-case interactive communication {I}: Two messages are almost optimal. \newblock {\em IEEE Trans. Info. Thy.}, 36:1111--1126, 1990. \bibitem[Orl91a]{Orl91b} A.~Orlitsky. \newblock Interactive communication: balanced distributions, correlated files, and average-case complexity. \newblock In {\em Proc. 32nd Annual IEEE Symposium on Foundations of Computer Science}, pages 228--238, 1991. \bibitem[Orl91b]{Orl91} A.~Orlitsky. \newblock Worst-case interactive communication {II}: Two messages are not optimal. \newblock {\em IEEE Trans. Info. Thy.}, 37:995--1005, 1991. \bibitem[Orl92]{Orl92} A.~Orlitsky. \newblock Average-case interactive communication. \newblock {\em IEEE Trans. Info. Thy.}, 38:1534--1547, 1992. \bibitem[Sel91]{Sel91} A.~Selman. \newblock A taxonomy of complexity classes of functions. \newblock Technical Report 91--12, Dept. of Comp. Sci., SUNY / Buffalo, 1991. \newblock Revised June 1992, to appear in {\em J. Comp. Sys. Sci.} \bibitem[SF90]{SaFu90} L.~Sanchis and M.~Fulk. \newblock On the efficient generation of language instances. \newblock {\em SIAM J. Comput.}, 19:281--296, 1990. \bibitem[SZ93]{SaZa93} M.~Saks and F.~Zaharoglou. \newblock Wait-free {$k$}-set agreement is impossible: The topology of public knowledge. \newblock In {\em Proc. 
25th Annual ACM Symposium on the Theory of Computing}, pages 101--110, 1993. \bibitem[vzGG90]{vzGGi90} J.~von~zur Gathen and M.~Giesbrecht. \newblock Constructing normal bases in finite fields. \newblock {\em J. Symb. Comput.}, 10:547--570, 1990. \end{thebibliography} SHAR_EOF fi # end of overwriting check if test -f 'KeyComms.blg' then echo shar: will not over-write existing file "'KeyComms.blg'" else cat << \SHAR_EOF > 'KeyComms.blg' This is BibTeX, C Version 0.99c The top-level auxiliary file: KeyComms.aux The style file: alpha.bst Database file #1: newpapers.bib SHAR_EOF fi # end of overwriting check # End of shell archive exit 0