-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.html
More file actions
749 lines (718 loc) · 48.2 KB
/
index.html
File metadata and controls
749 lines (718 loc) · 48.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="author" content="Robin Chan">
<title>Robin Shing Moon Chan</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<link href="https://fonts.googleapis.com/css?family=Alegreya" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Spectral" rel="stylesheet">
<link rel="stylesheet" type="text/css" href="stylesheet.css?v=2">
<link rel="icon" href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>🌚</text></svg>">
</head>
<body>
<div id="content" class="content-card">
<div class="card">
<div id="top"></div>
<header class="site-header">
<nav class="site-nav" aria-label="Primary">
<a class="site-nav__brand" href="#top"><span class="name-highlight">Robin</span> Shing Moon Chan <span class="site-nav__brand--local">(陳承滿)</span></a>
</nav>
</header>
<table class="layout-table">
<tbody>
<tr style="padding:0px">
<td style="padding:0px">
<div class="page-columns">
<aside class="profile-column">
<div class="profile-card" id="contact">
<p class="portrait">
<img src="images/chanr-2.jpg" class="portrait-image" alt="Portrait of Robin Chan">
</p>
<p class="subtitle">PhD Student, ETH Zürich</p>
<p class="contact">
<a href="mailto:robin.chan@inf.ethz.ch">robin.chan@inf.ethz.ch</a>
</p>
<div class="contact-links">
<a href="https://scholar.google.com/citations?user=x-BfbhEAAAAJ&amp;hl=en">Google Scholar</a>
<span class="contact-divider" aria-hidden="true">/</span>
<a href="https://bsky.app/profile/robinsmchan.bsky.social">Bluesky</a>
<!-- <span class="contact-divider" aria-hidden="true">/</span>
<a href="https://www.linkedin.com/in/robin-chan-494a261a3/">LinkedIn</a> -->
</div>
</div>
</aside>
<div class="main-column">
<section id="about" class="about-section">
<p>
I'm a PhD student at the <a href="https://ml.inf.ethz.ch/">Institute for Machine Learning</a> at ETH Zürich, where I'm advised by Prof. Ryan Cotterell at <a href="https://rycolab.io/">Rycolab</a>.
My research focuses on inference-time language model control, integrating methods from probabilistic inference and human-computer interaction.
I'm quite active in the <a href="https://genlm.org/">GenLM</a> research consortium, where we are building an open-source ecosystem for language model probabilistic programming.
Before that, I obtained a master's degree in data science from ETH Zürich, graduating with a thesis on controlled LLM code generation at <a href="https://www.zurich.ibm.com/">IBM Research Europe (ZRL)</a>.
</p>
<p>
<strong>Personal bits:</strong> some of my favorite books: [<a href="https://www.goodreads.com/book/show/9402073-tauben-fliegen-auf">1</a>,
<a href="https://www.goodreads.com/book/show/406235.Giovanni_s_Room?from_search=true&amp;from_srp=true&amp;qid=rEcFQz1BK6&amp;rank=1">2</a>,
<a href="https://www.goodreads.com/book/show/36809135-where-the-crawdads-sing">3</a>],
some of my favorite movies: [<a href="https://www.imdb.com/title/tt0113247/?ref_=nv_sr_srsg_1_tt_6_nm_1_q_la%2520haine">1</a>,
<a href="https://www.imdb.com/title/tt27503384/">2</a>, <a href="https://www.imdb.com/title/tt4975722/?ref_=nv_sr_srsg_1_tt_7_nm_0_q_moonlight">3</a>].
I like making music
[<a href="https://github.com/chanr0/chanr0.github.io/blob/main/images/singing.jpg?raw=true">me Christmas caroling with friends</a>].
</p>
</section>
</div>
</div>
<table style="width:100%;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;scroll-margin-top:96px;" id="news-table"><tbody>
<tr>
<td style="padding:20px;width:100%;vertical-align:middle">
<div class="news-attending-grid">
<div class="news-column">
<h2 id="news" class="section-heading">News</h2>
<section class="news-panel" aria-label="Latest news">
<div class="news-list" role="list">
<article class="news-item" role="listitem">
<time class="news-date" datetime="2026-03">March 2026</time>
<p class="news-summary">
We won a best paper award at CHI'26 (top 1% of submissions)! 🏆
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2026-03">March 2026</time>
<p class="news-summary">
We just published a new preprint about ensembling language models with sequential Monte Carlo with the people at GenLM!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2026-01">January 2026</time>
<p class="news-summary">
One paper about interactive ambiguity resolution in natural language interfaces was accepted at <a href="https://chi2026.acm.org/">CHI 2026</a> (main).
Thanks to the team; preprint available <a href="https://arxiv.org/abs/2603.01795">here</a>! We also released an
<a href="https://sql-ambiguity.ivia.ch">interactive blog post</a> about it.
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2025-10">October 2025</time>
<p class="news-summary">
I got an outstanding reviewer award at NeurIPS'25 🏆
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2025-07">July 2025</time>
<p class="news-summary">
One paper accepted at <a href="https://ieeevis.org/year/2025/welcome">IEEE VIS 2025</a> in Vienna — congratulations to the team!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2025-03">March 2025</time>
<p class="news-summary">
Our paper <em>Finding Needles in Document Haystacks: Augmenting Serendipitous Claim Retrieval Workflows</em> was accepted at <a href="https://chi2025.acm.org/">CHI 2025</a> in Yokohama, Japan — congratulations to the team!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2024-12">December 2024</time>
<p class="news-summary">
Our paper <em>A Design Space for Intelligent Dialogue Augmentation</em> was accepted at <a href="https://iui.acm.org/2025/index.html">ACM IUI 2025</a>! I will be presenting it in Cagliari in March. Thanks to all co-authors!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2024-10">October 2024</time>
<p class="news-summary">
The paper we wrote at IBM Research on API integration with LLMs was accepted at <a href="https://2024.emnlp.org/">EMNLP 2024</a> (Industry Track) — thanks to all co-authors! My colleague Thomas Gschwind from IBM Research will be presenting it in Miami in November.
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2024-09">September 2024</time>
<p class="news-summary">
Our paper <em>On Affine Homotopy between Language Encoders</em> was accepted at <a href="https://neurips.cc/">NeurIPS 2024</a>. I will be presenting it in Vancouver in December. Thanks to all co-authors!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2024-08">August 2024</time>
<p class="news-summary">
We gave a tutorial about the representational capacity of neural language models at <a href="https://2024.aclweb.org/">ACL 2024</a> in Bangkok. Check out our <a href="https://acl2024.ivia.ch/">interactive tutorial webpage</a>!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2024-05">May 2024</time>
<p class="news-summary">
Two papers I co-authored were accepted at <a href="https://2024.aclweb.org/">ACL 2024</a>!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2024-03">March 2024</time>
<p class="news-summary">
Started a PhD at ETH Zürich, co-advised by Prof. Menna El-Assady and Prof. Ryan Cotterell! 🎉
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2023-09">September 2023</time>
<p class="news-summary">
Together with Katya Mirylenka, we will give a talk at <a href="https://zurich-nlp.ch/">Zurich-NLP</a> about our work at IBM Research, at the ETH AI Center. RSVP <a href="https://zurich-nlp.ch/events/">here</a>!
</p>
</article>
<article class="news-item" role="listitem">
<time class="news-date" datetime="2023-07">July 2023</time>
<p class="news-summary">
Our paper on counterfactual sample generation was accepted at <a href="https://2023.aclweb.org/">ACL</a>. I will be presenting it in Toronto in a few days! Check out our <a href="https://dcc-frontend.onrender.com/">blog post</a> about the paper!
</p>
</article>
</div>
</section>
</div>
<div class="attending-column">
<h2 id="attending" class="section-heading">Attending</h2>
<section class="attending-panel" aria-label="Upcoming conferences and events">
<div class="attending-list" role="list">
<article class="attending-item attending-entry" data-event-date="2026-04-01" role="listitem">
<time class="news-date" datetime="2026-04">April 2026</time>
<p class="news-summary">
CHI 2026, Barcelona
</p>
</article>
<article class="attending-item attending-entry" data-event-date="2026-01-15" role="listitem">
<time class="news-date" datetime="2026-01">January 2026</time>
<p class="news-summary">
CHI-FRO/GenLM Onsite, Boston
</p>
</article>
<article class="attending-item attending-entry" data-event-date="2026-01-20" role="listitem">
<time class="news-date" datetime="2026-01">January 2026</time>
<p class="news-summary">
SwissCHI, Lausanne
</p>
</article>
<article class="attending-item attending-entry" data-event-date="2025-09-15" role="listitem">
<time class="news-date" datetime="2025-09">September 2025</time>
<p class="news-summary">
GenLM Onsite, Boston
</p>
</article>
<article class="attending-item attending-entry" data-event-date="2025-04-01" role="listitem">
<time class="news-date" datetime="2025-04">April 2025</time>
<p class="news-summary">
COLT Lab, Barcelona
</p>
</article>
<article class="attending-item attending-entry" data-event-date="2025-03-01" role="listitem">
<time class="news-date" datetime="2025-03">March 2025</time>
<p class="news-summary">
IUI, Cagliari
</p>
</article>
<article class="attending-item attending-entry" data-event-date="2024-12-01" role="listitem">
<time class="news-date" datetime="2024-12">December 2024</time>
<p class="news-summary">
NeurIPS, Vancouver
</p>
</article>
</div>
</section>
</div>
</div>
</td>
</tr>
</tbody></table>
<table class="publication-table" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;scroll-margin-top:96px;"><tbody>
<tr>
<td style="padding:20px 20px 0;width:100%;vertical-align:middle">
<div class="section-heading-row">
<h2 id="publications" class="section-heading">Selected Publications</h2>
<p class="section-subnote">for a complete list, see <a href="https://scholar.google.com/citations?user=x-BfbhEAAAAJ&amp;hl=en">Google Scholar</a>.</p>
</div>
</td>
</tr>
</tbody></table>
<table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/ensembling.png" width="100%" class="publication-img" alt="Teaser figure for the paper Ensembling Language Models with Sequential Monte Carlo">
</div>
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">Ensembling Language Models with Sequential Monte Carlo</h3>
<br>
<u class="small-font">Robin Chan</u>,
<small>Tianyu Liu</small>,
<small>Samuel Kiegeland</small>,
<small>Clemente Pasti</small>,
<small>Jacob Hoover Vigly</small>,
<small>Timothy J. O'Donnell</small>,
<small>Ryan Cotterell</small>,
<small>Tim Vieira</small>
<br>
<em class="small-font">Preprint</em><span class="small-font">, 2026 | </span> <a class="small-font" href="https://arxiv.org/pdf/2603.05432">pdf</a>
<br>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
Practitioners have access to an abundance of language models and prompting strategies for solving many language modeling tasks; yet prior work shows that modeling performance is highly sensitive to both choices.
Classical machine learning ensembling techniques offer a principled approach: aggregate predictions from multiple sources to achieve better performance than any single one.
However, applying ensembling to language models during decoding is challenging: naively aggregating next-token probabilities yields samples from a locally normalized, biased approximation of the generally intractable ensemble distribution over strings.
In this work, we introduce a unified framework for composing <i>K</i> language models into <i>f</i>-ensemble distributions for a wide range of functions <i>f</i>: ℝ<sub>≥0</sub><sup><i>K</i></sup> → ℝ<sub>≥0</sub>.
To sample from these distributions, we propose a byte-level sequential Monte Carlo (SMC) algorithm that operates in a shared character space, enabling ensembles of models with mismatching vocabularies and consistent sampling in the limit.
We evaluate a family of <i>f</i>-ensembles across prompt and model combinations for various structured text generation tasks, highlighting the benefits of alternative aggregation strategies over traditional probability averaging, and showing that better posterior approximations can yield better ensemble performance.
</small>
</div>
</div>
</td>
</tr>
</table>
<table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/pleasqlarify.png" width="100%" class="publication-img" alt="Screenshot of the PleaSQLarify interface">
</div>
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">PleaSQLarify: Visual Pragmatic Repair for Natural Language Database Interfaces</h3>
<br>
<u class="small-font">Robin Chan</u>,
<small>Rita Sevastjanova</small>,
<small>Menna El-Assady</small>
<br>
<em class="small-font">Proceedings of the ACM CHI Conference on Human Factors in Computing Systems </em> <span class="small-font"> 2026 | </span> <a class="small-font" href="https://arxiv.org/pdf/2603.01795">pdf</a>
<br>
<small style="color:#d97706;">Best Paper Award (top 1% of submissions) 🏆</small>
<br>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
Natural language database interfaces broaden data access, yet they remain brittle under input ambiguity.
Standard approaches often collapse uncertainty into a single query, offering little support for mismatches between user intent and system interpretation.
We reframe this challenge through pragmatic inference: while users economize expressions, systems operate on priors over the action space that may not align with the users'.
In this view, pragmatic repair—incremental clarification through minimal interaction—is a natural strategy for resolving underspecification.
We present <span style="font-variant: small-caps;">PleaSQLarify</span>, which operationalizes pragmatic repair by structuring interaction around interpretable decision variables that enable efficient clarification.
A visual interface complements this by surfacing the action space for exploration, requesting user disambiguation, and making belief updates traceable across turns. In a study with twelve participants, <span style="font-variant: small-caps;">PleaSQLarify</span> helped users recognize alternative interpretations and efficiently resolve ambiguity.
Our findings highlight pragmatic repair as a design principle that fosters effective user control in natural language interfaces.
</small>
</div>
</div>
<!-- <p class="publication-tags"><strong class="small-font">Interactive Systems </strong>·<strong class="small-font"> Language Model Explainability</strong></p> -->
</td>
</tr>
</table>
<!-- <table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/llm-analyzer.png" width="100%" class="publication-img" alt="Screenshot of the LLM Analyzer project">
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">Understanding Large Language Model Behaviors through Interactive Counterfactual Generation and Analysis</h3>
<br>
<small>Furui Cheng</small>,
<small>Vilém Zouhar</small>,
<u class="small-font">Robin Chan</u>,
<small>Daniel Fürst</small>,
<small>Hendrik Strobelt</small>,
<small>Menna El-Assady</small>
<br>
<em class="small-font">IEEE Transactions on Visualization and Computer Graphics (IEEE VIS'25) </em><span class="small-font">, 2025 | </span> <a class="small-font" href="https://www.semanticscholar.org/reader/2b253bfc97d3617c52201153c6d2191503ab98e4">pdf</a>
<br>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
Counterfactual examples are useful for exploring the decision boundaries of machine learning models and determining
feature attributions. How can we apply counterfactual-based methods to analyze and explain LLMs? We identify the following key
challenges. First, the generated textual counterfactuals should be meaningful and readable to users and thus can be mentally compared
to draw conclusions. Second, to make the solution scalable to long-form text, users should be equipped with tools to create batches of
counterfactuals from perturbations at various granularity levels and interactively analyze the results. In this paper, we tackle the above
challenges and contribute 1) a novel algorithm for generating batches of complete and meaningful textual counterfactuals by removing
and replacing text segments in different granularities, and 2) LLM Analyzer, an interactive visualization tool to help users understand
an LLM's behaviors by interactively inspecting and aggregating meaningful counterfactuals. We evaluate the proposed algorithm by
the grammatical correctness of its generated counterfactuals using 1,000 samples from medical, legal, finance, education, and news
datasets. In our experiments, 97.2% of the counterfactuals are grammatically correct. Through a use case, user studies, and feedback
from experts, we demonstrate the usefulness and usability of the proposed interactive visualization tool
</small>
</small>
</div>
</div>
</td>
</tr>
</table> -->
<table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/haystacks.png" width="100%" class="publication-img" alt="Screenshot of the Document Haystacks project">
</div>
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">Finding Needles in Document Haystacks: Augmenting Serendipitous Claim Retrieval Workflows</h3>
<br>
<small>Moritz Dück</small>,
<small>Steffen Holter</small>,
<u class="small-font">Robin Chan</u>,
<small>Rita Sevastjanova</small>,
<small>Menna El-Assady</small>
<br>
<em class="small-font">Proceedings of the ACM CHI Conference on Human Factors in Computing Systems, </em> <span class="small-font"> 2025 | </span> <a class="small-font" href="https://dl.acm.org/doi/pdf/10.1145/3706598.3713715">pdf</a>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
Preliminary exploration of vast text corpora for generating and validating hypotheses, typical in academic inquiry, requires flexible navigation and rapid validation of claims.
Navigating the corpus by titles, summaries, and abstracts might neglect information, whereas identifying the relevant context-specific claims through in-depth reading is unfeasible with rapidly increasing publication numbers.
Our paper identifies three typical user pathways for hypothesis exploration and operationalizes sentence-based retrieval combined with effective contextualization and provenance tracking in a unified workflow.
We contribute an interface that augments the previously laborious tasks of claim identification and consistency checking using NLP techniques while balancing user control and serendipity.
Use cases, expert interviews, and a user study with 10 participants demonstrate how the proposed workflow enables users to traverse literature corpora in novel and efficient ways.
For the evaluation, we instantiate the tool within two independent domains, providing novel insights into the analysis of political discourse and medical research.
</small>
</div>
</div>
</td>
</tr>
<!-- <tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/dialogue-augmentation.png" width="100%" class="publication-img" alt="Poster for the Dialogue Augmentation project">
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">A Design Space for Intelligent Dialogue Augmentation</h3>
<br>
<u class="small-font">Robin Chan</u>,
<small>Anne Marx</small>,
<small>Alison Kim</small>,
<small>Menna El-Assady</small>
<br>
<em class="small-font">Proceedings of the 30th International ACM Conference on Intelligent User Interfaces (IUI), </em> <span class="small-font"> 2025 | </span> <a class="small-font" href="https://dl.acm.org/doi/pdf/10.1145/3708359.3712096">pdf</a></span>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
The use of intelligent agents in communication is a growing trend aimed at enhancing the efficiency and quality of interactions.
As such, <em>dialogue augmentation systems</em> -- text processing systems that interactively enhance ongoing written or spoken communication -- are gaining significant popularity across domains.
While technical limitations had previously inhibited their real-time usage for effective communication augmentation, recent developments in language processing have improved their capabilities to contribute to dialogue as intelligent, emancipated, and proactive agents.
While other works on dialogue augmentation focus on evaluating design considerations for specific applications of these systems, we lack a unified understanding of the broader design principles that apply to dialogue more generally.
Through a literature review and mixed-methods analysis of 78 existing systems, we iteratively define a comprehensive design space for intelligent dialogue augmentation systems.
To further ground our analysis, we interweave Clark's models of dialogue with concepts in human-AI collaboration and discuss trends in the evolving role of dialogue augmentation systems along five dimensions -- dialogue context, augmentation context, task, interaction, and model.
Based on the identified trends, we discuss concrete challenges for broader adoption, highlighting the need to design <em>trusted</em>, <em>seamless</em>, and <em>timely</em> augmentations.
The design space contributes as a mechanism for researchers to facilitate defining design choices during development, situate their systems in the current landscape of works, and understand opportunities for future research.
</small>
</div>
</div>
</td>
</tr> -->
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/homotopy.png" class="publication-img" width="100%" alt="Diagram illustrating affine homotopy between language encoders">
</div>
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">On Affine Homotopy between Language Encoders</h3>
<br>
<u class="small-font">Robin Chan</u>,
<small>Reda Boumasmoud</small>,
<small>Anej Svete</small>,
<small>Yuxin Ren</small>,
<small>Qipeng Guo</small>,
<small>Zhijing Jin</small>,
<small>Shauli Ravfogel</small>,
<small>Mrinmaya Sachan</small>,
<small>Bernhard Schölkopf</small>,
<small>Menna El-Assady</small>,
<small>Ryan Cotterell</small>
<br>
<em class="small-font">Advances in Neural Information Processing Systems 38 (NeurIPS)</em><span class="small-font">, 2024 | </span> <a class="small-font" href="https://proceedings.neurips.cc/paper_files/paper/2024/file/86040ae1ecc64655bdbdfbbf774ead26-Paper-Conference.pdf">pdf</a>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
Pre-trained language encoders -- functions that represent text as vectors -- are an integral component of many NLP tasks.
We tackle a natural question in language encoder analysis: What does it mean for two encoders to be similar? We contend that
a faithful measure of similarity needs to be <em>intrinsic</em>, that is, task-independent, yet still be informative of <em>extrinsic</em>
similarity -- the performance on downstream tasks. It is common to consider two encoders similar if they are <em>homotopic</em>, i.e.,
if they can be aligned through some transformation. In this spirit, we study the properties of <em>affine</em> alignment of language
encoders and its implications on extrinsic similarity. We find that while affine alignment is fundamentally an asymmetric notion of
similarity, it is still informative of extrinsic similarity. We confirm this on datasets of natural language representations. Beyond
providing useful bounds on extrinsic similarity, affine intrinsic similarity also allows us to begin uncovering the structure of the
space of pre-trained encoders by defining an order over them.
</small>
</div>
</div>
</td>
</tr>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/api-integration.png" class="publication-img" width="100%" alt="Illustration for adapting LLMs for API integration">
</div>
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">Adapting LLMs for Structured Natural Language API Integration</h3>
<br>
<u class="small-font">Robin Chan</u>,
<small>Katsiaryna Mirylenka</small>,
<small>Thomas Gschwind</small>,
<small>Christoph Miksovic-Czasch</small>,
<small>Paolo Scotton</small>,
<small>Enrico Toniato</small>,
<small>Abdel Labbi</small>
<br>
<em class="small-font">Proceedings of EMNLP: Industry Track</em><span class="small-font">, 2024 | </span> <a class="small-font" href="https://aclanthology.org/2024.emnlp-industry.74.pdf">pdf</a>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
Integrating APIs is crucial for enterprise systems, enabling seamless application interaction within workflows.
However, the vast and diverse API landscape makes combining calls based on user intent a significant challenge.
Existing methods rely on Named Entity Recognition (NER) and knowledge graphs, but struggle with control flow structures like
conditionals and loops. We propose a novel framework that leverages the success of Large Language Models (LLMs)
in code generation for natural language API integration. Our approach involves fine-tuning an LLM on automatically generated
API flows derived from services' OpenAPI specifications. This aims to surpass NER-based methods and compare the effectiveness
of different tuning strategies. Specifically, we investigate the impact of enforcing syntax through constrained generation or
retrieval-augmented generation. To facilitate systematic comparison, we introduce targeted test suites that assess the generalization
capabilities and ability of these approaches to retain structured knowledge. We expect to observe that fine-tuned LLMs can: (a) learn
structural constraints implicitly during training, and (b) achieve significant improvements in both in-distribution and out-of-distribution
performance.
</small>
</div>
</div>
<!-- <p class="publication-tags"><strong class="small-font">Language Model Control </strong>·<strong class="small-font"> Parameter-Efficient Fine-Tuning </strong></p> -->
</td>
</tr>
</tbody></table>
<!-- <table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/nadav.png" width="100%" class="publication-img" alt="Diagram for probabilistic regular languages">
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">What Languages are Easy to Language-Model? A Perspective from Learning Probabilistic Regular Languages</h3>
<br>
<small>Nadav Borenstein</small>,
<small>Anej Svete</small>,
<u class="small-font">Robin Chan</u>,
<small>Josef Valvoda</small>,
<small>Franz Nowak</small>,
<small>Isabelle Augenstein</small>,
<small>Eleanor Chodroff</small>,
<small>Ryan Cotterell</small>
<br>
<em class="small-font">Proceedings of ACL,</em> <span class="small-font"> 2024 |</span> <a class="small-font" href="https://aclanthology.org/2024.acl-long.807.pdf">pdf</a></span>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
What can large language models learn? By definition, language models (LM) are distributions
over strings. Therefore, an intuitive way of
addressing the above question is to formalize
it as a matter of learnability of <em>classes</em> of distributions over strings. While prior work in this
direction focused on assessing the theoretical
limits, in contrast, we seek to understand the
empirical learnability. Unlike prior empirical
work, we evaluate neural LMs on their home
turf—learning probabilistic languages—rather
than as classifiers of formal languages. In
particular, we investigate the learnability of
regular LMs (RLMs) by RNN and Transformer
LMs. We empirically test the learnability of
RLMs as a function of various complexity
parameters of the RLM and the hidden state
size of the neural LM. We find that the RLM
rank, which corresponds to the size of linear
space spanned by the logits of its conditional
distributions, and the expected length of
sampled strings are strong and significant
predictors of learnability for both RNNs and
Transformers. Several other predictors also
reach significance, but with differing patterns
between RNNs and Transformers.
</small>
</div>
</div>
</td>
</tr>
</tbody></table>
<table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/rnn_website.png" width="100%" class="publication-img" alt="Graphic for representing regular languages as RNNs">
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">On Efficiently Representing Regular Languages as RNNs</h3>
<br>
<small>Anej Svete</small>,
<u class="small-font">Robin Chan</u>,
<small>Ryan Cotterell</small>
<br>
<em class="small-font">Findings of ACL</em><span class="small-font">, 2024 | </span> <a class="small-font" href="https://aclanthology.org/2024.findings-acl.244.pdf">pdf</a>
<br>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
Recent work by <a href="https://aclanthology.org/2020.emnlp-main.156/" class="small-font">Hewitt et al. (2020)</a> provides
a possible interpretation of the empirical success of recurrent neural networks (RNNs) as
language models (LMs). It shows that RNNs
can efficiently represent bounded hierarchical structures that are prevalent in human language. This suggests that RNNs' success might
be linked to their ability to model hierarchy.
However, a closer inspection of <a href="https://aclanthology.org/2020.emnlp-main.156/" class="small-font">Hewitt et al.'s (2020)</a> construction shows that it is not limited to hierarchical LMs, posing the question
of what other classes of LMs can be efficiently
represented by RNNs. To this end, we generalize their construction to show that RNNs
can efficiently represent a larger class of LMs:
Those that can be represented by a pushdown
automaton with a bounded stack and a generalized stack update function. This is analogous
to an automaton that keeps a memory of a fixed
number of symbols and updates the memory
with a simple update mechanism. Altogether, the efficiency of representing this
diverse class of LMs with RNN LMs suggests
novel interpretations of their inductive bias.
</small>
</div>
</div>
</td>
</tr>
</tbody></table> -->
<table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
</tbody></table>
<table class="publication-table publication-table--hoverable" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:20px;width:25%;vertical-align:middle">
<div class="one">
<div class="two">
<img src="images/dcc_website.png" width="100%" class="publication-img" alt="Screenshot of the Data-Constrained Counterfactuals project">
</div>
</div>
</td>
<td style="padding:20px;width:75%;vertical-align:middle">
<h3 class="paper-title">Which Spurious Correlations Impact Reasoning in NLI Models? A Visual Interactive Diagnosis through Data-Constrained Counterfactuals</h3>
<br>
<u class="small-font">Robin Chan</u>,
<small>Afra Amini</small>,
<small>Menna El-Assady</small>
<br>
<em class="small-font">Proceedings of ACL: System Demonstrations</em><span class="small-font">, 2023 | </span>
<a href="https://aclanthology.org/2023.acl-demo.44.pdf" class="small-font">pdf</a><small> </small>
<div class="abstract">
<div class="abstract-title" onclick="toggleAbstract(event)">
<small>Abstract <i class="fas fa-chevron-down arrow" aria-hidden="true"></i></small>
</div>
<div class="abstract-content">
<small>
We present a human-in-the-loop dashboard tailored to diagnosing potential spurious features that NLI models rely on for predictions.
The dashboard enables users to generate diverse and challenging examples by drawing inspiration from GPT-3 suggestions.
Additionally, users can receive feedback from a trained NLI model on how challenging the newly created example is and make refinements based on the feedback.
Through our investigation, we discover several categories of spurious correlations that impact the reasoning of NLI models, which we group into three categories:
Semantic Relevance, Logical Fallacies, and Bias. Based on our findings, we identify and describe various research opportunities, including diversifying
training data and assessing NLI models' robustness by creating adversarial test suites.
</small>
</div>
</div>
<!-- <p class="publication-tags"><strong class="small-font">Counterfactuals </strong>·<strong class="small-font"> Mixed-Initiative Learning </strong>·<strong class="small-font"> Language Model Biases</strong></p> -->
</td>
</tr>
</tbody></table>
<!-- <table class="publication-table" style="width:100%;border:0px;border-spacing:0px;border-collapse:separate;margin-right:auto;margin-left:auto;"><tbody>
<tr>
<td style="padding:0px">
<br>
<p class="footer-note">
<a href="https://github.com/jonbarron/jonbarron_website">Website source</a>. Consider using <a href="https://leonidk.com/">Leonid Keselman</a>'s <a href="https://github.com/leonidk/new_website">Jekyll fork</a> of this page.
</p>
</td>
</tr>
</tbody>
</table> -->
</div>
</div>
</td>
</tr>
</table>
</td>
</tr>
</table>
</div>
<script>
// Show or hide the abstract body that immediately follows an abstract title,
// flipping the chevron icon to point up (open) or down (closed) accordingly.
// An empty inline display ('') counts as hidden, matching the initial state.
function toggleAbstractByTitle(titleElement) {
  var abstractBody = titleElement.nextElementSibling;
  var chevron = titleElement.querySelector('.arrow');
  var isHidden = abstractBody.style.display === 'none' || abstractBody.style.display === '';
  abstractBody.style.display = isHidden ? 'block' : 'none';
  chevron.className = isHidden ? 'fas fa-chevron-up arrow' : 'fas fa-chevron-down arrow';
}
// Inline click handler for an abstract title (see onclick="toggleAbstract(event)").
// Stops the event from bubbling to the row-level handler, which would
// otherwise toggle the same abstract a second time.
function toggleAbstract(event) {
  var titleElement = event.currentTarget;
  event.stopPropagation();
  toggleAbstractByTitle(titleElement);
}
// Make each publication row in a hoverable table act as a disclosure toggle
// for its abstract. Clicking anywhere on the row opens/closes the abstract,
// except on links (preserve navigation) and inside the expanded abstract text
// (allow text selection). Rows with an abstract are also placed in the tab
// order and respond to Enter/Space, so the toggle is not mouse-only.
function setupPublicationRowToggle() {
  var publicationRows = document.querySelectorAll('.publication-table--hoverable > tbody > tr');
  publicationRows.forEach(function (row) {
    // Only rows that actually contain an abstract need to be interactive.
    var titleElement = row.querySelector('.abstract-title');
    if (!titleElement) {
      return;
    }
    function activate(event) {
      // Keep regular link behavior and avoid toggling while interacting with open abstract text.
      if (event.target.closest('a') || event.target.closest('.abstract-content')) {
        return;
      }
      toggleAbstractByTitle(titleElement);
    }
    row.addEventListener('click', activate);
    // Keyboard support: expose the row to keyboard users and activate on
    // Enter/Space like a native button would (preventDefault stops Space
    // from scrolling the page).
    row.tabIndex = 0;
    row.addEventListener('keydown', function (event) {
      if (event.key === 'Enter' || event.key === ' ') {
        event.preventDefault();
        activate(event);
      }
    });
  });
}
// After all assets finish loading: reveal the page (CSS keys off .is-loaded)
// and wire up the clickable publication rows.
window.addEventListener('load', function onPageLoad() {
  document.body.classList.add('is-loaded');
  setupPublicationRowToggle();
});
// Tag "attending" entries whose event month has already passed with the
// attending-item--past class. Comparison is at month granularity: an event
// only counts as past once the current month is strictly later than the
// event's month, so events earlier in the current month are not marked.
function markPastAttendingEvents() {
  var now = new Date();
  var thisMonthStart = new Date(now.getFullYear(), now.getMonth(), 1);
  document.querySelectorAll('.attending-item[data-event-date]').forEach(function (item) {
    // data-event-date is assumed to be a date-only string (e.g. "2025-07-01");
    // appending T00:00:00 makes it parse in local time rather than UTC.
    var parsed = new Date(item.getAttribute('data-event-date') + 'T00:00:00');
    if (isNaN(parsed.getTime())) {
      return; // unparsable date: leave the item unmarked
    }
    var eventMonthStart = new Date(parsed.getFullYear(), parsed.getMonth(), 1);
    if (eventMonthStart < thisMonthStart) {
      item.classList.add('attending-item--past');
    }
  });
}
// Runs immediately: the script element sits at the end of <body>, so the
// .attending-item nodes already exist in the DOM at this point.
markPastAttendingEvents();
</script>
</body>
</html>