You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Copy file name to clipboardExpand all lines: config/publications.ts
+11-11Lines changed: 11 additions & 11 deletions
Original file line number
Diff line number
Diff line change
@@ -20,6 +20,17 @@ export interface Publication {
20
20
}
21
21
22
22
export const publications: Publication[] = [
23
+
{
24
+
title: "Non-Markovian Discrete Diffusion with Causal Language Models",
25
+
authors: "Yangtian Zhang, Sizhuang He, Daniel Levine, Lawrence Zhao, David Zhang, Syed A. Rizvi, Shiyang Zhang, Emanuele Zappala, Rex Ying, David van Dijk",
26
+
venue: "NeurIPS 2025",
27
+
page: "caddi",
28
+
code: null,
29
+
paper: "https://arxiv.org/abs/2502.09767",
30
+
abstract: "Discrete diffusion models offer a flexible, controllable approach to structured sequence generation, yet they still lag behind causal language models in expressive power. A key limitation lies in their reliance on the Markovian assumption, which restricts each step to condition only on the current state and causes errors to accumulate. CaDDi lifts this constraint by conditioning on the entire generative trajectory, unifying sequential (causal) and temporal (diffusion) reasoning in a single transformer that can reuse pretrained language model weights.",
31
+
impact: "By bridging diffusion and autoregressive paradigms, CaDDi delivers stronger discrete sequence generation while remaining compatible with existing causal LLM infrastructure, opening the door to controllable editing and refinement with minimal architectural changes.",
32
+
tags: [Tag.GenerativeModel],
33
+
},
23
34
{
24
35
title: "Mixture-of-Personas Language Models for Population Simulation",
25
36
authors: "Ngoc Bui, Hieu Trung Nguyen, Shantanu Kumar, Julian Theodore, Weikang Qiu, Viet Anh Nguyen, Rex Ying",
impact: "MindLLM achieves state-of-the-art performance on a wide range of fMRI-to-text decoding tasks, and demonstrates strong generalization ability to unseen subjects and tasks. This work paves the way for future research on high-quality fMRI-to-text decoding.",
54
65
tags: [Tag.MultiModalFoundationModel],
55
66
},
56
-
{
57
-
title: "Non-Markovian Discrete Diffusion with Causal Language Models",
58
-
authors: "Yangtian Zhang, Sizhuang He, Daniel Levine, Lawrence Zhao, David Zhang, Syed A. Rizvi, Shiyang Zhang, Emanuele Zappala, Rex Ying, David van Dijk",
59
-
venue: "NeurIPS 2025",
60
-
page: "caddi",
61
-
code: null,
62
-
paper: "https://arxiv.org/abs/2502.09767",
63
-
abstract: "Discrete diffusion models offer a flexible, controllable approach to structured sequence generation, yet they still lag behind causal language models in expressive power. A key limitation lies in their reliance on the Markovian assumption, which restricts each step to condition only on the current state and causes errors to accumulate. CaDDi lifts this constraint by conditioning on the entire generative trajectory, unifying sequential (causal) and temporal (diffusion) reasoning in a single transformer that can reuse pretrained language model weights.",
64
-
impact: "By bridging diffusion and autoregressive paradigms, CaDDi delivers stronger discrete sequence generation while remaining compatible with existing causal LLM infrastructure, opening the door to controllable editing and refinement with minimal architectural changes.",
65
-
tags: [Tag.GenerativeModel],
66
-
},
67
67
{
68
68
title: "Scalable Generation of Spatial Transcriptomics from Histology Images via Whole-Slide Flow Matching",
0 commit comments