diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib index 9e9cf7c8..8e430a19 100644 --- a/_bibliography/papers.bib +++ b/_bibliography/papers.bib @@ -11,13 +11,12 @@ @inproceedings{Differentiated2021 address = {New York, NY, USA}, url = {https://doi.org/10.1145/3474085.3475660}, doi = {10.1145/3474085.3475660}, - abstract = {Directly deploying a trained multi-modal classifier to a new environment usually leads to poor performance due to the well-known domain shift problem. Existing multi-modal domain adaptation methods treated each modality equally and optimize the sub-models of different modalities synchronously. However, as observed in this paper, the degrees of domain shift in different modalities are usually diverse. We propose a novel Differentiated Learning framework to make use of the diversity between multiple modalities for more effective domain adaptation. Specifically, we model the classifiers of different modalities as a group of teacher/student sub-models, and a novel Prototype based Reliability Measurement is presented to estimate the reliability of the recognition results made by each sub-model on the target domain. More reliable results are then picked up as teaching materials for all sub-models in the group. Considering the diversity of different modalities, each sub-model performs the Asynchronous Curriculum Learning by choosing the teaching materials from easy to hard measured by itself. Furthermore, a reliability-aware fusion scheme is proposed to combine all optimized sub-models to support final decision. 
Comprehensive experiments based on three multi-modal datasets with different learning tasks have been conducted, which show the superior performance of our model while comparing with state-of-the-art multi-modal domain adaptation models.}, booktitle = {Proceedings of the 29th ACM International Conference on Multimedia}, pages = {1322–1330}, numpages = {9}, keywords = {multi-modal analysis, domain adaptation, differentiated learning}, series = {MM '21}, - bibtex_show={true}, + bibtex_show={false}, selected={true}, preview={dlmm.png} } @@ -31,8 +30,9 @@ @article{chen2021adversarial pages={7079--7090}, year={2021}, publisher={IEEE}, - bibtex_show={true}, - preview={jm.png} + bibtex_show={false}, + preview={jm.png}, + pdf={Chen_Adversarial_Caching_Training.pdf} } @InProceedings{Liu_2025_CVPR, @@ -42,11 +42,28 @@ @InProceedings{Liu_2025_CVPR month = {June}, year = {2025}, pages = {5092-5101}, - bibtex_show={true}, + bibtex_show={false}, selected={true}, preview={cvpr25_poster_1035_00.png}, pdf={Liu_MODfinity_Unsupervised_Domain_Adaptation_with_Multimodal_Information_Flow_Intertwining_CVPR_2025_paper.pdf} } + +@article{Jie_2025, + title={FS-Diff: Semantic guidance and clarity-aware simultaneous multimodal image fusion and super-resolution}, + volume={121}, + ISSN={1566-2535}, + url={http://dx.doi.org/10.1016/j.inffus.2025.103146}, + DOI={10.1016/j.inffus.2025.103146}, + journal={Information Fusion}, + publisher={Elsevier BV}, + author={Jie, Yuchan and Xu, Yushen and Li, Xiaosong and Zhou, Fuqiang and Lv, Jianming and Li, Huafeng}, + year={2025}, + month=sep, + pages={103146}, + preview={Jie_FS-Diff.jpg}, + pdf={Jie_FS-Diff.pdf} +} + article{PhysRev.47.777, 使用示例: preview 加入预览的图片,预览的图片放在 `assets/img/publication_preview/` 目录下 diff --git a/_pages/about.md b/_pages/about.md index 9acdd2cd..de888cd7 100644 --- a/_pages/about.md +++ b/_pages/about.md @@ -59,22 +59,21 @@ selected_papers: false # includes a list of papers marked as "selected={true}" line-height: 1.8; } - /* 轮播图样式 
- 增加高度 */ .carousel-showcase { margin: 25px 0; border-radius: 12px; overflow: hidden; box-shadow: 0 8px 20px rgba(0,0,0,0.12); - height: 600px; /* 从380px增加到600px */ + height: 600px; } .carousel-item { - height: 600px; /* 同步增加 */ + height: 600px; } .carousel-item img { width: 100%; height: 100%; - object-fit: contain; /* 改为contain确保完整显示 */ - background-color: #f8f9fa; /* 添加背景色避免空白 */ + object-fit: contain; + background-color: #f8f9fa; } .carousel-caption { background: linear-gradient(to top, rgba(0,0,0,0.8), transparent); @@ -118,19 +117,15 @@ selected_papers: false # includes a list of papers marked as "selected={true}"

实验室简介

- -

本实验室聚焦人工智能前沿领域,致力于多模态深度学习的核心技术研究与应用创新。团队重点攻关跨模态信息融合与鲁棒特征学习,在计算机视觉与信号处理领域取得突破性进展。

+
-
1
+
1
-

图像去噪与增强

-

研发新一代基于深度学习的图像复原算法,针对低光照、医学影像、遥感图像等复杂场景,实现高保真噪声抑制与细节重建。

+

弹性记忆网络(EMN)架构创新

+

首创时空高效的弹性记忆网络框架,通过参数压缩与映射的创新架构,在保持模型精度的同时,大幅提升模型推理速度,显著降低硬件资源消耗。

-
2
+
2
-

弹性记忆网络(EMN)架构创新

-

首创时空高效的弹性记忆网络框架,通过参数压缩与映射的创新架构,在保持模型精度的同时,大幅提升模型推理速度,显著降低硬件资源消耗。

+

多模态协同学习

+

探索视觉-语言-音频的联合表示学习,构建跨模态自监督预训练模型,为智能医疗诊断、工业质检、自动驾驶等场景提供统一感知解决方案。

-
3
+
3
-

多模态协同学习

-

探索视觉-语言-音频的联合表示学习,构建跨模态自监督预训练模型,为智能医疗诊断、工业质检、自动驾驶等场景提供统一感知解决方案。

+

图像去噪与增强

+

研发新一代基于深度学习的图像复原算法,针对低光照、医学影像、遥感图像等复杂场景,实现高保真噪声抑制与细节重建。

diff --git a/_projects/HeXuyi.md b/_projects/HeXuyi.md index 61e9a3c3..4c3a11aa 100644 --- a/_projects/HeXuyi.md +++ b/_projects/HeXuyi.md @@ -2,7 +2,7 @@ layout: page title: 何旭怡 description: 25级在读博士 -img: assets/img/hxy.jpg +img: assets/img/hxy2.jpg importance: 2025 category: Current students related_publications: false @@ -47,6 +47,9 @@ related_publications: false
{% include figure.liquid loading="eager" path="assets/img/hxy1.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %}
+
+ {% include figure.liquid loading="eager" path="assets/img/hxy.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} +
diff --git a/_projects/WangChengjun.md b/_projects/WangChengjun.md new file mode 100644 index 00000000..86de6b8d --- /dev/null +++ b/_projects/WangChengjun.md @@ -0,0 +1,69 @@ +--- +layout: page +title: 王铖俊 +description: 24级在读博士 +img: assets/img/lsl.jpg +importance: 2024 +category: Current students +related_publications: false +--- + +
+
+ +
+

个人信息

+

硕士院校:河北大学

+
+ +
+

联系方式

+

电子邮箱:cswangchengjun@mail.scut.edu.cn

+
+ +
+

研究方向

+

多模态、持续学习

+
+
+
+ {% include figure.liquid path="assets/img/lsl.jpg" title="个人图片" class="img-fluid rounded z-depth-1" %} +
+
+ +

个人成果

+
+ [1] Wang C, Peng J, Tao Z, et al. Uncertainty-guided Robust labels refinement for unsupervised person re-identification. Neural Comput & Applic 36, 977–991 (2024). + [2] Peng J, Yu J, Wang C, et al. Adapt only once: Fast unsupervised person re-identification via relevance-aware guidance. Pattern Recognition 150, 110360 (2024). + [3] Zhang S, Wang C, Peng J. ABC-Learning: Attention-Boosted Contrastive Learning for unsupervised person re-identification. Engineering Applications of Artificial Intelligence 133, 108344 (2024). +
+ +

学生风采

+
+
+ {% include figure.liquid loading="eager" path="assets/img/lsl1.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} +
+
+ +
+
+ {% include figure.liquid loading="eager" path="assets/img/lsl2.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} +
+
+ {% include figure.liquid loading="eager" path="assets/img/lsl7.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} +
+
+ +
+
+ {% include figure.liquid loading="eager" path="assets/img/lsl6.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} +
+
+ {% include figure.liquid loading="eager" path="assets/img/lsl3.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} +
+
+ + +
+ 华南理工大学 +
diff --git a/_projects/YaoYiyang.md b/_projects/YaoYiyang.md index 26aee2c6..a3a142de 100644 --- a/_projects/YaoYiyang.md +++ b/_projects/YaoYiyang.md @@ -2,7 +2,7 @@ layout: page title: 姚一阳 description: 25级在读博士 -img: assets/img/lsl.jpg +img: assets/img/yyy.jpg importance: 2025 category: Current students related_publications: false @@ -39,25 +39,25 @@ related_publications: false

学生风采

- {% include figure.liquid loading="eager" path="assets/img/lsl1.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} + {% include figure.liquid loading="eager" path="assets/img/yyy1.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %}
- {% include figure.liquid loading="eager" path="assets/img/lsl2.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} + {% include figure.liquid loading="eager" path="assets/img/yyy2.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %}
- {% include figure.liquid loading="eager" path="assets/img/lsl7.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} + {% include figure.liquid loading="eager" path="assets/img/yyy3.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %}
- {% include figure.liquid loading="eager" path="assets/img/lsl6.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} + {% include figure.liquid loading="eager" path="assets/img/yyy5.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %}
- {% include figure.liquid loading="eager" path="assets/img/lsl3.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %} + {% include figure.liquid loading="eager" path="assets/img/yyy4.jpg" title="生活照片" class="img-fluid rounded z-depth-1" %}
diff --git a/assets/img/hxy2.jpg b/assets/img/hxy2.jpg new file mode 100644 index 00000000..1524a9f6 Binary files /dev/null and b/assets/img/hxy2.jpg differ diff --git a/assets/img/publication_preview/Jie_FS-Diff.jpg b/assets/img/publication_preview/Jie_FS-Diff.jpg new file mode 100644 index 00000000..6db26545 Binary files /dev/null and b/assets/img/publication_preview/Jie_FS-Diff.jpg differ diff --git a/assets/img/yyy.jpg b/assets/img/yyy.jpg new file mode 100644 index 00000000..5ac92d9c Binary files /dev/null and b/assets/img/yyy.jpg differ diff --git a/assets/img/yyy1.jpg b/assets/img/yyy1.jpg new file mode 100644 index 00000000..7074faa4 Binary files /dev/null and b/assets/img/yyy1.jpg differ diff --git a/assets/img/yyy2.jpg b/assets/img/yyy2.jpg new file mode 100644 index 00000000..a4c0ba79 Binary files /dev/null and b/assets/img/yyy2.jpg differ diff --git a/assets/img/yyy3.jpg b/assets/img/yyy3.jpg new file mode 100644 index 00000000..7b31e7cf Binary files /dev/null and b/assets/img/yyy3.jpg differ diff --git a/assets/img/yyy4.jpg b/assets/img/yyy4.jpg new file mode 100644 index 00000000..d6f101b6 Binary files /dev/null and b/assets/img/yyy4.jpg differ diff --git a/assets/img/yyy5.jpg b/assets/img/yyy5.jpg new file mode 100644 index 00000000..aa552387 Binary files /dev/null and b/assets/img/yyy5.jpg differ diff --git a/assets/pdf/Chen_Adversarial_Caching_Training.pdf b/assets/pdf/Chen_Adversarial_Caching_Training.pdf new file mode 100644 index 00000000..1e8f9001 Binary files /dev/null and b/assets/pdf/Chen_Adversarial_Caching_Training.pdf differ diff --git a/assets/pdf/Jie_FS-Diff.pdf b/assets/pdf/Jie_FS-Diff.pdf new file mode 100644 index 00000000..43ff607b Binary files /dev/null and b/assets/pdf/Jie_FS-Diff.pdf differ