diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib
index 9e9cf7c8..8e430a19 100644
--- a/_bibliography/papers.bib
+++ b/_bibliography/papers.bib
@@ -11,13 +11,12 @@ @inproceedings{Differentiated2021
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3474085.3475660},
doi = {10.1145/3474085.3475660},
- abstract = {Directly deploying a trained multi-modal classifier to a new environment usually leads to poor performance due to the well-known domain shift problem. Existing multi-modal domain adaptation methods treated each modality equally and optimize the sub-models of different modalities synchronously. However, as observed in this paper, the degrees of domain shift in different modalities are usually diverse. We propose a novel Differentiated Learning framework to make use of the diversity between multiple modalities for more effective domain adaptation. Specifically, we model the classifiers of different modalities as a group of teacher/student sub-models, and a novel Prototype based Reliability Measurement is presented to estimate the reliability of the recognition results made by each sub-model on the target domain. More reliable results are then picked up as teaching materials for all sub-models in the group. Considering the diversity of different modalities, each sub-model performs the Asynchronous Curriculum Learning by choosing the teaching materials from easy to hard measured by itself. Furthermore, a reliability-aware fusion scheme is proposed to combine all optimized sub-models to support final decision. Comprehensive experiments based on three multi-modal datasets with different learning tasks have been conducted, which show the superior performance of our model while comparing with state-of-the-art multi-modal domain adaptation models.},
booktitle = {Proceedings of the 29th ACM International Conference on Multimedia},
pages = {1322–1330},
numpages = {9},
keywords = {multi-modal analysis, domain adaptation, differentiated learning},
series = {MM '21},
- bibtex_show={true},
+ bibtex_show={false},
selected={true},
preview={dlmm.png}
}
@@ -31,8 +30,9 @@ @article{chen2021adversarial
pages={7079--7090},
year={2021},
publisher={IEEE},
- bibtex_show={true},
- preview={jm.png}
+ bibtex_show={false},
+ preview={jm.png},
+ pdf={Chen_Adversarial_Caching_Training.pdf}
}
@InProceedings{Liu_2025_CVPR,
@@ -42,11 +42,28 @@ @InProceedings{Liu_2025_CVPR
month = {June},
year = {2025},
pages = {5092-5101},
- bibtex_show={true},
+ bibtex_show={false},
selected={true},
preview={cvpr25_poster_1035_00.png},
pdf={Liu_MODfinity_Unsupervised_Domain_Adaptation_with_Multimodal_Information_Flow_Intertwining_CVPR_2025_paper.pdf}
}
+
+@article{Jie_2025,
+ title={FS-Diff: Semantic guidance and clarity-aware simultaneous multimodal image fusion and super-resolution},
+ volume={121},
+ ISSN={1566-2535},
+ url={http://dx.doi.org/10.1016/j.inffus.2025.103146},
+ DOI={10.1016/j.inffus.2025.103146},
+ journal={Information Fusion},
+ publisher={Elsevier BV},
+ author={Jie, Yuchan and Xu, Yushen and Li, Xiaosong and Zhou, Fuqiang and Lv, Jianming and Li, Huafeng},
+ year={2025},
+ month=sep,
+ pages={103146},
+ preview={Jie_FS-Diff.jpg},
+ pdf={Jie_FS-Diff.pdf}
+}
+
@article{PhysRev.47.777,
Usage notes:
preview adds a preview image; preview images go in the `assets/img/publication_preview/` directory
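
A minimal sketch of how these custom fields fit into a full entry; the citation key, title, authors, journal, and file names below are placeholders, not a real publication:

@article{example2025,
  title={An Example Paper Title},
  author={Author, First and Author, Second},
  journal={Example Journal},
  year={2025},
  bibtex_show={false},
  selected={true},
  preview={example.png},
  pdf={example.pdf}
}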
diff --git a/_pages/about.md b/_pages/about.md
index 9acdd2cd..de888cd7 100644
--- a/_pages/about.md
+++ b/_pages/about.md
@@ -59,22 +59,21 @@ selected_papers: false # includes a list of papers marked as "selected={true}"
line-height: 1.8;
}
- /* Carousel styles: increased height */
.carousel-showcase {
margin: 25px 0;
border-radius: 12px;
overflow: hidden;
box-shadow: 0 8px 20px rgba(0,0,0,0.12);
- height: 600px; /* increased from 380px to 600px */
+ height: 600px;
}
.carousel-item {
- height: 600px; /* increased to match */
+ height: 600px;
}
.carousel-item img {
width: 100%;
height: 100%;
- object-fit: contain; /* contain ensures the full image is shown */
- background-color: #f8f9fa; /* background color avoids blank space */
+ object-fit: contain;
+ background-color: #f8f9fa;
}
.carousel-caption {
background: linear-gradient(to top, rgba(0,0,0,0.8), transparent);
@@ -118,19 +117,15 @@ selected_papers: false # includes a list of papers marked as "selected={true}"
About the Lab
-
-
-
-
@@ -149,7 +144,6 @@ selected_papers: false # includes a list of papers marked as "selected={true}"
-
@@ -158,7 +152,7 @@ selected_papers: false # includes a list of papers marked as "selected={true}"
+ [1] Wang C, Peng J, Tao Z, et al. Uncertainty-guided robust labels refinement for unsupervised person re-identification. Neural Computing and Applications 36, 977–991 (2024).
+ [2] Peng J, Yu J, Wang C, et al. Adapt only once: Fast unsupervised person re-identification via relevance-aware guidance. Pattern Recognition 150, 110360 (2024).
+ [3] Zhang S, Wang C, Peng J. ABC-Learning: Attention-boosted contrastive learning for unsupervised person re-identification. Engineering Applications of Artificial Intelligence 133, 108344 (2024).
+