2026
書籍・チャプター
Sorry, no publications matched your criteria.
論文誌
Mitsuru Arita, Yugo Nakamura, Shigemi Ishida, Yutaka Arakawa
ZEL+: Wearable net-zero-energy lifelogging using heterogeneous energy harvesters for sustainable context sensing Journal Article
In: Pervasive and Mobile Computing, 2026.
@article{arita2026zel+,
title = {{ZEL+}: Wearable net-zero-energy lifelogging using heterogeneous energy harvesters for sustainable context sensing},
author = {Arita, Mitsuru and Nakamura, Yugo and Ishida, Shigemi and Arakawa, Yutaka},
url = {https://doi.org/10.1016/j.pmcj.2026.102180},
doi = {10.1016/j.pmcj.2026.102180},
year = {2026},
date = {2026-04-01},
urldate = {2026-04-01},
journal = {Pervasive and Mobile Computing},
abstract = {This paper presents ZEL+, a wearable lifelogging system designed to operate with net-zero energy consumption by leveraging multiple energy harvesting technologies for continuous context sensing. Self-powered wearable devices often encounter difficulties in environments with inconsistent or low-intensity ambient energy, particularly in indoor settings. To address this challenge, ZEL+ incorporates three key design features. First, it employs a power-switching mechanism based on dual comparators and a capacitor to manage surplus energy and support operation under varying lighting conditions. Second, the system integrates heterogeneous energy harvesters not only as power sources but also as sensing elements. Specifically, a dye-sensitized solar cell provides stable responses under low-light indoor environments, while an amorphous solar cell exhibits sensitivity to changes in ambient illumination; together with a piezoelectric element capturing motion-induced signals, these components contribute complementary cues for location and activity recognition. Third, a Spatial Consistency-Based Correction (SCC) algorithm is applied as a post-processing step to mitigate transient recognition errors and improve the coherence of inferred lifelogs. The system is implemented as a 192 g nametag-shaped wearable device and evaluated in a real-world office environment with 11 participants. Under a person-dependent setting, ZEL+ achieved an accuracy of 96.62% for 8-location place recognition and 97.09% for static/dynamic activity recognition, while maintaining robust performance on more fine-grained tasks. In terms of energy sustainability, the device sustained autonomous operation using harvested energy alone for approximately 93.97% of a standard 8-hour office workday. These results indicate that ZEL+ provides a practical and energy-sustainable solution for continuous lifelogging in indoor mobile computing environments.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Koushi Hiraoka, Yugo Nakamura, Yutaka Arakawa
EdgeVLM as a Privacy Filter: Towards Privacy-Aware Activity Recognition from Wearable Camera Using Image Captions Journal Article
In: IEEE Access, 2026.
@article{11367700,
title = {{EdgeVLM} as a Privacy Filter: Towards Privacy-Aware Activity Recognition from Wearable Camera Using Image Captions},
author = {Hiraoka, Koushi and Nakamura, Yugo and Arakawa, Yutaka},
url = {https://doi.org/10.1109/ACCESS.2026.3659343},
doi = {10.1109/ACCESS.2026.3659343},
year = {2026},
date = {2026-01-27},
journal = {IEEE Access},
abstract = {Egocentric video captured by wearable cameras offers rich contextual information for recognizing human activities in daily life. However, such video often includes sensitive personal details that must be protected from external threats. This creates a fundamental trade-off between preserving data utility and ensuring privacy, particularly in scenarios where continuous activity monitoring is required. In this study, we explore the concept of using EdgeVLM—a vision-language model designed to run entirely on edge devices—as a privacy filter for wearable camera data. We investigate the impact of caption granularity and demonstrate that our method locally transforms egocentric video into semantically rich textual image captions, enabling activity detection without transmitting raw visual content to the cloud. This edge-based processing preserves contextual cues while minimizing privacy risks through data minimization. To evaluate this approach, we conducted a quantitative user study (N=88) that found EdgeVLM-generated captions notably decreased participants’ privacy concerns compared to raw, blurred, or cartoonized images. Critically, from a bystander’s perspective, the proposed method demonstrated privacy protection levels statistically comparable to canny edge detection. Additionally, when combined with accelerometer data, the caption-based method achieved a 77.2% accuracy in recognizing desk activities such as typing, mousing, swiping, drinking, and writing—effectively replacing pixel-level visual information with text while maintaining performance comparable to models using unfiltered visuals. These findings indicate that EdgeVLM-based image captioning is a promising privacy-conscious solution for wearable camera applications, facilitating continuous activity recognition while protecting user privacy at the edge.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yugo Nakamura
AIoT-Driven Health Behavioral Security: Vision and Challenges Journal Article
In: ACM Transactions on Computing for Healthcare, vol. 7, iss. 1, pp. 1 - 7, 2026.
@article{nakamura2026aiot,
title = {{AIoT}-Driven Health Behavioral Security: Vision and Challenges},
author = {Nakamura, Yugo},
url = {https://doi.org/10.1145/3771551},
doi = {10.1145/3771551},
year = {2026},
date = {2026-01-14},
urldate = {2026-01-14},
journal = {ACM Transactions on Computing for Healthcare},
volume = {7},
number = {1},
pages = {1--7},
abstract = {As digital innovation progresses, individuals are increasingly, often unknowingly, exposed to digital temptations and deceptive tactics. These risks, now magnified by foundation AI models, call for protective measures beyond what current digital health technologies can offer. This article introduces Health Behavioral Security, a novel AIoT-driven framework that positions trusted AIoT devices as a distributed “System 0” layer—an always-on, pre-conscious filter that can sense, interpret, and modulate persuasive cues in real time. Guided by five behavior-centric constructs—Assets, Threats, Vulnerabilities, Risks, and Countermeasures—the framework could establish a secure behavioral ecosystem in which adaptive sensing and nudging loops help preserve autonomy while promoting well-being. We present this vision, outline key research challenges, and discuss future directions for advancing AIoT-based behavioral security in today’s increasingly digital society.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ryusei Fujimoto, Musashi Hadano, Yugo Nakamura, Yutaka Arakawa
C-AAE: A Compressive Anonymizing AutoEncoder for Privacy-Preserving Activity Recognition on Edge Devices Journal Article
In: ACM Transactions on Computing for Healthcare, 2026.
@article{fujimoto2026caae,
title = {{C-AAE}: A Compressive Anonymizing AutoEncoder for Privacy-Preserving Activity Recognition on Edge Devices},
author = {Fujimoto, Ryusei and Hadano, Musashi and Nakamura, Yugo and Arakawa, Yutaka},
doi = {10.1145/3793553},
year = {2026},
date = {2026-01-14},
journal = {ACM Transactions on Computing for Healthcare},
abstract = {Wearable accelerometers and gyroscopes capture fine-grained behavioral signatures that can inadvertently reveal user identities, making privacy protection essential for healthcare applications.
We present C-AAE, a lightweight compressive anonymizing autoencoder that performs on-device privacy filtering at the sensor edge.
The core idea of C-AAE is to integrate two complementary privacy filters: a learned, sensor-specific anonymization module, the Anonymizing AutoEncoder (AAE), and a learning-free, generic anonymization module, Adaptive Differential Pulse-Code Modulation (ADPCM).
The AAE locally learns to suppress identity cues while preserving activity-relevant representations, whereas ADPCM provides training-free anonymization through compression, further masking residual identity information and reducing communication cost.
Experiments on the MotionSense and PAMAP2 datasets show that C-AAE cuts user re-identification F1 scores by 10-15 percentage points relative to AAE alone, while keeping activity-recognition F1 within 5 percentage points of the unprotected baseline.
Implementation on a small-scale edge device (ESP32-WROOM-32) demonstrates real-time performance with markedly lower memory usage, latency, and power consumption.
Unlike differential-privacy mechanisms that rely on randomized noise, C-AAE offers a complementary, representation-level approach, enabling practical and resource-efficient on-device anonymization that remains compatible with formal DP frameworks for hybrid deployment on edge healthcare devices.
},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
We present C-AAE, a lightweight compressive anonymizing autoencoder that performs on-device privacy filtering at the sensor edge.
The core idea of C-AAE is to integrate two complementary privacy filters: a learned, sensor-specific anonymization module, the Anonymizing AutoEncoder (AAE), and a learning-free, generic anonymization module, Adaptive Differential Pulse-Code Modulation (ADPCM).
The AAE locally learns to suppress identity cues while preserving activity-relevant representations, whereas ADPCM provides training-free anonymization through compression, further masking residual identity information and reducing communication cost.
Experiments on the MotionSense and PAMAP2 datasets show that C-AAE cuts user re-identification F1 scores by 10-15 percentage points relative to AAE alone, while keeping activity-recognition F1 within 5 percentage points of the unprotected baseline.
Implementation on a small-scale edge device (ESP32-WROOM-32) demonstrates real-time performance with markedly lower memory usage, latency, and power consumption.
Unlike differential-privacy mechanisms that rely on randomized noise, C-AAE offers a complementary, representation-level approach, enabling practical and resource-efficient on-device anonymization that remains compatible with formal DP frameworks for hybrid deployment on edge healthcare devices.
Hyuckjin Choi, Sumin Jeong
Two-Time Scale RIS Phase Error and Channel Estimation Method for RIS-Aided Systems Journal Article
In: IEEE Wireless Communications Letters, vol. 15, pp. 1230-1234, 2026.
@article{ieeewcl_choi2026,
title = {Two-Time Scale {RIS} Phase Error and Channel Estimation Method for {RIS}-Aided Systems},
author = {Choi, Hyuckjin and Jeong, Sumin},
doi = {10.1109/LWC.2025.3650434},
year = {2026},
date = {2026-01-01},
journal = {IEEE Wireless Communications Letters},
volume = {15},
pages = {1230--1234},
abstract = {This letter addresses channel estimation for reconfigurable intelligent surface (RIS)-aided systems under hardware impairments, RIS phase offsets (POs). We propose a low-complexity PO estimator that averages downlink (DL) pilot responses across frequency, then reuses the DL phase to align uplink pilots at the user without extra training. Using only base station-RIS power inferred from DL, we develop a weighted least-squares cascaded-channel estimator. We also quantify PO impact on representative LS-based and two-timescale schemes. Simulations show robust PO compensation, sizable normalized mean square error reductions, and achievable-rate gains with limited pilots.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
国際会議
Toshiki Hayashida, Koushi Hiraoka, Isshin Nakao, Yugo Nakamura, Yutaka Arakawa
SenStick-EyeMeet: Distributed Multimodal Sensing of Nonverbal Interactions to Augment LLM-Assisted Meeting Understanding Proceedings Article
In: ICDCN 2026, 2026.
@inproceedings{hayashida2026ICDCN,
title = {{SenStick-EyeMeet}: Distributed Multimodal Sensing of Nonverbal Interactions to Augment {LLM}-Assisted Meeting Understanding},
author = {Hayashida, Toshiki and Hiraoka, Koushi and Nakao, Isshin and Nakamura, Yugo and Arakawa, Yutaka},
year = {2026},
date = {2026-01-06},
urldate = {2026-01-06},
booktitle = {ICDCN 2026},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Koushi Hiraoka, Yugo Nakamura, Yutaka Arakawa
Exploring Fingertip Interaction Using Surface Acoustic Wave Sensing with a VPU Ring Proceedings Article
In: ICDCN2026, 2026.
@inproceedings{hiraoka2026ICDCN,
title = {Exploring Fingertip Interaction Using Surface Acoustic Wave Sensing with a {VPU} Ring},
author = {Hiraoka, Koushi and Nakamura, Yugo and Arakawa, Yutaka},
year = {2026},
date = {2026-01-06},
urldate = {2026-01-06},
booktitle = {ICDCN 2026},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
研究会・シンポジウム
Sorry, no publications matched your criteria.
