//! ELF executable loader: parses 32-bit ELF images (64-bit is stubbed) and
//! builds the new process address space, initial stack and auxiliary vector.
  1. use super::{LoadInfo, ELF_MAGIC};
  2. use crate::io::UninitBuffer;
  3. use crate::kernel::task::loader::aux_vec::{AuxKey, AuxVec};
  4. use crate::path::Path;
  5. use crate::{
  6. io::ByteBuffer,
  7. kernel::{
  8. constants::ENOEXEC,
  9. mem::{FileMapping, MMList, Mapping, Permission},
  10. vfs::{dentry::Dentry, FsContext},
  11. },
  12. prelude::*,
  13. };
  14. use align_ext::AlignExt;
  15. use alloc::vec::Vec;
  16. use alloc::{ffi::CString, sync::Arc};
  17. use eonix_mm::{
  18. address::{Addr, AddrOps as _, VAddr},
  19. paging::PAGE_SIZE,
  20. };
  21. use xmas_elf::{
  22. header::{self, Class, HeaderPt1, Machine_, Type_},
  23. program::{self, ProgramHeader32, ProgramHeader64},
  24. P32, P64,
  25. };
/// Size of the initial user stack mapping (8 MiB), mapped just below
/// the stack base address.
const INIT_STACK_SIZE: usize = 0x80_0000;
/// Tail of the ELF file header following `e_ident`, generic over the
/// pointer width (`P32` for ELF32, `P64` for ELF64).
///
/// `#[repr(C)]` keeps the field order/layout matching the on-disk
/// `Elf{32,64}_Ehdr` so it can be read directly from the file.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct HeaderPt2<P> {
    pub type_: Type_,       // e_type: object file type (EXEC, DYN, ...)
    pub machine: Machine_,  // e_machine: target architecture
    pub version: u32,       // e_version
    pub entry_point: P,     // e_entry: entry point virtual address
    pub ph_offset: P,       // e_phoff: program header table file offset
    pub sh_offset: P,       // e_shoff: section header table file offset
    pub flags: u32,         // e_flags: processor-specific flags
    pub header_size: u16,   // e_ehsize
    pub ph_entry_size: u16, // e_phentsize
    pub ph_count: u16,      // e_phnum
    pub sh_entry_size: u16, // e_shentsize
    pub sh_count: u16,      // e_shnum
    pub sh_str_index: u16,  // e_shstrndx
}
/// Complete ELF file header: the identification bytes (`e_ident`) followed
/// by the width-dependent remainder. Read verbatim from offset 0 of the file.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct ElfHeader<P> {
    pub pt1: HeaderPt1,
    pub pt2: HeaderPt2<P>,
}
/// Result of mapping the dynamic linker (ld.so) into the new address space.
pub struct LdsoLoadInfo {
    /// Base address the interpreter image was mapped at (AT_BASE).
    pub base: VAddr,
    /// Interpreter entry point (base + its e_entry); execution starts here
    /// for dynamically linked programs.
    pub entry_ip: VAddr,
}
/// A parsed 32-bit ELF image: the backing file, its header, and the full
/// program header table.
pub struct Elf32 {
    pub file: Arc<Dentry>,
    pub elf_header: ElfHeader<P32>,
    pub program_headers: Vec<ProgramHeader32>,
}
  59. impl Elf32 {
  60. const DYN_BASE_ADDR: usize = 0x4000_0000;
  61. const LDSO_BASE_ADDR: usize = 0xf000_0000;
  62. const STACK_BASE_ADDR: usize = 0xffff_0000;
  63. pub fn parse(elf_file: Arc<Dentry>) -> KResult<Self> {
  64. let mut elf_header = UninitBuffer::<ElfHeader<P32>>::new();
  65. elf_file.read(&mut elf_header, 0)?;
  66. let elf_header = elf_header.assume_init().map_err(|_| ENOEXEC)?;
  67. let ph_offset = elf_header.pt2.ph_offset;
  68. let ph_count = elf_header.pt2.ph_count;
  69. let mut program_headers = vec![ProgramHeader32::default(); ph_count as usize];
  70. elf_file.read(
  71. &mut ByteBuffer::from(program_headers.as_mut_slice()),
  72. ph_offset as usize,
  73. )?;
  74. Ok(Self {
  75. file: elf_file,
  76. elf_header,
  77. program_headers,
  78. })
  79. }
  80. fn is_shared_object(&self) -> bool {
  81. self.elf_header.pt2.type_.as_type() == header::Type::SharedObject
  82. }
  83. fn entry_point(&self) -> u32 {
  84. self.elf_header.pt2.entry_point
  85. }
  86. fn ph_count(&self) -> u16 {
  87. self.elf_header.pt2.ph_count
  88. }
  89. fn ph_offset(&self) -> u32 {
  90. self.elf_header.pt2.ph_offset
  91. }
  92. fn ph_entry_size(&self) -> u16 {
  93. self.elf_header.pt2.ph_entry_size
  94. }
  95. fn ph_addr(&self) -> KResult<u32> {
  96. let ph_offset = self.ph_offset();
  97. for program_header in &self.program_headers {
  98. if program_header.offset <= ph_offset
  99. && ph_offset < program_header.offset + program_header.file_size
  100. {
  101. return Ok(ph_offset - program_header.offset + program_header.virtual_addr);
  102. }
  103. }
  104. Err(ENOEXEC)
  105. }
  106. pub fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
  107. let mm_list = MMList::new();
  108. // Load Segments
  109. let (elf_base, data_segment_end) = self.load_segments(&mm_list)?;
  110. // Load ldso(if have)
  111. let ldso_load_info = self.load_ldso(&mm_list)?;
  112. // Heap
  113. mm_list.register_break(data_segment_end + 0x10000);
  114. let aux_vec = Elf32::init_aux_vec(
  115. self,
  116. elf_base,
  117. ldso_load_info
  118. .as_ref()
  119. .map(|ldso_load_info| ldso_load_info.base),
  120. )?;
  121. // Map stack
  122. let sp = Elf32::create_and_init_stack(&mm_list, args, envs, aux_vec)?;
  123. let entry_ip = if let Some(ldso_load_info) = ldso_load_info {
  124. // Normal shared object(DYN)
  125. ldso_load_info.entry_ip.into()
  126. } else if self.is_shared_object() {
  127. // ldso itself
  128. elf_base + self.entry_point() as usize
  129. } else {
  130. // statically linked executable
  131. (self.entry_point() as usize).into()
  132. };
  133. Ok(LoadInfo {
  134. entry_ip,
  135. sp,
  136. mm_list,
  137. })
  138. }
  139. fn init_aux_vec(
  140. elf: &Elf32,
  141. elf_base: VAddr,
  142. ldso_base: Option<VAddr>,
  143. ) -> KResult<AuxVec<u32>> {
  144. let mut aux_vec: AuxVec<u32> = AuxVec::new();
  145. let ph_addr = if elf.is_shared_object() {
  146. elf_base.addr() as u32 + elf.ph_addr()?
  147. } else {
  148. elf.ph_addr()?
  149. };
  150. aux_vec.set(AuxKey::AT_PAGESZ, PAGE_SIZE as u32)?;
  151. aux_vec.set(AuxKey::AT_PHDR, ph_addr)?;
  152. aux_vec.set(AuxKey::AT_PHNUM, elf.ph_count() as u32)?;
  153. aux_vec.set(AuxKey::AT_PHENT, elf.ph_entry_size() as u32)?;
  154. let elf_entry = if elf.is_shared_object() {
  155. elf_base.addr() as u32 + elf.entry_point()
  156. } else {
  157. elf.entry_point()
  158. };
  159. aux_vec.set(AuxKey::AT_ENTRY, elf_entry)?;
  160. if let Some(ldso_base) = ldso_base {
  161. aux_vec.set(AuxKey::AT_BASE, ldso_base.addr() as u32)?;
  162. }
  163. Ok(aux_vec)
  164. }
  165. pub fn load_segments(&self, mm_list: &MMList) -> KResult<(VAddr, VAddr)> {
  166. let base: VAddr = if self.is_shared_object() {
  167. Elf32::DYN_BASE_ADDR
  168. } else {
  169. 0
  170. }
  171. .into();
  172. let mut segments_end = VAddr::NULL;
  173. for program_header in &self.program_headers {
  174. let type_ = program_header.get_type().map_err(|_| ENOEXEC)?;
  175. if type_ == program::Type::Load {
  176. let segment_end = self.load_segment(program_header, mm_list, base)?;
  177. if segment_end > segments_end {
  178. segments_end = segment_end;
  179. }
  180. }
  181. }
  182. Ok((base, segments_end))
  183. }
  184. pub fn load_segment(
  185. &self,
  186. program_header: &ProgramHeader32,
  187. mm_list: &MMList,
  188. base_addr: VAddr,
  189. ) -> KResult<VAddr> {
  190. let virtual_addr = base_addr + program_header.virtual_addr as usize;
  191. let vmem_vaddr_end = virtual_addr + program_header.mem_size as usize;
  192. let load_vaddr_end = virtual_addr + program_header.file_size as usize;
  193. let vmap_start = virtual_addr.floor();
  194. let vmem_len = vmem_vaddr_end.ceil() - vmap_start;
  195. let file_len = load_vaddr_end.ceil() - vmap_start;
  196. let file_offset = (program_header.offset as usize).align_down(PAGE_SIZE);
  197. let permission = Permission {
  198. read: program_header.flags.is_read(),
  199. write: program_header.flags.is_write(),
  200. execute: program_header.flags.is_execute(),
  201. };
  202. if file_len != 0 {
  203. let real_file_length = load_vaddr_end - vmap_start;
  204. mm_list.mmap_fixed(
  205. vmap_start,
  206. file_len,
  207. Mapping::File(FileMapping::new(
  208. self.file.clone(),
  209. file_offset,
  210. real_file_length,
  211. )),
  212. permission,
  213. )?;
  214. }
  215. if vmem_len > file_len {
  216. mm_list.mmap_fixed(
  217. vmap_start + file_len,
  218. vmem_len - file_len,
  219. Mapping::Anonymous,
  220. permission,
  221. )?;
  222. }
  223. Ok(vmap_start + vmem_len)
  224. }
  225. pub fn load_ldso(&self, mm_list: &MMList) -> KResult<Option<LdsoLoadInfo>> {
  226. let ldso_path = self.ldso_path()?;
  227. if let Some(ldso_path) = ldso_path {
  228. let fs_context = FsContext::global();
  229. let ldso_file =
  230. Dentry::open(fs_context, Path::new(ldso_path.as_bytes()).unwrap(), true).unwrap();
  231. let ldso_elf = Elf32::parse(ldso_file).unwrap();
  232. let base = VAddr::from(Elf32::LDSO_BASE_ADDR);
  233. for program_header in &ldso_elf.program_headers {
  234. let type_ = program_header.get_type().map_err(|_| ENOEXEC)?;
  235. if type_ == program::Type::Load {
  236. ldso_elf.load_segment(program_header, mm_list, base)?;
  237. }
  238. }
  239. return Ok(Some(LdsoLoadInfo {
  240. base,
  241. entry_ip: base + ldso_elf.entry_point() as usize,
  242. }));
  243. }
  244. Ok(None)
  245. }
  246. fn ldso_path(&self) -> KResult<Option<String>> {
  247. for program_header in &self.program_headers {
  248. let type_ = program_header.get_type().map_err(|_| ENOEXEC)?;
  249. if type_ == program::Type::Interp {
  250. let file_size = program_header.file_size as usize;
  251. let file_offset = program_header.offset as usize;
  252. let mut ldso_vec = vec![0u8; file_size - 1]; // -1 due to '\0'
  253. self.file
  254. .read(&mut ByteBuffer::from(ldso_vec.as_mut_slice()), file_offset)?;
  255. let ldso_path = String::from_utf8(ldso_vec).map_err(|_| ENOEXEC)?;
  256. return Ok(Some(ldso_path));
  257. }
  258. }
  259. Ok(None)
  260. }
  261. fn create_and_init_stack(
  262. mm_list: &MMList,
  263. args: Vec<CString>,
  264. envs: Vec<CString>,
  265. aux_vec: AuxVec<u32>,
  266. ) -> KResult<VAddr> {
  267. mm_list.mmap_fixed(
  268. VAddr::from(Elf32::STACK_BASE_ADDR - INIT_STACK_SIZE),
  269. INIT_STACK_SIZE,
  270. Mapping::Anonymous,
  271. Permission {
  272. read: true,
  273. write: true,
  274. execute: false,
  275. },
  276. )?;
  277. let mut sp = VAddr::from(Elf32::STACK_BASE_ADDR);
  278. let env_pointers = Elf32::push_strings(&mm_list, &mut sp, envs)?;
  279. let arg_pointers = Elf32::push_strings(&mm_list, &mut sp, args)?;
  280. let argc = arg_pointers.len() as u32;
  281. Elf32::stack_alignment(&mut sp, &arg_pointers, &env_pointers, &aux_vec);
  282. Elf32::push_aux_vec(&mm_list, &mut sp, aux_vec)?;
  283. Elf32::push_pointers(&mm_list, &mut sp, env_pointers)?;
  284. Elf32::push_pointers(&mm_list, &mut sp, arg_pointers)?;
  285. Elf32::push_u32(&mm_list, &mut sp, argc)?;
  286. assert_eq!(sp.floor_to(16), sp);
  287. Ok(sp)
  288. }
  289. fn stack_alignment(
  290. sp: &mut VAddr,
  291. arg_pointers: &Vec<u32>,
  292. env_pointers: &Vec<u32>,
  293. aux_vec: &AuxVec<u32>,
  294. ) {
  295. let aux_vec_size = (aux_vec.table().len() + 1) * (size_of::<u32>() * 2);
  296. let envp_pointers_size = (env_pointers.len() + 1) * size_of::<u32>();
  297. let argv_pointers_size = (arg_pointers.len() + 1) * size_of::<u32>();
  298. let argc_size = size_of::<u32>();
  299. let all_size = aux_vec_size + envp_pointers_size + argv_pointers_size + argc_size;
  300. let align_sp = (sp.addr() - all_size).align_down(16);
  301. *sp = VAddr::from(align_sp + all_size);
  302. }
  303. fn push_strings(mm_list: &MMList, sp: &mut VAddr, strings: Vec<CString>) -> KResult<Vec<u32>> {
  304. let mut addrs = Vec::with_capacity(strings.len());
  305. for string in strings.iter().rev() {
  306. let len = string.as_bytes_with_nul().len();
  307. *sp = *sp - len;
  308. mm_list.access_mut(*sp, len, |offset, data| {
  309. data.copy_from_slice(&string.as_bytes_with_nul()[offset..offset + data.len()])
  310. })?;
  311. addrs.push(sp.addr() as u32);
  312. }
  313. addrs.reverse();
  314. Ok(addrs)
  315. }
  316. fn push_pointers(mm_list: &MMList, sp: &mut VAddr, mut pointers: Vec<u32>) -> KResult<()> {
  317. pointers.push(0);
  318. *sp = *sp - pointers.len() * size_of::<u32>();
  319. mm_list.access_mut(*sp, pointers.len() * size_of::<u32>(), |offset, data| {
  320. data.copy_from_slice(unsafe {
  321. core::slice::from_raw_parts(
  322. pointers.as_ptr().byte_add(offset) as *const u8,
  323. data.len(),
  324. )
  325. })
  326. })?;
  327. Ok(())
  328. }
  329. fn push_u32(mm_list: &MMList, sp: &mut VAddr, val: u32) -> KResult<()> {
  330. *sp = *sp - size_of::<u32>();
  331. mm_list.access_mut(*sp, size_of::<u32>(), |_, data| {
  332. data.copy_from_slice(unsafe {
  333. core::slice::from_raw_parts(&val as *const _ as *const u8, data.len())
  334. })
  335. })?;
  336. Ok(())
  337. }
  338. fn push_aux_vec(mm_list: &MMList, sp: &mut VAddr, aux_vec: AuxVec<u32>) -> KResult<()> {
  339. let mut longs: Vec<u32> = vec![];
  340. // Write Auxiliary vectors
  341. let aux_vec: Vec<_> = aux_vec
  342. .table()
  343. .iter()
  344. .map(|(aux_key, aux_value)| (*aux_key, *aux_value))
  345. .collect();
  346. for (aux_key, aux_value) in aux_vec.iter() {
  347. longs.push(*aux_key as u32);
  348. longs.push(*aux_value);
  349. }
  350. // Write NULL auxiliary
  351. longs.push(AuxKey::AT_NULL as u32);
  352. longs.push(0);
  353. *sp = *sp - longs.len() * size_of::<u32>();
  354. mm_list.access_mut(*sp, longs.len() * size_of::<u32>(), |offset, data| {
  355. data.copy_from_slice(unsafe {
  356. core::slice::from_raw_parts(
  357. longs.as_ptr().byte_add(offset) as *const u8,
  358. data.len(),
  359. )
  360. })
  361. })?;
  362. Ok(())
  363. }
  364. }
/// A parsed 64-bit ELF image. Loading is not implemented yet; see the
/// `todo!()` stubs in `impl Elf64`.
pub struct Elf64 {
    elf_header: ElfHeader<P64>,
    program_headers: Vec<ProgramHeader64>,
}
impl Elf64 {
    // Planned 64-bit address-space layout, kept for reference until
    // the loader is implemented:
    // const LDSO_BASE_ADDR: usize = 0xffff00000000;
    // const STACK_BASE_ADDR: usize = 0xffff_ff00_0000;
    // const DYN_BASE_ADDR: usize = 0xaaaa00000000;

    /// Not yet implemented — panics via `todo!()`.
    fn parse(file: Arc<Dentry>) -> KResult<Self> {
        todo!()
    }

    /// Not yet implemented — panics via `todo!()`.
    fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
        todo!()
    }
}
/// An ELF image of either class; `parse` inspects `e_ident` and dispatches
/// to the matching width-specific implementation.
pub enum Elf {
    ELF32(Elf32),
    ELF64(Elf64),
}
  384. impl Elf {
  385. pub fn parse(elf_file: Arc<Dentry>) -> KResult<Self> {
  386. let mut header_pt1 = UninitBuffer::<HeaderPt1>::new();
  387. elf_file.read(&mut header_pt1, 0)?;
  388. let header_pt1 = header_pt1.assume_init().map_err(|_| ENOEXEC)?;
  389. assert_eq!(header_pt1.magic, ELF_MAGIC);
  390. match header_pt1.class() {
  391. Class::ThirtyTwo => Ok(Elf::ELF32(Elf32::parse(elf_file)?)),
  392. Class::SixtyFour => Ok(Elf::ELF64(Elf64::parse(elf_file)?)),
  393. _ => Err(ENOEXEC),
  394. }
  395. }
  396. pub fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
  397. match &self {
  398. Elf::ELF32(elf32) => elf32.load(args, envs),
  399. Elf::ELF64(elf64) => elf64.load(args, envs),
  400. }
  401. }
  402. }