elf.rs 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349
  1. use crate::{
  2. io::{ByteBuffer, UninitBuffer},
  3. kernel::{
  4. constants::ENOEXEC,
  5. mem::{FileMapping, MMList, Mapping, Permission},
  6. vfs::dentry::Dentry,
  7. },
  8. prelude::*,
  9. };
  10. use alloc::{ffi::CString, sync::Arc};
  11. use bitflags::bitflags;
  12. use eonix_mm::address::{Addr as _, AddrOps as _, VAddr};
/// ELF file class (`EI_CLASS` identification byte): 32-bit or 64-bit format.
#[repr(u8)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfFormat {
    /// 32-bit objects (`ELFCLASS32`).
    Elf32 = 1,
    /// 64-bit objects (`ELFCLASS64`).
    Elf64 = 2,
}
/// ELF data encoding (`EI_DATA` identification byte): byte order of the image.
#[repr(u8)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfEndian {
    /// Two's complement, little-endian (`ELFDATA2LSB`).
    Little = 1,
    /// Two's complement, big-endian (`ELFDATA2MSB`).
    Big = 2,
}
/// Target OS ABI (`EI_OSABI` identification byte).
///
/// Only Linux is listed; any other value will fail to match this enum.
#[repr(u8)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfABI {
    // SystemV = 0,
    Linux = 3,
}
/// Object file type (`e_type` field of the ELF header).
#[repr(u16)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfType {
    /// Relocatable object file (`ET_REL`).
    Relocatable = 1,
    /// Executable file (`ET_EXEC`).
    Executable = 2,
    /// Shared object or position-independent executable (`ET_DYN`).
    Dynamic = 3,
    /// Core dump (`ET_CORE`).
    Core = 4,
}
/// Target instruction set architecture (`e_machine` field of the ELF header).
#[repr(u16)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfArch {
    /// Intel 80386 (`EM_386`).
    X86 = 0x03,
    /// ARM 32-bit (`EM_ARM`).
    Arm = 0x28,
    /// Intel Itanium (`EM_IA_64`).
    IA64 = 0x32,
    /// AMD x86-64 (`EM_X86_64`).
    X86_64 = 0x3e,
    /// ARM 64-bit (`EM_AARCH64`).
    AArch64 = 0xb7,
    /// RISC-V (`EM_RISCV`).
    RiscV = 0xf3,
}
bitflags! {
    /// Program header segment permission flags (`p_flags`).
    #[derive(Default, Clone, Copy)]
    pub struct Elf32PhFlags: u32 {
        /// Segment is executable (`PF_X`).
        const Exec = 1;
        /// Segment is writable (`PF_W`).
        const Write = 2;
        /// Segment is readable (`PF_R`).
        const Read = 4;
    }

    /// Section header attribute flags (`sh_flags`).
    #[derive(Default, Clone, Copy)]
    pub struct Elf32ShFlags: u32 {
        /// Section is writable at run time (`SHF_WRITE`).
        const Write = 1;
        /// Section occupies memory during execution (`SHF_ALLOC`).
        const Alloc = 2;
        /// Section contains executable instructions (`SHF_EXECINSTR`).
        const Exec = 4;
        /// Bits reserved for processor-specific semantics (`SHF_MASKPROC`).
        const MaskProc = 0xf0000000;
    }
}
  69. #[allow(dead_code)]
  70. #[derive(Default, Clone, Copy, PartialEq, Eq)]
  71. pub enum Elf32PhType {
  72. #[default]
  73. Null = 0,
  74. Load = 1,
  75. Dynamic = 2,
  76. Interp = 3,
  77. Note = 4,
  78. Shlib = 5,
  79. Phdr = 6,
  80. Tls = 7,
  81. Loos = 0x60000000,
  82. Hios = 0x6fffffff,
  83. Loproc = 0x70000000,
  84. Hiproc = 0x7fffffff,
  85. }
  86. #[allow(dead_code)]
  87. #[derive(Default, Clone, Copy, PartialEq, Eq)]
  88. pub enum Elf32ShType {
  89. #[default]
  90. Null = 0,
  91. ProgBits = 1,
  92. SymTab = 2,
  93. StrTab = 3,
  94. Rela = 4,
  95. Hash = 5,
  96. Dynamic = 6,
  97. Note = 7,
  98. NoBits = 8,
  99. Rel = 9,
  100. Shlib = 10,
  101. DynSym = 11,
  102. InitArray = 14,
  103. FiniArray = 15,
  104. PreInitArray = 16,
  105. Group = 17,
  106. SymTabShndx = 18,
  107. Loos = 0x60000000,
  108. Hios = 0x6fffffff,
  109. Loproc = 0x70000000,
  110. Hiproc = 0x7fffffff,
  111. }
/// The ELF32 file header (`Elf32_Ehdr`), located at offset 0 of the image.
///
/// `#[repr(C, packed)]` matches the on-disk layout so the struct can be
/// filled by reading raw bytes from the file (see `ParsedElf32::parse`).
///
/// NOTE(review): `format`, `endian`, `abi`, `elf_type` and `arch` are enums
/// filled from untrusted file bytes; an out-of-range value would be an
/// invalid discriminant. Presumably `assume_init`/`check_valid` are meant
/// to reject such files — confirm before relying on these fields.
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct Elf32Header {
    /// ELF magic number: 0x7f, "ELF"
    pub magic: [u8; 4],
    /// File class: 32-bit or 64-bit (`EI_CLASS`).
    pub format: ElfFormat,
    /// Byte order of the image (`EI_DATA`).
    pub endian: ElfEndian,
    /// ELF version, should be 1
    pub version: u8,
    /// Target OS ABI (`EI_OSABI`).
    pub abi: ElfABI,
    /// ABI version; meaning depends on `abi`.
    pub abi_version: u8,
    /// Reserved identification bytes, unused.
    padding: [u8; 7],
    /// Object file type (executable, shared object, ...).
    pub elf_type: ElfType,
    /// Target instruction set architecture.
    pub arch: ElfArch,
    /// ELF version, should be 1
    pub version2: u32,
    /// Virtual address of the program entry point.
    pub entry: u32,
    /// File offset of the program header table.
    pub ph_offset: u32,
    /// File offset of the section header table.
    pub sh_offset: u32,
    /// Architecture-specific flags.
    pub flags: u32,
    /// Size of this header in bytes; validated against our struct size.
    pub eh_size: u16,
    /// Size of one program header entry; validated against our struct size.
    pub ph_entry_size: u16,
    /// Number of program header entries.
    pub ph_entry_count: u16,
    /// Size of one section header entry; validated against our struct size.
    pub sh_entry_size: u16,
    /// Number of section header entries.
    pub sh_entry_count: u16,
    /// Index of the section-name string table in the section header table.
    pub sh_str_index: u16,
}
/// An ELF32 program header table entry (`Elf32_Phdr`).
///
/// `#[repr(C)]` matches the on-disk layout; the table is filled by reading
/// raw bytes from the file (see `ParsedElf32::parse`).
/// NOTE(review): a malformed file can place an out-of-range value in
/// `ph_type`/`flags`, i.e. an invalid enum discriminant — worth validating
/// before use.
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct Elf32PhEntry {
    pub ph_type: Elf32PhType,
    /// Offset of the segment in the file image.
    pub offset: u32,
    /// Virtual address at which the segment is loaded.
    pub vaddr: u32,
    /// Physical address; ignored by this loader.
    pub paddr: u32,
    /// Number of bytes of the segment present in the file image.
    pub file_size: u32,
    /// Number of bytes the segment occupies in memory; any excess over
    /// `file_size` (e.g. `.bss`) is mapped anonymously by `load`.
    pub mem_size: u32,
    pub flags: Elf32PhFlags,
    /// `0` and `1` for no alignment, otherwise power of `2`
    pub align: u32,
}
/// An ELF32 section header table entry (`Elf32_Shdr`).
///
/// `#[repr(C)]` matches the on-disk layout; the table is filled by reading
/// raw bytes from the file (see `ParsedElf32::parse`).
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct Elf32ShEntry {
    /// Offset of the section name within the section-name string table.
    pub name_offset: u32,
    pub sh_type: Elf32ShType,
    pub flags: Elf32ShFlags,
    /// Virtual address of the section in memory (0 if not loaded).
    pub addr: u32,
    /// Offset of the section in the file image.
    pub offset: u32,
    /// Size of the section in bytes.
    pub size: u32,
    /// Section header table index link; interpretation depends on `sh_type`.
    pub link: u32,
    /// Extra information; interpretation depends on `sh_type`.
    pub info: u32,
    /// Required alignment of the section.
    pub addr_align: u32,
    /// Entry size for sections holding a table of fixed-size entries, else 0.
    pub entry_size: u32,
}
/// An ELF32 image whose header and header tables have been read and
/// validated, ready to be mapped into an address space via `load`.
#[allow(dead_code)]
pub struct ParsedElf32 {
    /// Program entry point taken from the header (`e_entry`).
    entry: u32,
    /// The backing file; segments are mapped from it on `load`.
    file: Arc<Dentry>,
    /// Program header table entries.
    phents: Vec<Elf32PhEntry>,
    /// Section header table entries.
    shents: Vec<Elf32ShEntry>,
}
  173. const ELF_MAGIC: [u8; 4] = *b"\x7fELF";
  174. impl Elf32Header {
  175. fn check_valid(&self) -> bool {
  176. self.magic == ELF_MAGIC
  177. && self.version == 1
  178. && self.version2 == 1
  179. && self.eh_size as usize == size_of::<Elf32Header>()
  180. && self.ph_entry_size as usize == size_of::<Elf32PhEntry>()
  181. && self.sh_entry_size as usize == size_of::<Elf32ShEntry>()
  182. }
  183. }
impl ParsedElf32 {
    /// Read and validate the ELF header of `file`, then read its program
    /// and section header tables.
    ///
    /// # Errors
    /// Returns `ENOEXEC` if the header fails `check_valid` or if either
    /// header table cannot be read in full.
    pub fn parse(file: Arc<Dentry>) -> KResult<Self> {
        let mut header = UninitBuffer::<Elf32Header>::new();
        file.read(&mut header, 0)?;
        // NOTE(review): presumably `assume_init` errors when the read did
        // not fill the whole header — confirm in `UninitBuffer`.
        let header = header.assume_init().map_err(|_| ENOEXEC)?;
        if !header.check_valid() {
            return Err(ENOEXEC);
        }
        // TODO: Use `UninitBuffer` for `phents` and `shents`.
        // Read the program header table at `ph_offset`; a short read means
        // the file is truncated or the header lied about the entry count.
        let mut phents = vec![Elf32PhEntry::default(); header.ph_entry_count as usize];
        let nread = file.read(
            &mut ByteBuffer::from(phents.as_mut_slice()),
            header.ph_offset as usize,
        )?;
        if nread != header.ph_entry_count as usize * size_of::<Elf32PhEntry>() {
            return Err(ENOEXEC);
        }
        // Same for the section header table at `sh_offset`.
        let mut shents = vec![Elf32ShEntry::default(); header.sh_entry_count as usize];
        let nread = file.read(
            &mut ByteBuffer::from(shents.as_mut_slice()),
            header.sh_offset as usize,
        )?;
        if nread != header.sh_entry_count as usize * size_of::<Elf32ShEntry>() {
            return Err(ENOEXEC);
        }
        Ok(Self {
            entry: header.entry,
            file,
            phents,
            shents,
        })
    }

    /// Load the ELF file into memory. Return the entry point address and the memory list containing the program data.
    ///
    /// We clear the user space and load the program headers into memory.
    /// Can't make a way back if failed from now on.
    ///
    /// # Return
    /// `(entry_ip, sp, mm_list)`
    pub fn load(self, args: Vec<CString>, envs: Vec<CString>) -> KResult<(VAddr, VAddr, MMList)> {
        let mm_list = MMList::new();
        // Highest mapped end address over all PT_LOAD segments; the
        // program break (heap start) is placed just above it.
        let mut data_segment_end = VAddr::NULL;
        for phent in self
            .phents
            .into_iter()
            .filter(|ent| ent.ph_type == Elf32PhType::Load)
        {
            let vaddr_start = VAddr::from(phent.vaddr as usize);
            let vmem_vaddr_end = vaddr_start + phent.mem_size as usize;
            let load_vaddr_end = vaddr_start + phent.file_size as usize;
            // Page-align the mapping: round the start down, the ends up.
            let vaddr = vaddr_start.floor();
            let vmem_len = vmem_vaddr_end.ceil() - vaddr;
            let file_len = load_vaddr_end.ceil() - vaddr;
            // Round the file offset down to a page boundary to match the
            // rounded-down `vaddr`.
            // NOTE(review): assumes `p_offset` and `p_vaddr` are congruent
            // modulo the page size, as the ELF spec requires; a malformed
            // file violating this would be mapped from the wrong bytes.
            let file_offset = phent.offset as usize & !0xfff;
            let permission = Permission {
                write: phent.flags.contains(Elf32PhFlags::Write),
                execute: phent.flags.contains(Elf32PhFlags::Exec),
            };
            // File-backed portion of the segment.
            if file_len != 0 {
                let real_file_length = load_vaddr_end - vaddr;
                mm_list.mmap_fixed(
                    vaddr,
                    file_len,
                    Mapping::File(FileMapping::new(
                        self.file.clone(),
                        file_offset,
                        real_file_length,
                    )),
                    permission,
                )?;
            }
            // Memory beyond the file-backed part (`mem_size` > `file_size`,
            // e.g. `.bss`) is mapped anonymously.
            if vmem_len > file_len {
                mm_list.mmap_fixed(
                    vaddr + file_len,
                    vmem_len - file_len,
                    Mapping::Anonymous,
                    permission,
                )?;
            }
            if vaddr + vmem_len > data_segment_end {
                data_segment_end = vaddr + vmem_len;
            }
        }
        // Leave a 64 KiB gap between the last segment and the heap start.
        mm_list.register_break(data_segment_end + 0x10000);
        // Map stack area
        mm_list.mmap_fixed(
            VAddr::from(0xc0000000 - 0x800000), // Stack bottom is at 0xc0000000
            0x800000,                           // 8MB stack size
            Mapping::Anonymous,
            Permission {
                write: true,
                execute: false,
            },
        )?;
        let mut sp = VAddr::from(0xc0000000); // Current stack top
        // Copy the argument and environment strings onto the stack; the
        // returned user-space addresses become the argv/envp pointers below.
        let arg_addrs = push_strings(&mm_list, &mut sp, args)?;
        let env_addrs = push_strings(&mm_list, &mut sp, envs)?;
        // Build the initial stack frame: argc, argv[..], NULL, envp[..],
        // NULL, then an empty auxiliary vector (a single AT_NULL pair).
        let mut longs = vec![];
        longs.push(arg_addrs.len() as u32); // argc
        longs.extend(arg_addrs.into_iter()); // args
        longs.push(0); // null
        longs.extend(env_addrs.into_iter()); // envs
        longs.push(0); // null
        longs.push(0); // AT_NULL
        longs.push(0); // AT_NULL
        sp = sp - longs.len() * size_of::<u32>();
        // Keep the stack pointer 16-byte aligned at process entry.
        sp = sp.floor_to(16);
        // Copy the frame into user memory. `access_mut` may present the
        // destination in several chunks, each at `offset` into the frame.
        mm_list.access_mut(sp, longs.len() * size_of::<u32>(), |offset, data| {
            data.copy_from_slice(unsafe {
                // SAFETY: `offset + data.len()` never exceeds the byte
                // length of `longs`, which is the total size passed to
                // `access_mut` above.
                core::slice::from_raw_parts(
                    longs.as_ptr().byte_add(offset) as *const u8,
                    data.len(),
                )
            })
        })?;
        Ok((VAddr::from(self.entry as usize), sp, mm_list))
    }
}
  302. fn push_strings(mm_list: &MMList, sp: &mut VAddr, strings: Vec<CString>) -> KResult<Vec<u32>> {
  303. let mut addrs = vec![];
  304. for string in strings {
  305. let len = string.as_bytes_with_nul().len();
  306. *sp = *sp - len;
  307. mm_list.access_mut(*sp, len, |offset, data| {
  308. data.copy_from_slice(&string.as_bytes_with_nul()[offset..offset + data.len()])
  309. })?;
  310. addrs.push(sp.addr() as u32);
  311. }
  312. Ok(addrs)
  313. }