elf.rs

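//! Minimal ELF32 support: on-disk header, program header and section header
//! definitions, plus a loader that maps `PT_LOAD` segments and sets up the
//! initial user stack (argc/argv/envp and an empty auxiliary vector).
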
use alloc::{ffi::CString, sync::Arc};
use bitflags::bitflags;

use crate::{
    io::{ByteBuffer, UninitBuffer},
    kernel::{
        constants::ENOEXEC,
        mem::{FileMapping, MMList, Mapping, Permission, VAddr},
        vfs::dentry::Dentry,
    },
    prelude::*,
};
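
/// ELF class (`e_ident[EI_CLASS]`): 32-bit or 64-bit object file.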
#[repr(u8)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfFormat {
    Elf32 = 1,
    Elf64 = 2,
}
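
/// Data encoding (`e_ident[EI_DATA]`): little-endian or big-endian.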
#[repr(u8)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfEndian {
    Little = 1,
    Big = 2,
}
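
/// OS ABI (`e_ident[EI_OSABI]`); only the Linux ABI is listed here.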
#[repr(u8)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfABI {
    // SystemV = 0,
    Linux = 3,
}
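
/// Object file type (`e_type`).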
#[repr(u16)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfType {
    Relocatable = 1,
    Executable = 2,
    Dynamic = 3,
    Core = 4,
}
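
/// Target machine architecture (`e_machine`).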
#[repr(u16)]
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ElfArch {
    X86 = 0x03,
    Arm = 0x28,
    IA64 = 0x32,
    X86_64 = 0x3e,
    AArch64 = 0xb7,
    RiscV = 0xf3,
}
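
// Flag bits for program headers (`p_flags`) and section headers (`sh_flags`).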
bitflags! {
    #[derive(Default, Clone, Copy)]
    pub struct Elf32PhFlags: u32 {
        const Exec = 1;
        const Write = 2;
        const Read = 4;
    }

    #[derive(Default, Clone, Copy)]
    pub struct Elf32ShFlags: u32 {
        const Write = 1;
        const Alloc = 2;
        const Exec = 4;
        const MaskProc = 0xf0000000;
    }
}
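
/// Program header type (`p_type`).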
#[repr(u32)]
#[allow(dead_code)]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub enum Elf32PhType {
    #[default]
    Null = 0,
    Load = 1,
    Dynamic = 2,
    Interp = 3,
    Note = 4,
    Shlib = 5,
    Phdr = 6,
    Tls = 7,
    Loos = 0x60000000,
    Hios = 0x6fffffff,
    Loproc = 0x70000000,
    Hiproc = 0x7fffffff,
}
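
/// Section header type (`sh_type`).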
#[repr(u32)]
#[allow(dead_code)]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub enum Elf32ShType {
    #[default]
    Null = 0,
    ProgBits = 1,
    SymTab = 2,
    StrTab = 3,
    Rela = 4,
    Hash = 5,
    Dynamic = 6,
    Note = 7,
    NoBits = 8,
    Rel = 9,
    Shlib = 10,
    DynSym = 11,
    InitArray = 14,
    FiniArray = 15,
    PreInitArray = 16,
    Group = 17,
    SymTabShndx = 18,
    Loos = 0x60000000,
    Hios = 0x6fffffff,
    Loproc = 0x70000000,
    Hiproc = 0x7fffffff,
}
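
/// The ELF32 file header (`Elf32_Ehdr`), read directly from the start of the file.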
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct Elf32Header {
    /// ELF magic number: 0x7f, "ELF"
    pub magic: [u8; 4],
    pub format: ElfFormat,
    pub endian: ElfEndian,
    /// ELF version, should be 1
    pub version: u8,
    pub abi: ElfABI,
    pub abi_version: u8,
    padding: [u8; 7],
    pub elf_type: ElfType,
    pub arch: ElfArch,
    /// ELF version, should be 1
    pub version2: u32,
    pub entry: u32,
    pub ph_offset: u32,
    pub sh_offset: u32,
    pub flags: u32,
    pub eh_size: u16,
    pub ph_entry_size: u16,
    pub ph_entry_count: u16,
    pub sh_entry_size: u16,
    pub sh_entry_count: u16,
    pub sh_str_index: u16,
}
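
/// A program header table entry (`Elf32_Phdr`).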
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct Elf32PhEntry {
    pub ph_type: Elf32PhType,
    pub offset: u32,
    pub vaddr: u32,
    pub paddr: u32,
    pub file_size: u32,
    pub mem_size: u32,
    pub flags: Elf32PhFlags,
    /// `0` or `1` means no alignment; otherwise must be a power of `2`.
    pub align: u32,
}
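
/// A section header table entry (`Elf32_Shdr`).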
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct Elf32ShEntry {
    pub name_offset: u32,
    pub sh_type: Elf32ShType,
    pub flags: Elf32ShFlags,
    pub addr: u32,
    pub offset: u32,
    pub size: u32,
    pub link: u32,
    pub info: u32,
    pub addr_align: u32,
    pub entry_size: u32,
}
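
/// An ELF32 file whose header, program header table and section header table
/// have been read and validated.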
#[allow(dead_code)]
pub struct ParsedElf32 {
    entry: u32,
    file: Arc<Dentry>,
    phents: Vec<Elf32PhEntry>,
    shents: Vec<Elf32ShEntry>,
}

const ELF_MAGIC: [u8; 4] = *b"\x7fELF";

impl Elf32Header {
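    /// Basic sanity checks: magic bytes, version fields, and header entry sizes
    /// matching the in-memory struct layouts.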
    fn check_valid(&self) -> bool {
        self.magic == ELF_MAGIC
            && self.version == 1
            && self.version2 == 1
            && self.eh_size as usize == size_of::<Elf32Header>()
            && self.ph_entry_size as usize == size_of::<Elf32PhEntry>()
            && self.sh_entry_size as usize == size_of::<Elf32ShEntry>()
    }
}

impl ParsedElf32 {
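    /// Read and validate the ELF header and the program/section header tables.
    ///
    /// Returns `ENOEXEC` if the file does not look like a loadable ELF32 image.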
    pub fn parse(file: Arc<Dentry>) -> KResult<Self> {
        let mut header = UninitBuffer::<Elf32Header>::new();
        file.read(&mut header, 0)?;
        let header = header.assume_init().map_err(|_| ENOEXEC)?;

        if !header.check_valid() {
            return Err(ENOEXEC);
        }

        // TODO: Use `UninitBuffer` for `phents` and `shents`.
        let mut phents = vec![Elf32PhEntry::default(); header.ph_entry_count as usize];
        let nread = file.read(
            &mut ByteBuffer::from(phents.as_mut_slice()),
            header.ph_offset as usize,
        )?;
        if nread != header.ph_entry_count as usize * size_of::<Elf32PhEntry>() {
            return Err(ENOEXEC);
        }

        let mut shents = vec![Elf32ShEntry::default(); header.sh_entry_count as usize];
        let nread = file.read(
            &mut ByteBuffer::from(shents.as_mut_slice()),
            header.sh_offset as usize,
        )?;
        if nread != header.sh_entry_count as usize * size_of::<Elf32ShEntry>() {
            return Err(ENOEXEC);
        }

        Ok(Self {
            entry: header.entry,
            file,
            phents,
            shents,
        })
    }

    /// Load the ELF file into memory. Returns the entry point address, the initial
    /// stack pointer, and the memory list containing the program data.
    ///
    /// We clear the user address space and map the program headers into memory.
    /// There is no way to roll back once loading has started, so a failure here
    /// leaves the process without a usable address space.
    ///
    /// # Return
    /// `(entry_ip, sp, mm_list)`
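    ///
    /// # Example
    /// A minimal sketch of the intended call sequence; `dentry`, `args` and `envs`
    /// are caller-provided values, not part of this module:
    /// ```ignore
    /// let elf = ParsedElf32::parse(dentry.clone())?;
    /// let (entry_ip, sp, mm_list) = elf.load(args, envs)?;
    /// // Switch to `mm_list`, then enter user mode at `entry_ip` with stack pointer `sp`.
    /// ```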
    pub fn load(self, args: Vec<CString>, envs: Vec<CString>) -> KResult<(VAddr, VAddr, MMList)> {
        let mm_list = MMList::new();
        let mut data_segment_end = VAddr(0);

        for phent in self
            .phents
            .into_iter()
            .filter(|ent| ent.ph_type == Elf32PhType::Load)
        {
            let vaddr_start = VAddr(phent.vaddr as usize);
            let vmem_vaddr_end = vaddr_start + phent.mem_size as usize;
            let load_vaddr_end = vaddr_start + phent.file_size as usize;

            let vaddr = vaddr_start.floor();
            let vmem_len = vmem_vaddr_end.ceil() - vaddr;
            let file_len = load_vaddr_end.ceil() - vaddr;
            let file_offset = phent.offset as usize & !0xfff;

            let permission = Permission {
                write: phent.flags.contains(Elf32PhFlags::Write),
                execute: phent.flags.contains(Elf32PhFlags::Exec),
            };

            if file_len != 0 {
                let real_file_length = load_vaddr_end - vaddr;
                mm_list.mmap_fixed(
                    vaddr,
                    file_len,
                    Mapping::File(FileMapping::new(
                        self.file.clone(),
                        file_offset,
                        real_file_length,
                    )),
                    permission,
                )?;
            }

            if vmem_len > file_len {
                mm_list.mmap_fixed(
                    vaddr + file_len,
                    vmem_len - file_len,
                    Mapping::Anonymous,
                    permission,
                )?;
            }

            if vaddr + vmem_len > data_segment_end {
                data_segment_end = vaddr + vmem_len;
            }
        }

        mm_list.register_break(data_segment_end + 0x10000);

        // Map the stack area: 8 MiB ending at 0xc0000000 (the stack grows downwards).
        mm_list.mmap_fixed(
            VAddr(0xc0000000 - 0x800000),
            0x800000, // 8MB stack size
            Mapping::Anonymous,
            Permission {
                write: true,
                execute: false,
            },
        )?;

        let mut sp = VAddr::from(0xc0000000); // Stack pointer starts at the top of the stack.
        let arg_addrs = push_strings(&mm_list, &mut sp, args)?;
        let env_addrs = push_strings(&mm_list, &mut sp, envs)?;

        let mut longs = vec![];
        longs.push(arg_addrs.len() as u32); // argc
        longs.extend(arg_addrs.into_iter()); // args
        longs.push(0); // null
        longs.extend(env_addrs.into_iter()); // envs
        longs.push(0); // null
        longs.push(0); // AT_NULL
        longs.push(0); // AT_NULL

        sp = sp - longs.len() * size_of::<u32>();
        sp = VAddr::from(usize::from(sp) & !0xf); // Align to 16 bytes.

        mm_list.access_mut(sp, longs.len() * size_of::<u32>(), |offset, data| {
            data.copy_from_slice(unsafe {
                core::slice::from_raw_parts(
                    longs.as_ptr().byte_add(offset) as *const u8,
                    data.len(),
                )
            })
        })?;

        Ok((VAddr(self.entry as usize), sp, mm_list))
    }
}
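
/// Copy each string (including its trailing NUL) onto the user stack, moving `sp`
/// downwards, and return the user-space addresses of the strings in the same order
/// as the input.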
fn push_strings(mm_list: &MMList, sp: &mut VAddr, strings: Vec<CString>) -> KResult<Vec<u32>> {
    let mut addrs = vec![];

    for string in strings {
        let len = string.as_bytes_with_nul().len();
        *sp = *sp - len;

        mm_list.access_mut(*sp, len, |offset, data| {
            data.copy_from_slice(&string.as_bytes_with_nul()[offset..offset + data.len()])
        })?;

        addrs.push(usize::from(*sp) as u32);
    }

    Ok(addrs)
}