@@ -21,109 +21,203 @@ use eonix_mm::{
 };

 use xmas_elf::{
-    header::{self, Class, HeaderPt1, Machine_, Type_},
+    header::{self, Class, HeaderPt1, Machine_},
     program::{self, ProgramHeader32, ProgramHeader64},
-    P32, P64,
 };

 const INIT_STACK_SIZE: usize = 0x80_0000;

 #[derive(Debug, Clone, Copy)]
 #[repr(C)]
-pub struct HeaderPt2<P> {
-    pub type_: Type_,
-    pub machine: Machine_,
-    pub version: u32,
-    pub entry_point: P,
-    pub ph_offset: P,
-    pub sh_offset: P,
-    pub flags: u32,
-    pub header_size: u16,
-    pub ph_entry_size: u16,
-    pub ph_count: u16,
-    pub sh_entry_size: u16,
-    pub sh_count: u16,
-    pub sh_str_index: u16,
+struct HeaderPt2<P> {
+    type_: header::Type_,
+    machine: Machine_,
+    version: u32,
+    entry_point: P,
+    ph_offset: P,
+    sh_offset: P,
+    flags: u32,
+    header_size: u16,
+    ph_entry_size: u16,
+    ph_count: u16,
+    sh_entry_size: u16,
+    sh_count: u16,
+    sh_str_index: u16,
 }

 #[derive(Debug, Clone, Copy)]
 #[repr(C)]
-pub struct ElfHeader<P> {
-    pub pt1: HeaderPt1,
-    pub pt2: HeaderPt2<P>,
+struct ElfHeader<P> {
+    pt1: HeaderPt1,
+    pt2: HeaderPt2<P>,
 }

-pub struct LdsoLoadInfo {
-    pub base: VAddr,
-    pub entry_ip: VAddr,
+trait ProgramHeader {
+    fn offset(&self) -> usize;
+    fn file_size(&self) -> usize;
+    fn virtual_addr(&self) -> usize;
+    fn mem_size(&self) -> usize;
+    fn flags(&self) -> program::Flags;
+    fn type_(&self) -> KResult<program::Type>;
 }

-pub struct Elf32 {
-    pub file: Arc<Dentry>,
-    pub elf_header: ElfHeader<P32>,
-    pub program_headers: Vec<ProgramHeader32>,
+macro_rules! impl_program_header {
+    ($ProgramHeaderN:ident) => {
+        impl ProgramHeader for $ProgramHeaderN {
+            fn offset(&self) -> usize {
+                self.offset as usize
+            }
+
+            fn file_size(&self) -> usize {
+                self.file_size as usize
+            }
+
+            fn virtual_addr(&self) -> usize {
+                self.virtual_addr as usize
+            }
+
+            fn mem_size(&self) -> usize {
+                self.mem_size as usize
+            }
+
+            fn flags(&self) -> program::Flags {
+                self.flags
+            }
+
+            fn type_(&self) -> KResult<program::Type> {
+                self.get_type().map_err(|_| ENOEXEC)
+            }
+        }
+    };
 }

-impl Elf32 {
-    const DYN_BASE_ADDR: usize = 0x4000_0000;
-    const LDSO_BASE_ADDR: usize = 0xf000_0000;
-    const STACK_BASE_ADDR: usize = 0xffff_0000;
+impl_program_header!(ProgramHeader32);
+impl_program_header!(ProgramHeader64);

-    pub fn parse(elf_file: Arc<Dentry>) -> KResult<Self> {
-        let mut elf_header = UninitBuffer::<ElfHeader<P32>>::new();
-        elf_file.read(&mut elf_header, 0)?;
+struct LdsoLoadInfo {
+    base: VAddr,
+    entry_ip: VAddr,
+}

-        let elf_header = elf_header.assume_init().map_err(|_| ENOEXEC)?;
+trait ElfAddr {
+    type Integer;

-        let ph_offset = elf_header.pt2.ph_offset;
-        let ph_count = elf_header.pt2.ph_count;
+    fn from_usize(val: usize) -> Self;
+    fn into_usize(&self) -> usize;
+}

-        let mut program_headers = vec![ProgramHeader32::default(); ph_count as usize];
-        elf_file.read(
-            &mut ByteBuffer::from(program_headers.as_mut_slice()),
-            ph_offset as usize,
-        )?;
+macro_rules! impl_elf_addr {
+    ($Type:ident) => {
+        impl ElfAddr for $Type {
+            type Integer = $Type;

-        Ok(Self {
-            file: elf_file,
-            elf_header,
-            program_headers,
-        })
-    }
+            fn from_usize(val: usize) -> Self {
+                val as Self::Integer
+            }
+
+            fn into_usize(&self) -> usize {
+                *self as usize
+            }
+        }
+    };
+}
+
+impl_elf_addr!(u32);
+impl_elf_addr!(u64);
+
+trait ElfArch {
+    type Ea: ElfAddr + Clone + Copy;
+    type Ph: ProgramHeader + Clone + Copy + Default;
+
+    const DYN_BASE_ADDR: usize;
+    const LDSO_BASE_ADDR: usize;
+    const STACK_BASE_ADDR: usize;
+}
+
+struct ElfArch32;
+
+struct ElfArch64;
+
+struct Elf<E: ElfArch> {
+    file: Arc<Dentry>,
+    elf_header: ElfHeader<E::Ea>,
+    program_headers: Vec<E::Ph>,
+}
+
+impl ElfArch for ElfArch32 {
+    type Ea = u32;
+    type Ph = ProgramHeader32;

+    const DYN_BASE_ADDR: usize = 0x4000_0000;
+    const LDSO_BASE_ADDR: usize = 0xf000_0000;
+    const STACK_BASE_ADDR: usize = 0xffff_0000;
+}
+
+impl ElfArch for ElfArch64 {
+    type Ea = u64;
+    type Ph = ProgramHeader64;
+
+    const DYN_BASE_ADDR: usize = 0xaaaa_0000_0000;
+    const LDSO_BASE_ADDR: usize = 0xf000_0000_0000;
+    const STACK_BASE_ADDR: usize = 0xffff_ffff_0000;
+}
+
+impl<E: ElfArch> Elf<E> {
     fn is_shared_object(&self) -> bool {
         self.elf_header.pt2.type_.as_type() == header::Type::SharedObject
     }

-    fn entry_point(&self) -> u32 {
-        self.elf_header.pt2.entry_point
+    fn entry_point(&self) -> usize {
+        self.elf_header.pt2.entry_point.into_usize()
     }

-    fn ph_count(&self) -> u16 {
-        self.elf_header.pt2.ph_count
+    fn ph_count(&self) -> usize {
+        self.elf_header.pt2.ph_count as usize
     }

-    fn ph_offset(&self) -> u32 {
-        self.elf_header.pt2.ph_offset
+    fn ph_offset(&self) -> usize {
+        self.elf_header.pt2.ph_offset.into_usize()
     }

-    fn ph_entry_size(&self) -> u16 {
-        self.elf_header.pt2.ph_entry_size
+    fn ph_entry_size(&self) -> usize {
+        self.elf_header.pt2.ph_entry_size as usize
     }

-    fn ph_addr(&self) -> KResult<u32> {
+    fn ph_addr(&self) -> KResult<usize> {
         let ph_offset = self.ph_offset();
         for program_header in &self.program_headers {
-            if program_header.offset <= ph_offset
-                && ph_offset < program_header.offset + program_header.file_size
+            if program_header.offset() <= ph_offset
+                && ph_offset < program_header.offset() + program_header.file_size()
             {
-                return Ok(ph_offset - program_header.offset + program_header.virtual_addr);
+                return Ok(ph_offset - program_header.offset() + program_header.virtual_addr());
             }
         }
         Err(ENOEXEC)
     }

-    pub fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
+    fn parse(elf_file: Arc<Dentry>) -> KResult<Self> {
+        let mut elf_header = UninitBuffer::<ElfHeader<E::Ea>>::new();
+        elf_file.read(&mut elf_header, 0)?;
+
+        let elf_header = elf_header.assume_init().map_err(|_| ENOEXEC)?;
+
+        let ph_offset = elf_header.pt2.ph_offset;
+        let ph_count = elf_header.pt2.ph_count;
+
+        let mut program_headers = vec![E::Ph::default(); ph_count as usize];
+        elf_file.read(
+            &mut ByteBuffer::from(program_headers.as_mut_slice()),
+            ph_offset.into_usize(),
+        )?;
+
+        Ok(Self {
+            file: elf_file,
+            elf_header,
+            program_headers,
+        })
+    }
+
+    fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
         let mm_list = MMList::new();

         // Load Segments
@@ -135,8 +229,7 @@ impl Elf32 {
         // Heap
         mm_list.register_break(data_segment_end + 0x10000);

-        let aux_vec = Elf32::init_aux_vec(
-            self,
+        let aux_vec = self.init_aux_vec(
             elf_base,
             ldso_load_info
                 .as_ref()
@@ -144,17 +237,17 @@ impl Elf32 {
         )?;

         // Map stack
-        let sp = Elf32::create_and_init_stack(&mm_list, args, envs, aux_vec)?;
+        let sp = self.create_and_init_stack(&mm_list, args, envs, aux_vec)?;

         let entry_ip = if let Some(ldso_load_info) = ldso_load_info {
             // Normal shared object(DYN)
             ldso_load_info.entry_ip.into()
         } else if self.is_shared_object() {
             // ldso itself
-            elf_base + self.entry_point() as usize
+            elf_base + self.entry_point()
         } else {
             // statically linked executable
-            (self.entry_point() as usize).into()
+            self.entry_point().into()
         };

         Ok(LoadInfo {
@@ -164,46 +257,59 @@ impl Elf32 {
         })
     }

-    fn init_aux_vec(
-        elf: &Elf32,
-        elf_base: VAddr,
-        ldso_base: Option<VAddr>,
-    ) -> KResult<AuxVec<u32>> {
-        let mut aux_vec: AuxVec<u32> = AuxVec::new();
-        let ph_addr = if elf.is_shared_object() {
-            elf_base.addr() as u32 + elf.ph_addr()?
+    fn create_and_init_stack(
+        &self,
+        mm_list: &MMList,
+        args: Vec<CString>,
+        envs: Vec<CString>,
+        aux_vec: AuxVec<E::Ea>,
+    ) -> KResult<VAddr> {
+        mm_list.mmap_fixed(
+            VAddr::from(E::STACK_BASE_ADDR - INIT_STACK_SIZE),
+            INIT_STACK_SIZE,
+            Mapping::Anonymous,
+            Permission {
+                read: true,
+                write: true,
+                execute: false,
+            },
+        )?;
+
+        StackInitializer::new(&mm_list, E::STACK_BASE_ADDR, args, envs, aux_vec).init()
+    }
+
+    fn init_aux_vec(&self, elf_base: VAddr, ldso_base: Option<VAddr>) -> KResult<AuxVec<E::Ea>> {
+        let mut aux_vec: AuxVec<E::Ea> = AuxVec::new();
+        let ph_addr = if self.is_shared_object() {
+            elf_base.addr() + self.ph_addr()?
         } else {
-            elf.ph_addr()?
+            self.ph_addr()?
         };
-        aux_vec.set(AuxKey::AT_PAGESZ, PAGE_SIZE as u32)?;
-        aux_vec.set(AuxKey::AT_PHDR, ph_addr)?;
-        aux_vec.set(AuxKey::AT_PHNUM, elf.ph_count() as u32)?;
-        aux_vec.set(AuxKey::AT_PHENT, elf.ph_entry_size() as u32)?;
-        let elf_entry = if elf.is_shared_object() {
-            elf_base.addr() as u32 + elf.entry_point()
+
+        aux_vec.set(AuxKey::AT_PAGESZ, E::Ea::from_usize(PAGE_SIZE))?;
+        aux_vec.set(AuxKey::AT_PHDR, E::Ea::from_usize(ph_addr))?;
+        aux_vec.set(AuxKey::AT_PHNUM, E::Ea::from_usize(self.ph_count()))?;
+        aux_vec.set(AuxKey::AT_PHENT, E::Ea::from_usize(self.ph_entry_size()))?;
+        let elf_entry = if self.is_shared_object() {
+            elf_base.addr() + self.entry_point()
         } else {
-            elf.entry_point()
+            self.entry_point()
         };
-        aux_vec.set(AuxKey::AT_ENTRY, elf_entry)?;
+        aux_vec.set(AuxKey::AT_ENTRY, E::Ea::from_usize(elf_entry))?;

         if let Some(ldso_base) = ldso_base {
-            aux_vec.set(AuxKey::AT_BASE, ldso_base.addr() as u32)?;
+            aux_vec.set(AuxKey::AT_BASE, E::Ea::from_usize(ldso_base.addr()))?;
         }
         Ok(aux_vec)
     }

-    pub fn load_segments(&self, mm_list: &MMList) -> KResult<(VAddr, VAddr)> {
-        let base: VAddr = if self.is_shared_object() {
-            Elf32::DYN_BASE_ADDR
-        } else {
-            0
-        }
-        .into();
+    fn load_segments(&self, mm_list: &MMList) -> KResult<(VAddr, VAddr)> {
+        let base: VAddr = if self.is_shared_object() { E::DYN_BASE_ADDR } else { 0 }.into();

         let mut segments_end = VAddr::NULL;

         for program_header in &self.program_headers {
-            let type_ = program_header.get_type().map_err(|_| ENOEXEC)?;
+            let type_ = program_header.type_().map_err(|_| ENOEXEC)?;

             if type_ == program::Type::Load {
                 let segment_end = self.load_segment(program_header, mm_list, base)?;
@@ -217,25 +323,25 @@ impl Elf32 {
         Ok((base, segments_end))
     }

-    pub fn load_segment(
+    fn load_segment(
         &self,
-        program_header: &ProgramHeader32,
+        program_header: &E::Ph,
         mm_list: &MMList,
         base_addr: VAddr,
     ) -> KResult<VAddr> {
-        let virtual_addr = base_addr + program_header.virtual_addr as usize;
-        let vmem_vaddr_end = virtual_addr + program_header.mem_size as usize;
-        let load_vaddr_end = virtual_addr + program_header.file_size as usize;
+        let virtual_addr = base_addr + program_header.virtual_addr();
+        let vmem_vaddr_end = virtual_addr + program_header.mem_size();
+        let load_vaddr_end = virtual_addr + program_header.file_size();

         let vmap_start = virtual_addr.floor();
         let vmem_len = vmem_vaddr_end.ceil() - vmap_start;
         let file_len = load_vaddr_end.ceil() - vmap_start;
-        let file_offset = (program_header.offset as usize).align_down(PAGE_SIZE);
+        let file_offset = (program_header.offset()).align_down(PAGE_SIZE);

         let permission = Permission {
-            read: program_header.flags.is_read(),
-            write: program_header.flags.is_write(),
-            execute: program_header.flags.is_execute(),
+            read: program_header.flags().is_read(),
+            write: program_header.flags().is_write(),
+            execute: program_header.flags().is_execute(),
         };

         if file_len != 0 {
@@ -265,19 +371,19 @@ impl Elf32 {
         Ok(vmap_start + vmem_len)
     }

-    pub fn load_ldso(&self, mm_list: &MMList) -> KResult<Option<LdsoLoadInfo>> {
+    fn load_ldso(&self, mm_list: &MMList) -> KResult<Option<LdsoLoadInfo>> {
         let ldso_path = self.ldso_path()?;

         if let Some(ldso_path) = ldso_path {
             let fs_context = FsContext::global();
             let ldso_file =
                 Dentry::open(fs_context, Path::new(ldso_path.as_bytes()).unwrap(), true).unwrap();
-            let ldso_elf = Elf32::parse(ldso_file).unwrap();
+            let ldso_elf = Elf::<E>::parse(ldso_file).unwrap();

-            let base = VAddr::from(Elf32::LDSO_BASE_ADDR);
+            let base = VAddr::from(E::LDSO_BASE_ADDR);

             for program_header in &ldso_elf.program_headers {
-                let type_ = program_header.get_type().map_err(|_| ENOEXEC)?;
+                let type_ = program_header.type_().map_err(|_| ENOEXEC)?;

                 if type_ == program::Type::Load {
                     ldso_elf.load_segment(program_header, mm_list, base)?;
@@ -295,11 +401,11 @@ impl Elf32 {

     fn ldso_path(&self) -> KResult<Option<String>> {
         for program_header in &self.program_headers {
-            let type_ = program_header.get_type().map_err(|_| ENOEXEC)?;
+            let type_ = program_header.type_().map_err(|_| ENOEXEC)?;

             if type_ == program::Type::Interp {
-                let file_size = program_header.file_size as usize;
-                let file_offset = program_header.offset as usize;
+                let file_size = program_header.file_size();
+                let file_offset = program_header.offset();

                 let mut ldso_vec = vec![0u8; file_size - 1]; // -1 due to '\0'
                 self.file
@@ -310,176 +416,185 @@ impl Elf32 {
         }
         Ok(None)
     }
+}

-    fn create_and_init_stack(
-        mm_list: &MMList,
-        args: Vec<CString>,
-        envs: Vec<CString>,
-        aux_vec: AuxVec<u32>,
-    ) -> KResult<VAddr> {
-        mm_list.mmap_fixed(
-            VAddr::from(Elf32::STACK_BASE_ADDR - INIT_STACK_SIZE),
-            INIT_STACK_SIZE,
-            Mapping::Anonymous,
-            Permission {
-                read: true,
-                write: true,
-                execute: false,
-            },
-        )?;
+pub enum ELF {
+    Elf32(Elf<ElfArch32>),
+    Elf64(Elf<ElfArch64>),
+}

-        let mut sp = VAddr::from(Elf32::STACK_BASE_ADDR);
+impl ELF {
+    pub fn parse(elf_file: Arc<Dentry>) -> KResult<Self> {
+        let mut header_pt1 = UninitBuffer::<HeaderPt1>::new();
+        elf_file.read(&mut header_pt1, 0)?;

-        let env_pointers = Elf32::push_strings(&mm_list, &mut sp, envs)?;
-        let arg_pointers = Elf32::push_strings(&mm_list, &mut sp, args)?;
+        let header_pt1 = header_pt1.assume_init().map_err(|_| ENOEXEC)?;
+        assert_eq!(header_pt1.magic, ELF_MAGIC);

-        let argc = arg_pointers.len() as u32;
+        match header_pt1.class() {
+            Class::ThirtyTwo => Ok(ELF::Elf32(Elf::parse(elf_file)?)),
+            Class::SixtyFour => Ok(ELF::Elf64(Elf::parse(elf_file)?)),
+            _ => Err(ENOEXEC),
+        }
+    }
+
+    pub fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
+        match &self {
+            ELF::Elf32(elf32) => elf32.load(args, envs),
+            ELF::Elf64(elf64) => elf64.load(args, envs),
+        }
+    }
+}

-        Elf32::stack_alignment(&mut sp, &arg_pointers, &env_pointers, &aux_vec);
-        Elf32::push_aux_vec(&mm_list, &mut sp, aux_vec)?;
-        Elf32::push_pointers(&mm_list, &mut sp, env_pointers)?;
-        Elf32::push_pointers(&mm_list, &mut sp, arg_pointers)?;
-        Elf32::push_u32(&mm_list, &mut sp, argc)?;
+struct StackInitializer<'a, T> {
+    mm_list: &'a MMList,
+    sp: usize,
+    args: Vec<CString>,
+    envs: Vec<CString>,
+    aux_vec: AuxVec<T>,
+}

-        assert_eq!(sp.floor_to(16), sp);
-        Ok(sp)
+impl<'a, T: ElfAddr + Clone + Copy> StackInitializer<'a, T> {
+    fn new(
+        mm_list: &'a MMList,
+        sp: usize,
+        args: Vec<CString>,
+        envs: Vec<CString>,
+        aux_vec: AuxVec<T>,
+    ) -> Self {
+        Self {
+            mm_list,
+            sp,
+            args,
+            envs,
+            aux_vec,
+        }
     }

-    fn stack_alignment(
-        sp: &mut VAddr,
-        arg_pointers: &Vec<u32>,
-        env_pointers: &Vec<u32>,
-        aux_vec: &AuxVec<u32>,
-    ) {
-        let aux_vec_size = (aux_vec.table().len() + 1) * (size_of::<u32>() * 2);
-        let envp_pointers_size = (env_pointers.len() + 1) * size_of::<u32>();
-        let argv_pointers_size = (arg_pointers.len() + 1) * size_of::<u32>();
-        let argc_size = size_of::<u32>();
-        let all_size = aux_vec_size + envp_pointers_size + argv_pointers_size + argc_size;
+    // Returns the stack pointer after the stack has been initialized.
+    fn init(mut self) -> KResult<VAddr> {
+        let env_pointers = self.push_envs()?;
+        let arg_pointers = self.push_args()?;
+
+        self.stack_alignment();
+        self.push_aux_vec()?;
+        self.push_pointers(env_pointers)?;
+        self.push_pointers(arg_pointers)?;
+        self.push_argc(T::from_usize(self.args.len()))?;

-        let align_sp = (sp.addr() - all_size).align_down(16);
-        *sp = VAddr::from(align_sp + all_size);
+        assert_eq!(self.sp.align_down(16), self.sp);
+        Ok(VAddr::from(self.sp))
     }

-    fn push_strings(mm_list: &MMList, sp: &mut VAddr, strings: Vec<CString>) -> KResult<Vec<u32>> {
-        let mut addrs = Vec::with_capacity(strings.len());
-        for string in strings.iter().rev() {
+    fn push_envs(&mut self) -> KResult<Vec<T>> {
+        let mut addrs = Vec::with_capacity(self.envs.len());
+        for string in self.envs.iter().rev() {
             let len = string.as_bytes_with_nul().len();
-            *sp = *sp - len;
-            mm_list.access_mut(*sp, len, |offset, data| {
-                data.copy_from_slice(&string.as_bytes_with_nul()[offset..offset + data.len()])
-            })?;
-            addrs.push(sp.addr() as u32);
+            self.sp -= len;
+            self.mm_list
+                .access_mut(VAddr::from(self.sp), len, |offset, data| {
+                    data.copy_from_slice(&string.as_bytes_with_nul()[offset..offset + data.len()])
+                })?;
+            addrs.push(T::from_usize(self.sp));
         }
         addrs.reverse();
         Ok(addrs)
     }

-    fn push_pointers(mm_list: &MMList, sp: &mut VAddr, mut pointers: Vec<u32>) -> KResult<()> {
-        pointers.push(0);
-        *sp = *sp - pointers.len() * size_of::<u32>();
+    fn push_args(&mut self) -> KResult<Vec<T>> {
+        let mut addrs = Vec::with_capacity(self.args.len());
+        for string in self.args.iter().rev() {
+            let len = string.as_bytes_with_nul().len();
+            self.sp -= len;
+            self.mm_list
+                .access_mut(VAddr::from(self.sp), len, |offset, data| {
+                    data.copy_from_slice(&string.as_bytes_with_nul()[offset..offset + data.len()])
+                })?;
+            addrs.push(T::from_usize(self.sp));
+        }
+        addrs.reverse();
+        Ok(addrs)
+    }

-        mm_list.access_mut(*sp, pointers.len() * size_of::<u32>(), |offset, data| {
-            data.copy_from_slice(unsafe {
-                core::slice::from_raw_parts(
-                    pointers.as_ptr().byte_add(offset) as *const u8,
-                    data.len(),
-                )
-            })
-        })?;
+    fn stack_alignment(&mut self) {
+        let aux_vec_size = (self.aux_vec.table().len() + 1) * (size_of::<T>() * 2);
+        let envp_pointers_size = (self.envs.len() + 1) * size_of::<T>();
+        let argv_pointers_size = (self.args.len() + 1) * size_of::<T>();
+        let argc_size = size_of::<T>();
+        let all_size = aux_vec_size + envp_pointers_size + argv_pointers_size + argc_size;
+
+        let align_sp = (self.sp - all_size).align_down(16);
+        self.sp = align_sp + all_size;
+    }
+
+    fn push_pointers(&mut self, mut pointers: Vec<T>) -> KResult<()> {
+        pointers.push(T::from_usize(0));
+        self.sp -= pointers.len() * size_of::<T>();
+
+        self.mm_list.access_mut(
+            VAddr::from(self.sp),
+            pointers.len() * size_of::<T>(),
+            |offset, data| {
+                data.copy_from_slice(unsafe {
+                    core::slice::from_raw_parts(
+                        pointers.as_ptr().byte_add(offset) as *const u8,
+                        data.len(),
+                    )
+                })
+            },
+        )?;

         Ok(())
     }

-    fn push_u32(mm_list: &MMList, sp: &mut VAddr, val: u32) -> KResult<()> {
-        *sp = *sp - size_of::<u32>();
+    fn push_argc(&mut self, val: T) -> KResult<()> {
+        self.sp -= size_of::<T>();

-        mm_list.access_mut(*sp, size_of::<u32>(), |_, data| {
-            data.copy_from_slice(unsafe {
-                core::slice::from_raw_parts(&val as *const _ as *const u8, data.len())
-            })
-        })?;
+        self.mm_list
+            .access_mut(VAddr::from(self.sp), size_of::<T>(), |_, data| {
+                data.copy_from_slice(unsafe {
+                    core::slice::from_raw_parts(&val as *const _ as *const u8, data.len())
+                })
+            })?;

         Ok(())
     }

-    fn push_aux_vec(mm_list: &MMList, sp: &mut VAddr, aux_vec: AuxVec<u32>) -> KResult<()> {
-        let mut longs: Vec<u32> = vec![];
+    fn push_aux_vec(&mut self) -> KResult<()> {
+        let mut longs: Vec<T> = vec![];

         // Write Auxiliary vectors
-        let aux_vec: Vec<_> = aux_vec
+        let aux_vec: Vec<_> = self
+            .aux_vec
             .table()
             .iter()
             .map(|(aux_key, aux_value)| (*aux_key, *aux_value))
             .collect();

         for (aux_key, aux_value) in aux_vec.iter() {
-            longs.push(*aux_key as u32);
+            longs.push(T::from_usize(*aux_key as usize));
             longs.push(*aux_value);
         }

         // Write NULL auxiliary
-        longs.push(AuxKey::AT_NULL as u32);
-        longs.push(0);
-
-        *sp = *sp - longs.len() * size_of::<u32>();
-
-        mm_list.access_mut(*sp, longs.len() * size_of::<u32>(), |offset, data| {
-            data.copy_from_slice(unsafe {
-                core::slice::from_raw_parts(
-                    longs.as_ptr().byte_add(offset) as *const u8,
-                    data.len(),
-                )
-            })
-        })?;
+        longs.push(T::from_usize(AuxKey::AT_NULL as usize));
+        longs.push(T::from_usize(0));
+
+        self.sp -= longs.len() * size_of::<T>();
+
+        self.mm_list.access_mut(
+            VAddr::from(self.sp),
+            longs.len() * size_of::<T>(),
+            |offset, data| {
+                data.copy_from_slice(unsafe {
+                    core::slice::from_raw_parts(
+                        longs.as_ptr().byte_add(offset) as *const u8,
+                        data.len(),
+                    )
+                })
+            },
+        )?;

         Ok(())
     }
 }
-
-pub struct Elf64 {
-    elf_header: ElfHeader<P64>,
-    program_headers: Vec<ProgramHeader64>,
-}
-
-impl Elf64 {
-    // const LDSO_BASE_ADDR: usize = 0xffff00000000;
-    // const STACK_BASE_ADDR: usize = 0xffff_ff00_0000;
-    // const DYN_BASE_ADDR: usize = 0xaaaa00000000;
-
-    fn parse(file: Arc<Dentry>) -> KResult<Self> {
-        todo!()
-    }
-
-    fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
-        todo!()
-    }
-}
-
-pub enum Elf {
-    ELF32(Elf32),
-    ELF64(Elf64),
-}
-
-impl Elf {
-    pub fn parse(elf_file: Arc<Dentry>) -> KResult<Self> {
-        let mut header_pt1 = UninitBuffer::<HeaderPt1>::new();
-        elf_file.read(&mut header_pt1, 0)?;
-
-        let header_pt1 = header_pt1.assume_init().map_err(|_| ENOEXEC)?;
-        assert_eq!(header_pt1.magic, ELF_MAGIC);
-
-        match header_pt1.class() {
-            Class::ThirtyTwo => Ok(Elf::ELF32(Elf32::parse(elf_file)?)),
-            Class::SixtyFour => Ok(Elf::ELF64(Elf64::parse(elf_file)?)),
-            _ => Err(ENOEXEC),
-        }
-    }
-
-    pub fn load(&self, args: Vec<CString>, envs: Vec<CString>) -> KResult<LoadInfo> {
-        match &self {
-            Elf::ELF32(elf32) => elf32.load(args, envs),
-            Elf::ELF64(elf64) => elf64.load(args, envs),
-        }
-    }
-}