Skip to content

Commit

Permalink
fix bugs in pipe write-end closure detection and in exit_group file cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
Zen4647 committed Nov 14, 2024
1 parent 7380ca7 commit ee23e4d
Show file tree
Hide file tree
Showing 4 changed files with 26 additions and 8 deletions.
12 changes: 9 additions & 3 deletions api/ruxos_posix_api/src/imp/pipe.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
* See the Mulan PSL v2 for more details.
*/

use alloc::sync::Arc;
use alloc::sync::{Weak, Arc};
use core::ffi::c_int;

use axerrno::{LinuxError, LinuxResult};
Expand Down Expand Up @@ -87,6 +87,8 @@ impl PipeRingBuffer {
/// One end of a unidirectional pipe.
///
/// Both ends share the same ring buffer through an `Arc`.
pub struct Pipe {
    // `true` for the read end, `false` for the write end.
    readable: bool,
    buffer: Arc<Mutex<PipeRingBuffer>>,
    // `Some` only on the write end: holding a `Weak` to the shared buffer
    // keeps the buffer's weak count at 1 while the write end is alive, so
    // the read end can detect write-end closure by checking
    // `Arc::weak_count` (see `write_end_close`).
    _write_end_closed: Option<Weak<Mutex<PipeRingBuffer>>>,
}

impl Pipe {
Expand All @@ -95,10 +97,12 @@ impl Pipe {
let read_end = Pipe {
readable: true,
buffer: buffer.clone(),
_write_end_closed: None,
};
let write_end = Pipe {
readable: false,
buffer,
buffer: buffer.clone(),
_write_end_closed: Some(Arc::downgrade(&buffer)),
};
(read_end, write_end)
}
Expand All @@ -112,7 +116,9 @@ impl Pipe {
}

pub fn write_end_close(&self) -> bool {
Arc::strong_count(&self.buffer) == 1
let write_end_count = Arc::weak_count(&self.buffer);
// error!("Pipe::write_end_close <= buffer: {:#?} {:#?}", write_end_count, Arc::as_ptr(&self.buffer));
write_end_count == 0
}
}

Expand Down
9 changes: 6 additions & 3 deletions api/ruxos_posix_api/src/imp/pthread/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use core::cell::UnsafeCell;
use core::ffi::{c_int, c_void};

use axerrno::{LinuxError, LinuxResult};
use ruxtask::AxTaskRef;
use ruxtask::{current, AxTaskRef};
use spin::RwLock;

use crate::ctypes;
Expand Down Expand Up @@ -228,9 +228,12 @@ pub fn sys_pthread_exit(retval: *mut c_void) -> ! {

/// Exits the current thread. The value `retval` will be returned to the joiner.
pub fn sys_exit_group(status: c_int) -> ! {
error!("sys_exit_group <= {:#?}", status);
debug!("sys_exit_group <= status: {:#?}", status);

// TODO: exit all threads, send signal to all threads

// drop all file opened by current task
current().fs.lock().as_mut().unwrap().close_all_files();

#[cfg(feature = "multitask")]
ruxtask::exit(status);
Expand Down Expand Up @@ -322,7 +325,7 @@ pub unsafe fn sys_clone(
TID_TO_PTHREAD.write().insert(tid, ForceSendSync(ptr));
0
};
warn!("will sys_clone <= pid: {}", pid);
debug!("will sys_clone <= pid: {}", pid);
return Ok(pid);
} else {
debug!("ONLY support CLONE_THREAD and SIGCHLD");
Expand Down
3 changes: 1 addition & 2 deletions api/ruxos_posix_api/src/imp/task.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ pub fn sys_getppid() -> c_int {

/// Wait for a child process to exit and return its status.
///
/// TOSO, wstatus, options, and rusage are not implemented yet.
/// TODO: part of options, and rusage are not implemented yet.
pub fn sys_wait4(
pid: c_int,
wstatus: *mut c_int,
Expand Down Expand Up @@ -104,7 +104,6 @@ pub fn sys_wait4(
if parent_pid == ruxtask::current().id().as_u64() {
if task.state() == ruxtask::task::TaskState::Exited {
// add to to_remove list
let task_ref = process_map.get(child_pid).unwrap();
unsafe {
// lower 8 bits of exit_code is the signal number, while upper 8 bits of exit_code is the exit status
// according to "bits/waitstatus.h" in glibc source code.
Expand Down
10 changes: 10 additions & 0 deletions modules/ruxtask/src/fs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,16 @@ pub struct FileSystem {
pub root_dir: Arc<RootDirectory>,
}

impl FileSystem {
    /// Closes every open file descriptor in this task's fd table.
    ///
    /// Each `remove` drops the stored file object, releasing whatever
    /// resource backs it (e.g. a pipe end, so its peer can observe the
    /// closure). Intended for task teardown paths such as `exit_group`.
    pub fn close_all_files(&mut self) {
        for fd in 0..self.fd_table.capacity() {
            // Slots may be sparse; only remove occupied ones. The unwrap
            // cannot fail because we just observed the slot is occupied
            // and we hold `&mut self`.
            if self.fd_table.get(fd).is_some() {
                self.fd_table.remove(fd).unwrap();
            }
        }
    }
}

impl Clone for FileSystem {
fn clone(&self) -> Self {
let mut new_fd_table = FlattenObjects::new();
Expand Down

0 comments on commit ee23e4d

Please sign in to comment.