Mirror of https://github.com/lune-org/lune.git
Synced 2024-12-12 04:50:36 +00:00

Make clippy happy

This commit is contained in:
parent bcfc7d2f55
commit b79d3ce4e2

6 changed files with 46 additions and 30 deletions

@@ -39,7 +39,7 @@ impl<'lua> FromLua<'lua> for LuauCompileOptions {
 
         let get_and_check = |name: &'static str| -> LuaResult<Option<u8>> {
             match t.get(name)? {
-                Some(n @ (0 | 1 | 2)) => Ok(Some(n)),
+                Some(n @ (0..=2)) => Ok(Some(n)),
                 Some(n) => Err(LuaError::runtime(format!(
                     "'{name}' must be one of: 0, 1, or 2 - got {n}"
                 ))),

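The hunk above replaces an or-pattern with an equivalent range pattern, which is the change clippy's manual_range_patterns lint suggests (lint name assumed, it is not shown in the diff). A minimal standalone sketch of the equivalence, using a hypothetical helper that is not part of the lune codebase:

// Both arms below accept exactly the values 0, 1, and 2.
fn is_valid_level(n: u8) -> bool {
    match n {
        0..=2 => true, // same set of values as `0 | 1 | 2`, written as a range
        _ => false,
    }
}

fn main() {
    assert!(is_valid_level(2));
    assert!(!is_valid_level(3));
}
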
@@ -1,4 +1,4 @@
-use std::{cell::Cell, sync::Arc};
+use std::sync::Arc;
 
 use hyper::upgrade::Upgraded;
 use mlua::prelude::*;

@@ -46,7 +46,7 @@ return freeze(setmetatable({
 
 #[derive(Debug)]
 pub struct NetWebSocket<T> {
-    close_code: Arc<Cell<Option<u16>>>,
+    close_code: Arc<AsyncMutex<Option<u16>>>,
     read_stream: Arc<AsyncMutex<SplitStream<WebSocketStream<T>>>>,
     write_stream: Arc<AsyncMutex<SplitSink<WebSocketStream<T>, WsMessage>>>,
 }

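A plausible reading of this change (an assumption, the commit message only says clippy was unhappy): Cell<Option<u16>> is not Sync, so an Arc around it is neither Send nor Sync and trips clippy's arc_with_non_send_sync lint, the same lint the scheduler hunk later silences with an #[allow]. Switching the field to an async mutex (assuming AsyncMutex aliases tokio::sync::Mutex, with the sync feature enabled) makes the shared state thread-safe. A minimal sketch with hypothetical struct names:

use std::{cell::Cell, sync::Arc};
use tokio::sync::Mutex as AsyncMutex;

// Arc<Cell<_>> compiles, but Cell is !Sync, so the Arc is neither Send nor
// Sync; clippy::arc_with_non_send_sync flags Arc::new calls like this one.
#[allow(dead_code)]
struct NotThreadSafe {
    close_code: Arc<Cell<Option<u16>>>,
}

// Arc<Mutex<_>> from tokio is Send + Sync (for Send contents) and can be
// cloned freely into spawned tasks.
struct ThreadSafe {
    close_code: Arc<AsyncMutex<Option<u16>>>,
}

fn main() {
    let shared = ThreadSafe {
        close_code: Arc::new(AsyncMutex::new(None)),
    };
    let _clone = Arc::clone(&shared.close_code); // fine to move across threads
}
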
@@ -69,7 +69,7 @@ where
         let (write, read) = value.split();
 
         Self {
-            close_code: Arc::new(Cell::new(None)),
+            close_code: Arc::new(AsyncMutex::new(None)),
             read_stream: Arc::new(AsyncMutex::new(read)),
             write_stream: Arc::new(AsyncMutex::new(write)),
         }

@@ -137,10 +137,16 @@ fn close_code<'lua, T>(
 where
     T: AsyncRead + AsyncWrite + Unpin,
 {
-    Ok(match socket.close_code.get() {
-        Some(code) => LuaValue::Number(code as f64),
-        None => LuaValue::Nil,
-    })
+    Ok(
+        match *socket
+            .close_code
+            .try_lock()
+            .expect("Failed to lock close code")
+        {
+            Some(code) => LuaValue::Number(code as f64),
+            None => LuaValue::Nil,
+        },
+    )
 }
 
 async fn close<'lua, T>(

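Worth noting for the new close_code body above: if AsyncMutex is tokio's Mutex (assumed from the other hunks), try_lock() does not wait; it returns Err when the lock is already held, so the .expect(...) turns contention into a panic instead of awaiting. A small sketch of that behaviour, assuming a tokio dependency with the rt, macros, and sync features:

use std::sync::Arc;
use tokio::sync::Mutex as AsyncMutex;

#[tokio::main]
async fn main() {
    let close_code: Arc<AsyncMutex<Option<u16>>> = Arc::new(AsyncMutex::new(Some(1000)));

    // Nothing else holds the lock, so try_lock() succeeds immediately.
    let code = *close_code.try_lock().expect("Failed to lock close code");
    assert_eq!(code, Some(1000));

    // If a guard were still alive here, try_lock() would return Err and the
    // expect() above would panic rather than wait for the lock.
    let guard = close_code.lock().await;
    assert!(close_code.try_lock().is_err());
    drop(guard);
}
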
@@ -204,7 +210,8 @@ where
         let msg = match item {
             Ok(Some(WsMessage::Close(msg))) => {
                 if let Some(msg) = &msg {
-                    socket.close_code.replace(Some(msg.code.into()));
+                    let mut code = socket.close_code.lock().await;
+                    *code = Some(msg.code.into());
                 }
                 Ok(Some(WsMessage::Close(msg)))
             }

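Since this message loop already runs in an async context, the replacement can await the lock instead of relying on Cell::replace; the write happens through the guard, and the lock is released when the guard goes out of scope. A minimal sketch (again assuming AsyncMutex is tokio's Mutex, with a hypothetical free function standing in for the surrounding loop):

use std::sync::Arc;
use tokio::sync::Mutex as AsyncMutex;

// Hypothetical stand-in for the close-frame handling in the real loop.
async fn record_close_code(close_code: &Arc<AsyncMutex<Option<u16>>>, new_code: u16) {
    // Suspends until the lock is free, unlike try_lock() which errors out.
    let mut code = close_code.lock().await;
    *code = Some(new_code);
    // The guard is dropped here, releasing the lock for other tasks.
}

#[tokio::main]
async fn main() {
    let close_code = Arc::new(AsyncMutex::new(None));
    record_close_code(&close_code, 1000).await;
    assert_eq!(*close_code.lock().await, Some(1000));
}
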
@@ -57,7 +57,12 @@ impl<'fut> Scheduler<'fut> {
         if thread.status() != LuaThreadStatus::Resumable {
             // NOTE: Threads that were spawned to resume
             // with an error will not have a result sender
-            if let Some(sender) = self.thread_senders.borrow_mut().remove(&thread_id) {
+            if let Some(sender) = self
+                .thread_senders
+                .try_lock()
+                .expect("Failed to get thread senders")
+                .remove(&thread_id)
+            {
                 if sender.receiver_count() > 0 {
                     let stored = match res {
                         Err(e) => Err(e),

@@ -14,8 +14,8 @@ impl<'fut> Scheduler<'fut> {
     pub(super) fn has_thread(&self) -> bool {
         !self
             .threads
-            .try_borrow()
-            .expect("Failed to borrow threads vec")
+            .try_lock()
+            .expect("Failed to lock threads vec")
             .is_empty()
     }
 

@@ -27,9 +27,9 @@ impl<'fut> Scheduler<'fut> {
     pub(super) fn pop_thread(&self) -> LuaResult<Option<SchedulerThread>> {
         match self
             .threads
-            .try_borrow_mut()
+            .try_lock()
             .into_lua_err()
-            .context("Failed to borrow threads vec")?
+            .context("Failed to lock threads vec")?
             .pop_front()
         {
             Some(thread) => Ok(Some(thread)),

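The scheduler hunks all follow one pattern: RefCell borrows become try_lock() calls on an async mutex, and a possible lock failure is surfaced to Lua through into_lua_err() plus context(...), mirroring the old borrow errors. A rough standalone sketch of the same shape, using String as the error type and a hypothetical Thread type so it compiles without mlua:

use std::{collections::VecDeque, sync::Arc};
use tokio::sync::Mutex as AsyncMutex;

// Hypothetical stand-in for SchedulerThread.
struct Thread(u64);

fn pop_thread(threads: &Arc<AsyncMutex<VecDeque<Thread>>>) -> Result<Option<Thread>, String> {
    // try_lock() yields Err when the mutex is held; mapping that error plays
    // the role of `.into_lua_err().context("Failed to lock threads vec")?`.
    Ok(threads
        .try_lock()
        .map_err(|e| format!("Failed to lock threads vec: {e}"))?
        .pop_front())
}

fn main() {
    let threads = Arc::new(AsyncMutex::new(VecDeque::from([Thread(1)])));
    assert!(matches!(pop_thread(&threads), Ok(Some(Thread(1)))));
    assert!(matches!(pop_thread(&threads), Ok(None)));
}
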
@@ -54,9 +54,9 @@ impl<'fut> Scheduler<'fut> {
 
         self.state.set_thread_error(thread_id, err);
         self.threads
-            .try_borrow_mut()
+            .try_lock()
             .into_lua_err()
-            .context("Failed to borrow threads vec")?
+            .context("Failed to lock threads vec")?
             .push_front(thread);
 
         // NOTE: We might be resuming futures, need to signal that a

@@ -83,16 +83,18 @@ impl<'fut> Scheduler<'fut> {
         let thread_id = thread.id();
 
         self.threads
-            .try_borrow_mut()
+            .try_lock()
             .into_lua_err()
-            .context("Failed to borrow threads vec")?
+            .context("Failed to lock threads vec")?
             .push_front(thread);
 
         // NOTE: We might be resuming the same thread several times and
         // pushing it to the scheduler several times before it is done,
         // and we should only ever create one result sender per thread
         self.thread_senders
-            .borrow_mut()
+            .try_lock()
+            .into_lua_err()
+            .context("Failed to lock thread senders vec")?
             .entry(thread_id)
             .or_insert_with(|| SchedulerThreadSender::new(1));
 

@@ -120,16 +122,18 @@ impl<'fut> Scheduler<'fut> {
         let thread_id = thread.id();
 
         self.threads
-            .try_borrow_mut()
+            .try_lock()
             .into_lua_err()
-            .context("Failed to borrow threads vec")?
+            .context("Failed to lock threads vec")?
             .push_back(thread);
 
         // NOTE: We might be resuming the same thread several times and
         // pushing it to the scheduler several times before it is done,
         // and we should only ever create one result sender per thread
         self.thread_senders
-            .borrow_mut()
+            .try_lock()
+            .into_lua_err()
+            .context("Failed to lock thread senders vec")?
             .entry(thread_id)
             .or_insert_with(|| SchedulerThreadSender::new(1));
 

@@ -149,7 +153,7 @@ impl<'fut> Scheduler<'fut> {
         thread_id: SchedulerThreadId,
     ) -> LuaResult<LuaMultiValue<'a>> {
         let mut recv = {
-            let senders = self.thread_senders.borrow();
+            let senders = self.thread_senders.lock().await;
             let sender = senders
                 .get(&thread_id)
                 .expect("Tried to wait for thread that is not queued");

@@ -1,5 +1,4 @@
 use std::{
-    cell::RefCell,
     collections::{HashMap, VecDeque},
     pin::Pin,
     sync::Arc,

@@ -37,8 +36,8 @@ type SchedulerFuture<'fut> = Pin<Box<dyn Future<Output = ()> + 'fut>>;
 #[derive(Debug, Clone)]
 pub(crate) struct Scheduler<'fut> {
     state: Arc<SchedulerState>,
-    threads: Arc<RefCell<VecDeque<SchedulerThread>>>,
-    thread_senders: Arc<RefCell<HashMap<SchedulerThreadId, SchedulerThreadSender>>>,
+    threads: Arc<AsyncMutex<VecDeque<SchedulerThread>>>,
+    thread_senders: Arc<AsyncMutex<HashMap<SchedulerThreadId, SchedulerThreadSender>>>,
     /*
         FUTURE: Get rid of these, let the tokio runtime handle running
         and resumption of futures completely, just use our scheduler

@@ -64,11 +63,12 @@ impl<'fut> Scheduler<'fut> {
     /**
         Creates a new scheduler.
     */
+    #[allow(clippy::arc_with_non_send_sync)] // FIXME: Clippy lints our tokio mutexes that are definitely Send + Sync
     pub fn new() -> Self {
         Self {
             state: Arc::new(SchedulerState::new()),
-            threads: Arc::new(RefCell::new(VecDeque::new())),
-            thread_senders: Arc::new(RefCell::new(HashMap::new())),
+            threads: Arc::new(AsyncMutex::new(VecDeque::new())),
+            thread_senders: Arc::new(AsyncMutex::new(HashMap::new())),
             futures_lua: Arc::new(AsyncMutex::new(FuturesUnordered::new())),
             futures_background: Arc::new(AsyncMutex::new(FuturesUnordered::new())),
         }

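The new #[allow(clippy::arc_with_non_send_sync)] attribute scopes the suppression to Scheduler::new only; the FIXME treats the lint as a false positive here, though whether it is depends on the contained types being Send, which this diff does not show. A small sketch of how such an item-level allow behaves, using a hypothetical function with RefCell just to trigger the same lint:

use std::{cell::RefCell, sync::Arc};

// The allow applies to this item only; Arc::new calls elsewhere in the crate
// are still checked by clippy.
#[allow(clippy::arc_with_non_send_sync)]
fn shared_counter() -> Arc<RefCell<u32>> {
    Arc::new(RefCell::new(0))
}

fn main() {
    let counter = shared_counter();
    *counter.borrow_mut() += 1;
    assert_eq!(*counter.borrow(), 1);
}
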
@@ -118,8 +118,8 @@ pub fn pretty_format_value(
             COLOR_GREEN.apply_to(
                 s.to_string_lossy()
                     .replace('"', r#"\""#)
-                    .replace('\r', r#"\r"#)
-                    .replace('\n', r#"\n"#)
+                    .replace('\r', r"\r")
+                    .replace('\n', r"\n")
             )
         )?,
         LuaValue::Table(ref tab) => {

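The final hunk drops the # delimiters from raw string literals that contain no quote characters, which is what clippy's needless_raw_string_hashes lint asks for (lint name assumed). The literals are unchanged in value:

fn main() {
    // Raw strings never process escapes; the hashes are only required when
    // the literal itself contains a double quote, as in r#"\""# above.
    assert_eq!(r#"\r"#, r"\r");
    assert_eq!(r"\r", "\\r");
    assert_eq!(r"\n", "\\n");
}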