The driver functions are factored out into a template parameter, the io_t class, while io_t itself is also a template whose parameter is the driver class for the specific flash part. A minimal usage sketch of how the layers plug together follows after the listing.
#pragma once
#include <cstddef>
#include <cstdint>
#include <string_view>
#include "ch32v30x.h"
namespace storage
{
using std::string_view ;
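// This struct spells out the interface the w25qxx_tt template below expects
// from its io parameter: the template calls these as static functions
// (io_t::init(), io_t::xfer(), io_t::cs_low()/cs_high(), io_t::delay_us()),
// presumably supplied by a board-specific class such as w25qxx_io_t.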
struct io_t
{
void (*init)() ;
unsigned char (*xfer)(const uint8_t data) ;
void (*cs_low)() ;
void (*cs_high)() ;
void (*delay_ms)(const size_t val) ;
void (*delay_us)(const size_t val) ; // the template below calls io_t::delay_us() in power_down()/wakeup()
};
template <typename io_t>
class w25qxx_tt
{
public:
/* Winbond SPI Flash IDs */
#define W25Q80 0xef13
#define W25Q16 0xef14
#define W25Q32 0xef15
#define W25Q64 0xef16
#define W25Q128 0xef17
#define W25Q256 0xef18
#define W25Q512 0xef19
#define W25Q01 0xef20
#define W25Q02 0xef21
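// 0xEF is the Winbond manufacturer ID; the low byte is the capacity code
// returned by read_id() (ManufactDeviceID command).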
enum class command_t : uint8_t
{
ManufactDeviceID = 0x90,
ReadStatusReg = 0x05,
WriteEnable = 0x06,
WriteDisable = 0x04,
PowerDown = 0xB9,
ReleasePowerDown = 0xAB,
SectorErase = 0x20,
SectorErase32addr= 0x21,
PageProgram = 0x02,
PageProgram32addr= 0x12,
ReadData = 0x03,
ReadData32addr = 0x13,
ChipErase = 0xC7,
Enter32addr = 0xb7,
Exit32addr = 0xe9,
};
static constexpr string_view part_name[] =
{
/* 128Kb */ "w25q10", //
/* 256Kb */ "w25q2x", //
/* 512Kb */ "w25q4x", //
/* 1Mb */ "w25q8x", //
/* 2Mb */ "w25q16", //
/* 4Mb */ "w25q32", //
/* 8Mb */ "w25q64", // 22
/* 16Mb */ "w25q128",//
/* 32Mb */ "w25q256",//
/* 64Mb */ "w25q512",//
/* 128Mb */ "w25q01", //
/* 256Mb */ "w25q02", //
};
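// part_name is indexed by (device_id & 0xff) - 0x10; see info() below.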
__ai__ w25qxx_tt() noexcept { init(); }
__ai__ ~w25qxx_tt() noexcept {}
template <const command_t val>
__ais__ void command()
{
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(val));
io_t::cs_high();
}
__ais__ void write_enable(void) { command<command_t::WriteEnable>(); }
__ais__ void write_disable(void){ command<command_t::WriteDisable>();}
__ais__ void four_byte_addr_enable(void) { command<command_t::Enter32addr>(); }
__ais__ void four_byte_addr_disable(void){ command<command_t::Exit32addr>();}
__ais__ uint8_t read_sr(void)
{
uint8_t byte;
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::ReadStatusReg));
byte = io_t::xfer(0Xff);
io_t::cs_high();
return byte;
}
__ais__ bool busy() noexcept { return read_sr() & 0x01 ; }
__ais__ void wait_busy(void) noexcept
{
while ( busy() );
}
__ais__ bool wait_busy(size_t& counter) noexcept
{
while ( busy() && counter-- ) ;
return busy() ;
}
template<typename T>
__ais__ int read(const T& c, const uint32_t sector, const size_t off, uint8_t* data, const size_t size) noexcept
{
const uint32_t addr = sector * sector_size + off ;
unsigned short i;
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::ReadData));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
for (i = 0; i < size; i++)
((uint8_t *)data)[i] = io_t::xfer(0xff);
io_t::cs_high();
// c.con < "\tbd read s" < sector < " o" < off < " z" < size <<= c.con.endl;
return 0;
}
template<typename T>
__ais__ int read32addr(const T& c, const uint32_t sector, const size_t off, uint8_t* data, const size_t size) noexcept
{
const uint32_t addr = sector * sector_size + off ;
unsigned short i;
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::ReadData32addr));
io_t::xfer((uint8_t) ((addr) >> 24));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
for (i = 0; i < size; i++)
((uint8_t *)data)[i] = io_t::xfer(0xff);
io_t::cs_high();
// c.con < "\tbd read s" < sector < " o" < off < " z" < size <<= c.con.endl;
return 0;
}
//--------------------------------------------------
template<typename T>
__ais__ int write(const T& c, const uint32_t sector, const size_t off, const uint8_t* data, const size_t size) noexcept
{
const uint32_t addr = sector * sector_size + off ;
unsigned short i;
write_enable();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::PageProgram));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
for (i = 0; i < size; i++)
io_t::xfer(data[i]);
io_t::cs_high();
wait_busy();
// c.con < "\tbd write s" < sector < " o" < off < " z" < size <<= c.con.endl;
return 0;
}
template<typename T>
__ais__ int write32addr(const T& c, const uint32_t sector, const size_t off, const uint8_t* data, const size_t size) noexcept
{
const uint32_t addr = sector * sector_size + off ;
unsigned short i;
write_enable();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::PageProgram32addr));
io_t::xfer((uint8_t) ((addr) >> 24));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
for (i = 0; i < size; i++)
io_t::xfer(data[i]);
io_t::cs_high();
wait_busy();
// c.con < "\tbd write s" < sector < " o" < off < " z" < size <<= c.con.endl;
return 0;
}
//--------------------------------------------------
template<typename T>
__ais__ int erase(const T& c, const uint32_t sector) noexcept
{
const size_t addr = sector * sector_size;
write_enable();
wait_busy();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::SectorErase));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
io_t::cs_high();
wait_busy();
// c.con < "\tbd erase " < sector <<= c.con.endl;
return 0;
}
template<typename T>
__ais__ int erase32addr(const T& c, const uint32_t sector) noexcept
{
const size_t addr = sector * sector_size;
write_enable();
wait_busy();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::SectorErase32addr));
io_t::xfer((uint8_t) ((addr) >> 24));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
io_t::cs_high();
wait_busy();
// c.con < "\tbd erase " < sector <<= c.con.endl;
return 0;
}
__ais__ int erase(const uint32_t sector) noexcept
{
const size_t addr = sector * sector_size;
write_enable();
wait_busy();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::SectorErase));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
io_t::cs_high();
wait_busy();
return 0;
}
__ais__ int erase32addr(const uint32_t sector) noexcept
{
const size_t addr = sector * sector_size;
write_enable();
wait_busy();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::SectorErase32addr));
io_t::xfer((uint8_t) ((addr) >> 24));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
io_t::cs_high();
wait_busy();
return 0;
}
__ais__ void erase_chip(void) noexcept
{
write_enable();
wait_busy();
command<command_t::ChipErase>();
wait_busy();
}
__ais__ void erase_chip_async(void) noexcept
{
write_enable();
wait_busy();
command<command_t::ChipErase>();
}
__ais__ void write_page(unsigned int addr, const char *data, unsigned short size) noexcept
{
unsigned short i;
write_enable();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::PageProgram));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
for (i = 0; i < size; i++)
io_t::xfer(data[i]);
io_t::cs_high();
wait_busy();
}
__ais__ void write_page32addr(unsigned int addr, const char *data, unsigned short size) noexcept
{
unsigned short i;
write_enable();
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::PageProgram32addr));
io_t::xfer((uint8_t) ((addr) >> 24));
io_t::xfer((uint8_t) ((addr) >> 16));
io_t::xfer((uint8_t) ((addr) >> 8));
io_t::xfer((uint8_t) addr);
for (i = 0; i < size; i++)
io_t::xfer(data[i]);
io_t::cs_high();
wait_busy();
}
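// write_nocheck / write_nocheck32addr: split an arbitrary-length write into
// chunks so that no single PageProgram crosses a 256-byte page boundary;
// the caller must ensure the target area is already erased.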
__ais__ void write_nocheck(unsigned int addr, const char *data, size_t size) noexcept
{
unsigned short pageremain;
pageremain = 256 - addr % 256;
if (size <= pageremain)
pageremain = size;
while (1)
{
write_page( addr, data, pageremain);
if (size == pageremain)
break;
else
{
data += pageremain;
addr += pageremain;
size -= pageremain;
if (size > 256)
pageremain = 256;
else
pageremain = size;
}
}
}
__ais__ void write_nocheck32addr(unsigned int addr, const char *data, size_t size) noexcept
{
unsigned short pageremain;
pageremain = 256 - addr % 256;
if (size <= pageremain)
pageremain = size;
while (1)
{
write_page32addr( addr, data, pageremain);
if (size == pageremain)
break;
else
{
data += pageremain;
addr += pageremain;
size -= pageremain;
if (size > 256)
pageremain = 256;
else
pageremain = size;
}
}
}
__ais__ unsigned short read_id(void) noexcept
{
unsigned short Temp = 0;
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::ManufactDeviceID));
io_t::xfer(0x00);
io_t::xfer(0x00);
io_t::xfer(0x00);
Temp |= io_t::xfer(0xFF) << 8;
Temp |= io_t::xfer(0xFF);
io_t::cs_high();
return Temp;
}
__ais__ void read_bytes(unsigned int addr, char *data, size_t size) noexcept
{
unsigned short i;
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::ReadData));
io_t::xfer((unsigned char) ((addr) >> 16));
io_t::xfer((unsigned char) ((addr) >> 8));
io_t::xfer((unsigned char) addr);
for (i = 0; i < size; i++)
data[i] = io_t::xfer(0xff);
io_t::cs_high();
}
__ais__ void read_bytes32addr(unsigned int addr, char *data, size_t size) noexcept
{
unsigned short i;
io_t::cs_low();
io_t::xfer( static_cast<uint8_t>(command_t::ReadData32addr));
io_t::xfer((unsigned char) ((addr) >> 24));
io_t::xfer((unsigned char) ((addr) >> 16));
io_t::xfer((unsigned char) ((addr) >> 8));
io_t::xfer((unsigned char) addr);
for (i = 0; i < size; i++)
data[i] = io_t::xfer(0xff);
io_t::cs_high();
}
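// write_bytes / write_bytes32addr: read-modify-write helpers. Each 4 KB sector
// touched is read into a temporary heap buffer (new[]); if any byte in the
// target range is not 0xFF, the sector is erased and rewritten with the merged
// data, otherwise the new data is programmed directly via write_nocheck.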
__ais__ void write_bytes(unsigned int addr, char *data, size_t size) noexcept
{
unsigned int secpos;
unsigned short secoff, secremain, i;
char* flash_buffer = new char [sector_size] ;
secpos = addr / sector_size;
secoff = addr % sector_size;
secremain = sector_size - secoff;
if (size <= secremain) secremain = size;
while (1)
{
read_bytes(secpos * sector_size, flash_buffer, sector_size);
for (i = 0; i < secremain; i++)
{
if (flash_buffer[secoff + i] != 0Xff) break;
}
if (i < secremain)
{
erase(secpos);
for (i = 0; i < secremain; i++) flash_buffer[i + secoff] = data[i];
write_nocheck(secpos * sector_size, flash_buffer, sector_size);
}
else
write_nocheck(addr, data, secremain);
if (size == secremain) break;
else
{
secpos++;
secoff = 0;
data += secremain;
addr += secremain;
size -= secremain;
if (size > sector_size)
secremain = sector_size;
else
secremain = size;
}
}
delete [] flash_buffer ;
}
__ais__ void write_bytes32addr(unsigned int addr, char *data, size_t size) noexcept
{
unsigned int secpos;
unsigned short secoff, secremain, i;
char* flash_buffer = new char [sector_size] ;
secpos = addr / sector_size;
secoff = addr % sector_size;
secremain = sector_size - secoff;
if (size <= secremain) secremain = size;
while (1)
{
read_bytes32addr(secpos * sector_size, flash_buffer, sector_size);
for (i = 0; i < secremain; i++)
{
if (flash_buffer[secoff + i] != 0Xff) break;
}
if (i < secremain)
{
erase32addr(secpos);
for (i = 0; i < secremain; i++) flash_buffer[i + secoff] = data[i];
write_nocheck32addr(secpos * sector_size, flash_buffer, sector_size);
}
else
write_nocheck32addr(addr, data, secremain);
if (size == secremain) break;
else
{
secpos++;
secoff = 0;
data += secremain;
addr += secremain;
size -= secremain;
if (size > sector_size)
secremain = sector_size;
else
secremain = size;
}
}
delete [] flash_buffer ;
}
constexpr __ais__ void power_down(void) noexcept { command<command_t::PowerDown>(); io_t::delay_us(3); }
constexpr __ais__ void wakeup(void) noexcept { command<command_t::ReleasePowerDown>(); io_t::delay_us(3); }
constexpr __ais__ uint32_t get_size() noexcept { return get_size(read_id()) ; }
constexpr __ais__ uint32_t sector_count() noexcept { return get_size() / sector_size ; }
constexpr __ais__ void init() noexcept { io_t::init(); wakeup(); }
constexpr __ais__ auto info() noexcept
{
const auto device_id = read_id();
struct ret_t { const uint16_t id_code ;
const string_view part_name ;
const size_t size ;
const size_t pages;
const size_t sectors;
const size_t blocks32k;
const size_t blocks64k;
} ;
const auto part_index = ( device_id & 0xff) ;
const auto size = get_size(device_id) ;
return ret_t
{ device_id,
part_name[part_index - 16],
size,
size / 256,
size / ( 4*1024),
size / (32*1024),
size / (64*1024)
} ;
}
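// Example: for a W25Q64 (read_id() == 0xef16) info() yields size = 8 MB,
// 32768 pages, 2048 sectors, 256 32K blocks, 128 64K blocks, part_name "w25q64".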
static constexpr size_t page_size = 256 ;
static constexpr size_t sector_size = 4096 ;
static constexpr size_t sector_cycle = 100000 ;
protected:
constexpr __ais__ uint32_t get_size(const uint16_t id) noexcept { return 1 << ((id & 0xff) + 1) ;}
private:
};
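// w25qxx_io_t is not defined in this header; it is expected to come from the
// board-specific SPI/GPIO code (see the io_t interface description at the top).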
typedef w25qxx_tt<w25qxx_io_t> w25qxx_t ;
#ifdef __APP_WRAPS_IMPL__
__attribute__((used, init_priority (net_constructor_priority))) w25qxx_t w25qxx ;
#else
extern w25qxx_t w25qxx;
#endif
}
template <typename T> static int stub(const T& context) {return 0;}
template <typename T> static int stub_lock (const T& context) { /*context.con <<= " lock " ;*/ return 0;}
template <typename T> static int stub_unlock(const T& context) { /*context.con <<= " unlock " ;*/ return 0;}
// Calculate CRC-32 with polynomial = 0x04c11db7, software CRC implementation with small lookup table
static uint32_t crc32(const uint32_t val, const void *buffer, const size_t size) noexcept
{
static constexpr uint32_t rtable[16] =
{ 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
const uint8_t* data = static_cast<const uint8_t*>(buffer);
auto crc = val ;
for (size_t i = 0; i < size; i++) [[likely]] { crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf]; crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf]; }
return crc;
}
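// Sanity check (standard CRC-32/ISO-HDLC): with init 0xFFFFFFFF and a final
// XOR, crc32(0xFFFFFFFF, "123456789", 9) ^ 0xFFFFFFFF should equal 0xCBF43926.
// crchw below presumably forwards to a hardware CRC unit wrapped by a global
// `crc` object defined elsewhere in the project.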
static uint32_t crchw( const uint32_t val, const uint8_t* buffer, const size_t size) noexcept
{
return crc.process(val, buffer,size);
}
template <typename S /*storage type*/>
struct io_tt
{
typedef S storage_t ;
__ai__ io_tt () { storage_t::four_byte_addr_enable(); }
//static constexpr struct context_t { con_t& con ; } context {::con} ;
static constexpr struct context_t {} context {} ;
static constexpr uint32_t read_size = storage_t::page_size ; // Minimum size of a block read in bytes. All read operations will be a multiple of this value.
static constexpr uint32_t write_size = storage_t::page_size ; // Minimum size of a block write in bytes. All program operations will be a multiple of this value.
static constexpr uint32_t sector_size = storage_t::sector_size ; // Size of an erasable sector in bytes. This does not impact ram consumption and may be larger than the physical erase size. However, non-inlined files take up at minimum one block. Must be a multiple of the read and program sizes.
// Number of erase cycles before littlefs evicts metadata logs and moves
// the metadata to another block. Suggested values are in the
// range 100-1000, with large values having better performance at the cost
// of less consistent wear distribution.
//
// Set to -1 to disable block-level wear-leveling.
static constexpr uint32_t sector_cycles = storage_t::sector_cycle;
// Size of block caches in bytes. Each cache buffers a portion of a block in
// RAM. The littlefs needs a read cache, a program cache, and one additional
// cache per file. Larger caches can improve performance by storing more
// data and reducing the number of disk accesses. Must be a multiple of the
// read and program sizes, and a factor of the block size.
static constexpr uint32_t cache_size = storage_t::page_size ;
// Size of the lookahead buffer in bytes. A larger lookahead buffer
// increases the number of blocks found during an allocation pass. The
// lookahead buffer is stored as a compact bitmap, so each byte of RAM
// can track 8 blocks.
static constexpr uint32_t lookahead_size = storage_t::page_size ;
// Threshold for metadata compaction during lfs_fs_gc in bytes. Metadata
// pairs that exceed this threshold will be compacted during lfs_fs_gc.
// Defaults to ~88% sector_size when zero, though the default may change
// in the future.
//
// Note this only affects lfs_fs_gc. Normal compactions still only occur
// when full.
//
// Set to -1 to disable metadata compaction during lfs_fs_gc.
static constexpr uint32_t compact_thresh = 0;
// Optional upper limit on total space given to metadata pairs in bytes. On
// devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
// can help bound the metadata compaction time. Must be <= sector_size.
// Defaults to sector_size when zero.
static constexpr uint32_t metadata_max = 0 ;
// Optional upper limit on inlined files in bytes. Inlined files live in
// metadata and decrease storage requirements, but may be limited to
// improve metadata-related performance. Must be <= cache_size, <=
// attr_max, and <= sector_size/8. Defaults to the largest possible
// inline_max when zero.
//
// Set to -1 to disable inlined files.
static constexpr uint32_t inline_max = 0 ;
// Read a region in a block. Negative error codes are propagated
// to the user.
static constexpr int (*read)(const context_t& context, const uint32_t sector, const size_t off, uint8_t* buffer, const size_t size) = storage_t::read32addr ;
// Program a region in a block. The block must have previously
// been erased. Negative error codes are propagated to the user.
// May return LFS_ERR_CORRUPT if the block should be considered bad.
static constexpr int (*write)(const context_t& context, const uint32_t sector, const size_t off, const uint8_t* buffer, const size_t size) = storage_t::write32addr ;
// Erase a sector. A sector must be erased before being programmed.
// The state of an erased sector is undefined. Negative error codes
// are propagated to the user.
// May return LFS_ERR_CORRUPT if the sector should be considered bad.
static constexpr int (*erase)(const context_t& context, const uint32_t sector) = storage_t::erase32addr ;
// Sync the state of the underlying sector device. Negative error codes
// are propagated to the user.
static constexpr int (*sync)(const context_t& context) = stub ;
// Calculate CRC-32
static constexpr uint32_t (*crc)(const uint32_t val, const void *buffer, const size_t size) = crc32 ;
struct lock_t
{
__ai__ lock_t() noexcept { stub_lock (context); }
__ai__ ~lock_t() noexcept { stub_unlock(context); }
} ;
// Statically allocated read buffer. Must be cache_size.
static uint8_t read_buffer[cache_size] ;
// Statically allocated program buffer. Must be cache_size.
static uint8_t write_buffer[cache_size];
// Statically allocated lookahead buffer. Must be lookahead_size.
static uint8_t lookahead_buffer[lookahead_size];
// dependencies on libc-like function implementations
static void* memcpy (void *__restrict dst0 , const void *__restrict src0 , size_t len0) noexcept { return kgp_sdk_libc::memcpy (dst0, src0 , len0); }
static void* memset (void* m, int c, size_t n) noexcept { return kgp_sdk_libc::memset(m, c, n); }
static int memcmp(const void* m1, const void* m2, size_t n) noexcept { return kgp_sdk_libc::memcmp (m1, m2, n); }
static char* strchr(const char* s , const int i) noexcept { return kgp_sdk_libc::strchr(s, i);}
static size_t strspn (const char* s1, const char* s2) noexcept { return kgp_sdk_libc::strspn(s1, s2);}
static size_t strcspn(const char* s, const char* reject) noexcept { return kgp_sdk_libc::strcspn(s, reject);}
static char* strcpy (char* __restrict dst, const char* __restrict src) noexcept { return kgp_sdk_libc::strcpy(dst, src); }
static size_t strlen (const char* str) noexcept { return kgp_sdk_libc::strlen(str); }
};
typedef io_tt<storage::w25qxx_t> io_t ;
struct klfs_format_config_t
{
static constexpr uint32_t (*sector_count)() = []() -> uint32_t { return storage::w25qxx.sector_count(); } ;
// Optional upper limit on length of file names in bytes. No downside for
// larger names except the size of the info struct which is controlled by
// the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX or name_max stored on
// disk when zero.
static constexpr uint32_t name_max = klfs_tt<void>::name_max;
// Optional upper limit on files in bytes. No downside for larger files
// but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX or file_max stored
// on disk when zero.
static constexpr uint32_t file_max = klfs_tt<void>::file_max;
// Optional upper limit on custom attributes in bytes. No downside for
// larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
// LFS_ATTR_MAX or attr_max stored on disk when zero.
static constexpr uint32_t attr_max = klfs_tt<void>::attr_max;
} ;
typedef klfs_tt<io_t> klfs_t ;
#ifdef __APP_WRAPS_IMPL__
__attribute__((used, init_priority (net_constructor_priority))) klfs_t klfs ;
template<> uint8_t io_t::read_buffer [io_t::cache_size] = {};
template<> uint8_t io_t::write_buffer [io_t::cache_size] = {};
template<> uint8_t io_t::lookahead_buffer[io_t::lookahead_size] = {};
#else
extern klfs_t klfs ;
#endif
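For reference, a minimal sketch of how the two layers are meant to be composed: a board-specific io class feeding w25qxx_tt, and (optionally) io_tt wrapping the resulting driver for littlefs. Everything below is illustrative and not from the original post: the name spi_flash_io_t and the empty SPI/GPIO bodies are placeholders to be filled with CH32V30x-specific code, and building the header above additionally assumes the project-side __ai__/__ais__ macros, kgp_sdk_libc and klfs_tt are available.
// Hypothetical board glue: provides the static interface w25qxx_tt expects.
struct spi_flash_io_t
{
    static void    init()                   { /* set up SPI peripheral + CS pin */ }
    static uint8_t xfer(const uint8_t d)    { /* clock d out, return the byte clocked in */ (void)d; return 0xff; }
    static void    cs_low()                 { /* drive flash CS low  */ }
    static void    cs_high()                { /* drive flash CS high */ }
    static void    delay_us(const size_t v) { /* busy-wait v microseconds  */ (void)v; }
    static void    delay_ms(const size_t v) { /* busy-wait v milliseconds */ (void)v; }
};
typedef storage::w25qxx_tt<spi_flash_io_t> flash_t;   // flash driver over the io glue
// typedef io_tt<flash_t> bd_t;                       // littlefs block device over the driver
static int flash_demo()
{
    flash_t flash;                               // ctor runs io init + wakeup
    const unsigned short id = flash.read_id();   // e.g. 0xef16 for a W25Q64
    flash.erase(0);                              // erase 4 KB sector 0
    flash.write_page(0, "hello", 5);             // program inside one 256-byte page
    char buf[8] = {};
    flash.read_bytes(0, buf, 5);
    return (id >> 8) == 0xef ? 0 : -1;           // crude Winbond sanity check
}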
- Oh, now it's clearer. I immediately ran into the macros. framer(1095 chars, 13.01.2025 21:04)
- These days it's hard to find a 32-bit controller without DMA... with
byte-by-byte SPI transfers, the inlining/non-inlining whims of compiler
writers alone can give you such scatter that all these measurements are just
a special case. And you're lucky: you pick flash parts without uneven page
sizes across the die (in C++ that should, of course, be easier to handle)...
As for wait_busy, it implies either dumb busy-waiting or handing things off
to preemption. With two devices on one SPI bus the solution will be
considerably Vit(54 chars, 13.01.2025 00:53)
- How to pull in DMA support is clear to me. I've already stuffed in
synchronization support for multithreading. What I meant were the properties of littlefs itself. - klen(13.01.2025 01:57)
- There's still a pile of sector-to-cluster glue issues to solve there.
That's what DMA lands on in the first place, but it sits a layer above the
bare driver, i.e. this isn't that driver. And where the waiting semantics
are plainly visible, again no return statuses are visible. It probably
looks simpler, but IMHO this topic was chewed through long ago, and the
implementations compete precisely on synchronization and caching; that's
where the interesting part is, not in the API. - Vit(13.01.2025 09:13)
- The API matters too; it reflects the levers of the algorithms. Given my scenario, what I started all this for in the first place, I'm thinking of trying to add cache control so I can do read-ahead on a file, or somehow fold that into the state machine. When working with a video card driver you can nowadays tune the bus policy, i.e. give the driver a priori information about what the file will be used for so it can balance the bus. I want the same here. I need reads to always come from the cache. The one disappointment: the ch32vxxx has no klen(27 chars, 13.01.2025 12:51)