# bugfix in archive generator: avoid assert triggered for uncacheable files
The problem was that the FILE_CACHED_AT_HIGHER_LEVEL flag prevented a buffer from being added to the cache, which caused trace_entry_causes_io to leak a buffer it had allocated. Documented the entire problem thoroughly. wdbg_sym: no longer complain if stack trace generation merely returned a warning. This was SVN commit r3886.
This commit is contained in:
parent
6cf040b325
commit
e2ce59292d
@ -1141,6 +1141,24 @@ LibError file_buf_set_real_fn(FileIOBuf buf, const char* atom_fn)
|
||||
}
|
||||
|
||||
|
||||
// if file_cache_add-ing the given buffer, would it be added?
|
||||
// this is referenced by trace_entry_causes_io; see explanation there.
|
||||
bool file_cache_would_add(size_t size, const char* UNUSED(atom_fn),
|
||||
uint file_flags)
|
||||
{
|
||||
// caller is saying this file shouldn't be cached here.
|
||||
if(file_flags & FILE_CACHED_AT_HIGHER_LEVEL)
|
||||
return false;
|
||||
|
||||
// refuse to cache 0-length files (it would have no benefit and
|
||||
// causes problems due to divide-by-0).
|
||||
if(size == 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// "give" <buf> to the cache, specifying its size and owner filename.
|
||||
// since this data may be shared among users of the cache, it is made
|
||||
// read-only (via MMU) to make sure no one can corrupt/change it.
|
||||
@ -1152,13 +1170,7 @@ LibError file_cache_add(FileIOBuf buf, size_t size, const char* atom_fn,
|
||||
{
|
||||
debug_assert(buf);
|
||||
|
||||
// caller is saying this file shouldn't be cached here.
|
||||
if(file_flags & FILE_CACHED_AT_HIGHER_LEVEL)
|
||||
return INFO_SKIPPED;
|
||||
|
||||
// refuse to cache 0-length files (it would have no benefit and
|
||||
// causes problems due to divide-by-0).
|
||||
if(size == 0)
|
||||
if(!file_cache_would_add(size, atom_fn, file_flags))
|
||||
return INFO_SKIPPED;
|
||||
|
||||
// assign cost
|
||||
|
@ -69,6 +69,11 @@ extern void file_buf_add_padding(FileIOBuf exact_buf, size_t exact_size, size_t
|
||||
// afile_read sets things right by calling this.
|
||||
extern LibError file_buf_set_real_fn(FileIOBuf buf, const char* atom_fn);
|
||||
|
||||
// if file_cache_add-ing the given buffer, would it be added?
|
||||
// this is referenced by trace_entry_causes_io; see explanation there.
|
||||
extern bool file_cache_would_add(size_t size, const char* atom_fn,
|
||||
uint file_flags);
|
||||
|
||||
// "give" <buf> to the cache, specifying its size and owner filename.
|
||||
// since this data may be shared among users of the cache, it is made
|
||||
// read-only (via MMU) to make sure no one can corrupt/change it.
|
||||
|
@ -349,6 +349,18 @@ void trace_gen_random(size_t num_entries)
|
||||
|
||||
// simulate carrying out the entry's TraceOp to determine
|
||||
// whether this IO would be satisfied by the file_buf cache.
|
||||
//
|
||||
// note: TO_IO's handling of uncached buffers means the simulated and
|
||||
// real cache contents will diverge if the real caller doesn't free their
|
||||
// buffer immediately.
|
||||
// this is a bit of a bother, but only slightly influences results
|
||||
// because it works by affecting the cache allocator's eviction pattern.
|
||||
// alternatives:
|
||||
// - only allocate if file_cache_would_add. this would actually
|
||||
// cause divergence whenever skipping any allocation, which is worse.
|
||||
// - maintain a list of "buffers we allocated" and use that instead of
|
||||
// file_cache_retrieve in TO_FREE. this would keep both caches in sync but
|
||||
// add considerable complexity (function would no longer be "stateless").
|
||||
bool trace_entry_causes_io(const TraceEntry* ent)
|
||||
{
|
||||
uint fb_flags = FB_NO_STATS;
|
||||
@ -367,17 +379,25 @@ bool trace_entry_causes_io(const TraceEntry* ent)
|
||||
if(file_flags & FILE_WRITE)
|
||||
return false;
|
||||
buf = file_cache_retrieve(atom_fn, &size, fb_flags);
|
||||
// would not be in cache: add to list of real IOs
|
||||
// would not be in cache
|
||||
if(!buf)
|
||||
{
|
||||
buf = file_buf_alloc(size, atom_fn, fb_flags);
|
||||
(void)file_cache_add(buf, size, atom_fn, file_flags);
|
||||
LibError ret = file_cache_add(buf, size, atom_fn, file_flags);
|
||||
// the cache decided not to add buf (see file_cache_would_add).
|
||||
// since TO_FREE below uses the cache to find out which
|
||||
// buffer was allocated for atom_fn, we have to free it manually.
|
||||
// see note above.
|
||||
if(ret == INFO_SKIPPED)
|
||||
(void)file_buf_free(buf, fb_flags);
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TO_FREE:
|
||||
buf = file_cache_retrieve(atom_fn, &size, fb_flags|FB_NO_ACCOUNTING);
|
||||
// note: if buf == 0, file_buf_free is a no-op. this happens in the
|
||||
// abovementioned cached-at-higher-level case.
|
||||
(void)file_buf_free(buf, fb_flags);
|
||||
break;
|
||||
default:
|
||||
|
@ -1895,7 +1895,7 @@ const wchar_t* debug_dump_stack(wchar_t* buf, size_t max_chars, uint skip, void*
|
||||
ptr_reset_visited();
|
||||
|
||||
LibError err = walk_stack(dump_frame_cb, 0, skip, (const CONTEXT*)pcontext);
|
||||
if(err != ERR_OK)
|
||||
if(err < 0)
|
||||
out(L"(error while building stack trace: %d)", err);
|
||||
|
||||
unlock();
|
||||
|
Loading…
Reference in New Issue
Block a user