
The following patch lifts the further restriction which limited _BitInt
to at most 16319 bits, raising the limit to 65535 bits.

The problem was mainly in the INTEGER_CST representation, which described
lengths in number of 64-bit limbs using 3 unsigned char members that were
meant to fit into 32 bits.  This patch removes the third member, which was
just a cache to save a few compile time cycles for wi::to_offset, and
enlarges the other two members to unsigned short.

Furthermore, the same problem existed in some uses of trailing_wide_int*
(in value-range-storage*) and in value-range-storage* itself, while other
uses of trailing_wide_int* have been fine (e.g. CONST_POLY_INT, where no
constants will be larger than 3/5/9/11 limbs depending on target, so the
255 limit is plenty).  The patch turns all those length representations
into unsigned short for consistency, so value-range-storage* can handle
even 16320-65535 bit BITINT_TYPE ranges.  The cc1plus growth is about 16K,
so not really significant for a 38M .text section.

Note, the reason for the new limit is the unsigned int precision : 16;
bit-field used for TYPE_PRECISION.  If we wanted to overcome that,
TYPE_PRECISION would need to use some other member for BITINT_TYPE than
the one used by all the other types, and we could that way reach a limit
of 4194239 bits (65535 * 64 - 1, again implied by INTEGER_CST and
value-range-storage*).  Dunno if that is worth it or if it is something
we want to do for GCC 14 though.

2023-10-14  Jakub Jelinek  <jakub@redhat.com>

	PR c/102989
gcc/
	* tree-core.h (struct tree_base): Remove int_length.offset
	member, change type of int_length.unextended and
	int_length.extended from unsigned char to unsigned short.
	* tree.h (TREE_INT_CST_OFFSET_NUNITS): Remove.
	(wi::extended_tree <N>::get_len): Don't use
	TREE_INT_CST_OFFSET_NUNITS, instead compute it at runtime from
	TREE_INT_CST_EXT_NUNITS and TREE_INT_CST_NUNITS.
	* tree.cc (wide_int_to_tree_1): Don't assert
	TREE_INT_CST_OFFSET_NUNITS value.
	(make_int_cst): Don't initialize TREE_INT_CST_OFFSET_NUNITS.
	* wide-int.h (WIDE_INT_MAX_ELTS): Change from 255 to 1024.
	(WIDEST_INT_MAX_ELTS): Change from 510 to 2048, adjust comment.
	(trailing_wide_int_storage): Change m_len type from unsigned char *
	to unsigned short *.
	(trailing_wide_int_storage::trailing_wide_int_storage): Change
	second argument from unsigned char * to unsigned short *.
	(trailing_wide_ints): Change m_max_len type from unsigned char to
	unsigned short.  Change m_len element type from
	struct {unsigned char len;} to unsigned short.
	(trailing_wide_ints <N>::operator []): Remove .len from m_len
	accesses.
	* value-range-storage.h (irange_storage::lengths_address): Change
	return type from const unsigned char * to const unsigned short *.
	(irange_storage::write_lengths_address): Change return type from
	unsigned char * to unsigned short *.
	* value-range-storage.cc (irange_storage::write_lengths_address):
	Likewise.
	(irange_storage::lengths_address): Change return type from
	const unsigned char * to const unsigned short *.
	(write_wide_int): Change len argument type from unsigned char *&
	to unsigned short *&.
	(irange_storage::set_irange): Change len variable type from
	unsigned char * to unsigned short *.
	(read_wide_int): Change len argument type from unsigned char to
	unsigned short.  Use trailing_wide_int_storage <unsigned short>
	instead of trailing_wide_int_storage and
	trailing_wide_int <unsigned short> instead of trailing_wide_int.
	(irange_storage::get_irange): Change len variable type from
	unsigned char * to unsigned short *.
	(irange_storage::size): Multiply n by sizeof (unsigned short)
	in len_size variable initialization.
	(irange_storage::dump): Change len variable type from
	unsigned char * to unsigned short *.
gcc/cp/
	* module.cc (trees_out::start, trees_in::start): Remove
	TREE_INT_CST_OFFSET_NUNITS handling.
gcc/testsuite/
	* gcc.dg/bitint-38.c: Change into dg-do run test; in addition to
	checking the addition, division and right shift results at compile
	time, check them also at runtime.
	* gcc.dg/bitint-39.c: New test.
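
For illustration only, a test in the spirit of the new bitint-39.c could
look like the sketch below; the actual test's contents are not reproduced
here, and the dg- directives, the bitint effective target and the
__BITINT_MAXWIDTH__ guard are assumptions modelled on the existing bitint
tests:

  /* Hypothetical sketch, not the actual gcc.dg/bitint-39.c.  */
  /* { dg-do compile { target bitint } } */
  /* { dg-options "-std=c23" } */

  #if __BITINT_MAXWIDTH__ >= 65535
  /* 16320 to 65535 bits were rejected before this patch and are
     accepted now.  */
  _BitInt(16320) a;
  _BitInt(65535) b;
  #endif

  /* 65535 remains a hard upper bound, because TYPE_PRECISION is a
     16-bit bit-field; e.g. _BitInt(65536) is still rejected.  */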
gcc/value-range-storage.cc:
/* Support routines for vrange storage.
   Copyright (C) 2022-2023 Free Software Foundation, Inc.
   Contributed by Aldy Hernandez <aldyh@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "gimple-range.h"
#include "value-range-storage.h"

// Generic memory allocator to share one interface between GC and
// obstack allocators.

class vrange_internal_alloc
{
public:
  vrange_internal_alloc () { }
  virtual ~vrange_internal_alloc () { }
  virtual void *alloc (size_t size) = 0;
  virtual void free (void *) = 0;
private:
  DISABLE_COPY_AND_ASSIGN (vrange_internal_alloc);
};

class vrange_obstack_alloc final: public vrange_internal_alloc
{
public:
  vrange_obstack_alloc ()
  {
    obstack_init (&m_obstack);
  }
  virtual ~vrange_obstack_alloc () final override
  {
    obstack_free (&m_obstack, NULL);
  }
  virtual void *alloc (size_t size) final override
  {
    return obstack_alloc (&m_obstack, size);
  }
  virtual void free (void *) final override { }
private:
  obstack m_obstack;
};

class vrange_ggc_alloc final: public vrange_internal_alloc
{
public:
  vrange_ggc_alloc () { }
  virtual ~vrange_ggc_alloc () final override { }
  virtual void *alloc (size_t size) final override
  {
    return ggc_internal_alloc (size);
  }
  virtual void free (void *p) final override
  {
    return ggc_free (p);
  }
};

vrange_allocator::vrange_allocator (bool gc)
{
  if (gc)
    m_alloc = new vrange_ggc_alloc;
  else
    m_alloc = new vrange_obstack_alloc;
}

vrange_allocator::~vrange_allocator ()
{
  delete m_alloc;
}

void *
vrange_allocator::alloc (size_t size)
{
  return m_alloc->alloc (size);
}

void
vrange_allocator::free (void *p)
{
  m_alloc->free (p);
}

// Allocate a new vrange_storage object initialized to R and return
// it.

vrange_storage *
vrange_allocator::clone (const vrange &r)
{
  return vrange_storage::alloc (*m_alloc, r);
}

vrange_storage *
vrange_allocator::clone_varying (tree type)
{
  if (irange::supports_p (type))
    return irange_storage::alloc (*m_alloc, int_range <1> (type));
  if (frange::supports_p (type))
    return frange_storage::alloc (*m_alloc, frange (type));
  return NULL;
}

vrange_storage *
vrange_allocator::clone_undefined (tree type)
{
  if (irange::supports_p (type))
    return irange_storage::alloc (*m_alloc, int_range<1> ());
  if (frange::supports_p (type))
    return frange_storage::alloc (*m_alloc, frange ());
  return NULL;
}

// Allocate a new vrange_storage object initialized to R and return
// it.  Return NULL if R is unsupported.

vrange_storage *
vrange_storage::alloc (vrange_internal_alloc &allocator, const vrange &r)
{
  if (is_a <irange> (r))
    return irange_storage::alloc (allocator, as_a <irange> (r));
  if (is_a <frange> (r))
    return frange_storage::alloc (allocator, as_a <frange> (r));
  return NULL;
}

// Set storage to R.

void
vrange_storage::set_vrange (const vrange &r)
{
  if (is_a <irange> (r))
    {
      irange_storage *s = static_cast <irange_storage *> (this);
      gcc_checking_assert (s->fits_p (as_a <irange> (r)));
      s->set_irange (as_a <irange> (r));
    }
  else if (is_a <frange> (r))
    {
      frange_storage *s = static_cast <frange_storage *> (this);
      gcc_checking_assert (s->fits_p (as_a <frange> (r)));
      s->set_frange (as_a <frange> (r));
    }
  else
    gcc_unreachable ();
}

// Restore R from storage.

void
vrange_storage::get_vrange (vrange &r, tree type) const
{
  if (is_a <irange> (r))
    {
      const irange_storage *s = static_cast <const irange_storage *> (this);
      s->get_irange (as_a <irange> (r), type);
    }
  else if (is_a <frange> (r))
    {
      const frange_storage *s = static_cast <const frange_storage *> (this);
      s->get_frange (as_a <frange> (r), type);
    }
  else
    gcc_unreachable ();
}

// Return TRUE if storage can fit R.

bool
vrange_storage::fits_p (const vrange &r) const
{
  if (is_a <irange> (r))
    {
      const irange_storage *s = static_cast <const irange_storage *> (this);
      return s->fits_p (as_a <irange> (r));
    }
  if (is_a <frange> (r))
    {
      const frange_storage *s = static_cast <const frange_storage *> (this);
      return s->fits_p (as_a <frange> (r));
    }
  gcc_unreachable ();
  return false;
}

// Return TRUE if the range in storage is equal to R.  It is the
// caller's responsibility to verify that the type of the range in
// storage matches that of R.

bool
vrange_storage::equal_p (const vrange &r) const
{
  if (is_a <irange> (r))
    {
      const irange_storage *s = static_cast <const irange_storage *> (this);
      return s->equal_p (as_a <irange> (r));
    }
  if (is_a <frange> (r))
    {
      const frange_storage *s = static_cast <const frange_storage *> (this);
      return s->equal_p (as_a <frange> (r));
    }
  gcc_unreachable ();
}

//============================================================================
// irange_storage implementation
//============================================================================

unsigned short *
irange_storage::write_lengths_address ()
{
  return (unsigned short *)&m_val[(m_num_ranges * 2 + 2)
                                  * WIDE_INT_MAX_HWIS (m_precision)];
}

const unsigned short *
irange_storage::lengths_address () const
{
  return const_cast <irange_storage *> (this)->write_lengths_address ();
}

// Allocate a new irange_storage object initialized to R.

irange_storage *
irange_storage::alloc (vrange_internal_alloc &allocator, const irange &r)
{
  size_t size = irange_storage::size (r);
  irange_storage *p = static_cast <irange_storage *> (allocator.alloc (size));
  new (p) irange_storage (r);
  return p;
}

// Initialize the storage with R.

irange_storage::irange_storage (const irange &r)
  : m_max_ranges (r.num_pairs ())
{
  m_num_ranges = m_max_ranges;
  set_irange (r);
}

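// Write the wide_int W into the HWI buffer at VAL, recording its
// length at LEN.  Advance VAL and LEN past the data written.
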
static inline void
write_wide_int (HOST_WIDE_INT *&val, unsigned short *&len, const wide_int &w)
{
  *len = w.get_len ();
  for (unsigned i = 0; i < *len; ++i)
    *val++ = w.elt (i);
  ++len;
}

// Store R into the current storage.

void
irange_storage::set_irange (const irange &r)
{
  gcc_checking_assert (fits_p (r));

  if (r.undefined_p ())
    {
      m_kind = VR_UNDEFINED;
      return;
    }
  if (r.varying_p ())
    {
      m_kind = VR_VARYING;
      return;
    }

  m_precision = TYPE_PRECISION (r.type ());
  m_num_ranges = r.num_pairs ();
  m_kind = VR_RANGE;

  HOST_WIDE_INT *val = &m_val[0];
  unsigned short *len = write_lengths_address ();

  for (unsigned i = 0; i < r.num_pairs (); ++i)
    {
      write_wide_int (val, len, r.lower_bound (i));
      write_wide_int (val, len, r.upper_bound (i));
    }

  // TODO: We could avoid streaming out the value if the mask is -1.
  irange_bitmask bm = r.m_bitmask;
  write_wide_int (val, len, bm.value ());
  write_wide_int (val, len, bm.mask ());

  if (flag_checking)
    {
      int_range_max tmp;
      get_irange (tmp, r.type ());
      gcc_checking_assert (tmp == r);
    }
}

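// Read into W a wide_int of length LEN and precision PREC from the
// HWI buffer at VAL.
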
static inline void
read_wide_int (wide_int &w,
               const HOST_WIDE_INT *val, unsigned short len, unsigned prec)
{
  trailing_wide_int_storage stow (prec, &len,
                                  const_cast <HOST_WIDE_INT *> (val));
  w = trailing_wide_int (stow);
}

// Restore a range of TYPE from storage into R.

void
irange_storage::get_irange (irange &r, tree type) const
{
  if (m_kind == VR_UNDEFINED)
    {
      r.set_undefined ();
      return;
    }
  if (m_kind == VR_VARYING)
    {
      r.set_varying (type);
      return;
    }

  gcc_checking_assert (TYPE_PRECISION (type) == m_precision);
  const HOST_WIDE_INT *val = &m_val[0];
  const unsigned short *len = lengths_address ();

  // Handle the common case where R can fit the new range.
  if (r.m_max_ranges >= m_num_ranges)
    {
      r.m_kind = VR_RANGE;
      r.m_num_ranges = m_num_ranges;
      r.m_type = type;
      for (unsigned i = 0; i < m_num_ranges * 2; ++i)
        {
          read_wide_int (r.m_base[i], val, *len, m_precision);
          val += *len++;
        }
    }
  // Otherwise build the range piecewise.
  else
    {
      r.set_undefined ();
      for (unsigned i = 0; i < m_num_ranges; ++i)
        {
          wide_int lb, ub;
          read_wide_int (lb, val, *len, m_precision);
          val += *len++;
          read_wide_int (ub, val, *len, m_precision);
          val += *len++;
          int_range<1> tmp (type, lb, ub);
          r.union_ (tmp);
        }
    }

  wide_int bits_value, bits_mask;
  read_wide_int (bits_value, val, *len, m_precision);
  val += *len++;
  read_wide_int (bits_mask, val, *len, m_precision);
  r.m_bitmask = irange_bitmask (bits_value, bits_mask);
  if (r.m_kind == VR_VARYING)
    r.m_kind = VR_RANGE;

  if (flag_checking)
    r.verify_range ();
}

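// Return TRUE if the range in storage is equal to R.
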
bool
irange_storage::equal_p (const irange &r) const
{
  if (m_kind == VR_UNDEFINED || r.undefined_p ())
    return m_kind == r.m_kind;
  if (m_kind == VR_VARYING || r.varying_p ())
    return m_kind == r.m_kind;

  // ?? We could make this faster by doing the comparison in place,
  // without going through get_irange.
  int_range_max tmp;
  get_irange (tmp, r.type ());
  return tmp == r;
}

// Return the size in bytes to allocate storage that can hold R.

size_t
irange_storage::size (const irange &r)
{
  if (r.undefined_p ())
    return sizeof (irange_storage);

  unsigned prec = TYPE_PRECISION (r.type ());
  unsigned n = r.num_pairs () * 2 + 2;
  unsigned hwi_size = ((n * WIDE_INT_MAX_HWIS (prec) - 1)
                       * sizeof (HOST_WIDE_INT));
  unsigned len_size = n * sizeof (unsigned short);
  return sizeof (irange_storage) + hwi_size + len_size;
}

// Return TRUE if R fits in the current storage.

bool
irange_storage::fits_p (const irange &r) const
{
  return m_max_ranges >= r.num_pairs ();
}

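// Dump the current storage to stderr for debugging.
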
void
irange_storage::dump () const
{
  fprintf (stderr, "irange_storage (prec=%d, ranges=%d):\n",
           m_precision, m_num_ranges);

  if (m_num_ranges == 0)
    return;

  const HOST_WIDE_INT *val = &m_val[0];
  const unsigned short *len = lengths_address ();
  int i, j;

  fprintf (stderr, "  lengths = [ ");
  for (i = 0; i < m_num_ranges * 2 + 2; ++i)
    fprintf (stderr, "%d ", len[i]);
  fprintf (stderr, "]\n");

  for (i = 0; i < m_num_ranges; ++i)
    {
      for (j = 0; j < *len; ++j)
        fprintf (stderr, "    [PAIR %d] LB " HOST_WIDE_INT_PRINT_DEC "\n", i,
                 *val++);
      ++len;
      for (j = 0; j < *len; ++j)
        fprintf (stderr, "    [PAIR %d] UB " HOST_WIDE_INT_PRINT_DEC "\n", i,
                 *val++);
      ++len;
    }

  // Dump value/mask pair.
  for (j = 0; j < *len; ++j)
    fprintf (stderr, "    [VALUE] " HOST_WIDE_INT_PRINT_DEC "\n", *val++);
  ++len;
  for (j = 0; j < *len; ++j)
    fprintf (stderr, "    [MASK] " HOST_WIDE_INT_PRINT_DEC "\n", *val++);
}

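// Debugging helper callable from a debugger to dump STORAGE.
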
DEBUG_FUNCTION void
debug (const irange_storage &storage)
{
  storage.dump ();
  fprintf (stderr, "\n");
}

//============================================================================
// frange_storage implementation
//============================================================================

// Allocate a new frange_storage object initialized to R.

frange_storage *
frange_storage::alloc (vrange_internal_alloc &allocator, const frange &r)
{
  size_t size = sizeof (frange_storage);
  frange_storage *p = static_cast <frange_storage *> (allocator.alloc (size));
  new (p) frange_storage (r);
  return p;
}

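// Store R into the current storage.
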
void
frange_storage::set_frange (const frange &r)
{
  gcc_checking_assert (fits_p (r));

  m_kind = r.m_kind;
  m_min = r.m_min;
  m_max = r.m_max;
  m_pos_nan = r.m_pos_nan;
  m_neg_nan = r.m_neg_nan;
}

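// Restore a range of TYPE from storage into R.
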
void
frange_storage::get_frange (frange &r, tree type) const
{
  gcc_checking_assert (r.supports_type_p (type));

  // Handle explicit NANs.
  if (m_kind == VR_NAN)
    {
      if (HONOR_NANS (type))
        {
          if (m_pos_nan && m_neg_nan)
            r.set_nan (type);
          else
            r.set_nan (type, m_neg_nan);
        }
      else
        r.set_undefined ();
      return;
    }
  if (m_kind == VR_UNDEFINED)
    {
      r.set_undefined ();
      return;
    }

  // We use the constructor to create the new range instead of writing
  // out the bits into the frange directly, because the global range
  // being read may be being inlined into a function with different
  // restrictions as when it was originally written.  We want to make
  // sure the resulting range is canonicalized correctly for the new
  // consumer.
  r = frange (type, m_min, m_max, m_kind);

  // The constructor will set the NAN bits for HONOR_NANS, but we must
  // make sure to set the NAN sign if known.
  if (HONOR_NANS (type) && (m_pos_nan ^ m_neg_nan) == 1)
    r.update_nan (m_neg_nan);
  else if (!m_pos_nan && !m_neg_nan)
    r.clear_nan ();
}

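// Return TRUE if the range in storage is equal to R.
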
bool
frange_storage::equal_p (const frange &r) const
{
  if (r.undefined_p ())
    return m_kind == VR_UNDEFINED;

  frange tmp;
  get_frange (tmp, r.type ());
  return tmp == r;
}

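// An frange_storage is fixed size, so any frange fits.
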
bool
frange_storage::fits_p (const frange &) const
{
  return true;
}

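// Singleton GC allocator backing the ggc_alloc_vrange_storage
// overloads below.
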
static vrange_allocator ggc_vrange_allocator (true);

vrange_storage *ggc_alloc_vrange_storage (tree type)
{
  return ggc_vrange_allocator.clone_varying (type);
}

vrange_storage *ggc_alloc_vrange_storage (const vrange &r)
{
  return ggc_vrange_allocator.clone (r);
}