#include "mul.h"
#include "ncmul.h"
#include "power.h"
+#include "relational.h"
#include "symmetry.h"
#include "lst.h"
#include "print.h"
{
GINAC_ASSERT(seq.size() > 0);
- if (is_of_type(c, print_tree)) {
+ if (is_a<print_tree>(c)) {
c.s << std::string(level, ' ') << class_name()
<< std::hex << ", hash=0x" << hashvalue << ", flags=0x" << flags << std::dec
} else {
- bool is_tex = is_of_type(c, print_latex);
+ bool is_tex = is_a<print_latex>(c);
const ex & base = seq[0];
- bool need_parens = is_ex_exactly_of_type(base, add) || is_ex_exactly_of_type(base, mul)
- || is_ex_exactly_of_type(base, ncmul) || is_ex_exactly_of_type(base, power)
- || is_ex_of_type(base, indexed);
+
+ if (precedence() <= level)
+ c.s << (is_tex ? "{(" : "(");
if (is_tex)
c.s << "{";
- if (need_parens)
- c.s << "(";
- base.print(c);
- if (need_parens)
- c.s << ")";
+ base.print(c, precedence());
if (is_tex)
c.s << "}";
printindices(c, level);
+ if (precedence() <= level)
+ c.s << (is_tex ? ")}" : ")");
}
}
exvector::const_iterator it=seq.begin() + 1, itend = seq.end();
- if (is_of_type(c, print_latex)) {
+ if (is_a<print_latex>(c)) {
// TeX output: group by variance
bool first = true;
/** Rename dummy indices in an expression.
*
- * @param e Expression to be worked on
+ * @param e Expression to work on
* @param local_dummy_indices The set of dummy indices that appear in the
* expression "e"
* @param global_dummy_indices The set of dummy indices that have appeared
}
}
+/** Given a set of indices, extract those of class varidx, i.e. the indices
+ *  that carry variance information and are thus candidates for
+ *  repositioning (raising/lowering).
+ *
+ *  @param v Vector of indices to scan
+ *  @param variant_indices Receives the varidx objects found in 'v' (they
+ *         are appended; the vector is not cleared first) */
+static void find_variant_indices(const exvector & v, exvector & variant_indices)
+{
+ exvector::const_iterator it1, itend;
+ for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
+ if (is_exactly_a<varidx>(*it1))
+ variant_indices.push_back(*it1);
+ }
+}
+
+/** Raise/lower dummy indices in a single indexed object to canonicalize their
+ * variance.
+ *
+ * @param e Object to work on
+ * @param variant_dummy_indices The set of indices that might need repositioning (will be changed by this function)
+ * @param moved_indices The set of indices that have been repositioned (will be changed by this function)
+ * @return true if 'e' was changed */
+bool reposition_dummy_indices(ex & e, exvector & variant_dummy_indices, exvector & moved_indices)
+{
+ bool something_changed = false;
+
+ // If a dummy index is encountered for the first time in the
+ // product, pull it up, otherwise, pull it down
+ // (seq[0] is the base expression, so iteration starts at it2start + 1)
+ exvector::const_iterator it2, it2start, it2end;
+ for (it2start = ex_to<indexed>(e).seq.begin(), it2end = ex_to<indexed>(e).seq.end(), it2 = it2start + 1; it2 != it2end; ++it2) {
+ // Only indices with variance (varidx) can be repositioned
+ if (!is_exactly_a<varidx>(*it2))
+ continue;
+
+ // Case 1: first occurrence of this dummy index in the product;
+ // canonicalize it to contravariant ("pull it up")
+ exvector::iterator vit, vitend;
+ for (vit = variant_dummy_indices.begin(), vitend = variant_dummy_indices.end(); vit != vitend; ++vit) {
+ if (it2->op(0).is_equal(vit->op(0))) {
+ if (ex_to<varidx>(*it2).is_covariant()) {
+ // The two-relation lst makes this a simultaneous
+ // substitution that swaps the co- and contravariant
+ // versions of the index within 'e' (two sequential
+ // substitutions would map both onto one variant)
+ e = e.subs(lst(
+ *it2 == ex_to<varidx>(*it2).toggle_variance(),
+ ex_to<varidx>(*it2).toggle_variance() == *it2
+ ));
+ something_changed = true;
+ // subs() returned a new expression, so re-seat our
+ // iterators into its index sequence (the old offset
+ // 'it2 - it2start' is still a valid element count)
+ it2 = ex_to<indexed>(e).seq.begin() + (it2 - it2start);
+ it2start = ex_to<indexed>(e).seq.begin();
+ it2end = ex_to<indexed>(e).seq.end();
+ }
+ // Mark this index as handled; erase() invalidates vit but
+ // we leave the loop immediately via the goto
+ moved_indices.push_back(*vit);
+ variant_dummy_indices.erase(vit);
+ goto next_index;
+ }
+ }
+
+ // Case 2: the index was already repositioned earlier; make this
+ // occurrence covariant to match ("pull it down")
+ for (vit = moved_indices.begin(), vitend = moved_indices.end(); vit != vitend; ++vit) {
+ if (it2->op(0).is_equal(vit->op(0))) {
+ if (ex_to<varidx>(*it2).is_contravariant()) {
+ e = e.subs(*it2 == ex_to<varidx>(*it2).toggle_variance());
+ something_changed = true;
+ // Re-seat iterators after subs(), as above
+ it2 = ex_to<indexed>(e).seq.begin() + (it2 - it2start);
+ it2start = ex_to<indexed>(e).seq.begin();
+ it2end = ex_to<indexed>(e).seq.end();
+ }
+ goto next_index;
+ }
+ }
+
+next_index: ;
+ }
+
+ return something_changed;
+}
+
+/* Ordering that only compares the base expressions of indexed objects.
+ * Indexed objects compare by their op(0) (the base expression); anything
+ * else compares as a whole. A strict weak ordering via ex::compare(), used
+ * to bring products into a canonical order that is independent of the
+ * index positions. */
+struct ex_base_is_less : public std::binary_function<ex, ex, bool> {
+ bool operator() (const ex &lh, const ex &rh) const
+ {
+ return (is_a<indexed>(lh) ? lh.op(0) : lh).compare(is_a<indexed>(rh) ? rh.op(0) : rh) < 0;
+ }
+};
+
/** Simplify product of indexed expressions (commutative, noncommutative and
* simple squares), return list of free indices. */
ex simplify_indexed_product(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
// Find free indices (concatenate them all and call find_free_and_dummy())
// and all dummy indices that appear
exvector un, individual_dummy_indices;
- it1 = v.begin(); itend = v.end();
- while (it1 != itend) {
+ for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
exvector free_indices_of_factor;
if (is_ex_of_type(*it1, indexed)) {
exvector dummy_indices_of_factor;
} else
free_indices_of_factor = it1->get_free_indices();
un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
- it1++;
}
exvector local_dummy_indices;
find_free_and_dummy(un, free_indices, local_dummy_indices);
local_dummy_indices.insert(local_dummy_indices.end(), individual_dummy_indices.begin(), individual_dummy_indices.end());
+ // Filter out the dummy indices with variance
+ exvector variant_dummy_indices;
+ find_variant_indices(local_dummy_indices, variant_dummy_indices);
+
+ // Any indices with variance present at all?
+ if (!variant_dummy_indices.empty()) {
+
+ // Yes, bring the product into a canonical order that only depends on
+ // the base expressions of indexed objects
+ if (!non_commutative)
+ std::sort(v.begin(), v.end(), ex_base_is_less());
+
+ exvector moved_indices;
+
+ // Iterate over all indexed objects in the product
+ for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
+ if (!is_ex_of_type(*it1, indexed))
+ continue;
+
+ if (reposition_dummy_indices(*it1, variant_dummy_indices, moved_indices))
+ something_changed = true;
+ }
+ }
+
ex r;
if (something_changed)
r = non_commutative ? ex(ncmul(v, true)) : ex(mul(v));
return r;
}
+/** This structure stores the original and symmetrized versions of terms
+ * obtained during the simplification of sums. Each term is split into a
+ * numeric coefficient and the term proper so that terms which are equal up
+ * to a coefficient can be recognized. */
+class symminfo {
+public:
+ symminfo() {}
+ ~symminfo() {}
+
+ /** Construct from a symmetrized term and the original term it came from.
+ *
+ * @param symmterm_ One term of the symmetrized expression; split into
+ * 'coeff' and 'symmterm'
+ * @param orig_ The unsymmetrized source term; stored in 'orig' with its
+ * coefficient stripped
+ *
+ * NOTE(review): for a mul, the overall coefficient is taken to be the
+ * last operand (op(nops()-1)) -- presumably GiNaC's canonical mul
+ * layout; confirm against the mul class. */
+ symminfo(const ex & symmterm_, const ex & orig_)
+ {
+ if (is_a<mul>(orig_)) {
+ // Strip the coefficient from the original term
+ ex tmp = orig_.op(orig_.nops()-1);
+ orig = orig_ / tmp;
+ } else
+ orig = orig_;
+
+ if (is_a<mul>(symmterm_)) {
+ // Separate coefficient and term proper
+ coeff = symmterm_.op(symmterm_.nops()-1);
+ symmterm = symmterm_ / coeff;
+ } else {
+ coeff = 1;
+ symmterm = symmterm_;
+ }
+ }
+
+ // Member-wise copy (kept explicit in this code base)
+ symminfo(const symminfo & other)
+ {
+ symmterm = other.symmterm;
+ coeff = other.coeff;
+ orig = other.orig;
+ }
+
+ // Member-wise assignment with self-assignment guard
+ const symminfo & operator=(const symminfo & other)
+ {
+ if (this != &other) {
+ symmterm = other.symmterm;
+ coeff = other.coeff;
+ orig = other.orig;
+ }
+ return *this;
+ }
+
+ ex symmterm; ///< symmetrized term without coefficient
+ ex coeff; ///< coefficient of symmetrized term
+ ex orig; ///< original term without coefficient
+};
+
+/** Strict weak ordering on symminfo: lexicographic comparison by
+ * symmterm, then orig, then coeff (each via ex::compare()). Sorting with
+ * this ordering places entries with equal symmetrized terms next to each
+ * other so they can be grouped in a single pass. */
+class symminfo_is_less {
+public:
+ bool operator() (const symminfo & si1, const symminfo & si2)
+ {
+ int comp = si1.symmterm.compare(si2.symmterm);
+ if (comp < 0) return true;
+ if (comp > 0) return false;
+ comp = si1.orig.compare(si2.orig);
+ if (comp < 0) return true;
+ if (comp > 0) return false;
+ comp = si1.coeff.compare(si2.coeff);
+ if (comp < 0) return true;
+ return false;
+ }
+};
+
/** Simplify indexed expression, return list of free indices. */
ex simplify_indexed(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
{
ex e_expanded = e.expand();
// Simplification of single indexed object: just find the free indices
- // and perform dummy index renaming
+ // and perform dummy index renaming/repositioning
if (is_ex_of_type(e_expanded, indexed)) {
+
+ // Find the dummy indices
const indexed &i = ex_to<indexed>(e_expanded);
exvector local_dummy_indices;
find_free_and_dummy(i.seq.begin() + 1, i.seq.end(), free_indices, local_dummy_indices);
+
+ // Filter out the dummy indices with variance
+ exvector variant_dummy_indices;
+ find_variant_indices(local_dummy_indices, variant_dummy_indices);
+
+ // Any indices with variance present at all?
+ if (!variant_dummy_indices.empty()) {
+
+ // Yes, reposition them
+ exvector moved_indices;
+ reposition_dummy_indices(e_expanded, variant_dummy_indices, moved_indices);
+ }
+
+ // Rename the dummy indices
return rename_dummy_indices(e_expanded, dummy_indices, local_dummy_indices);
}
}
}
+ // If the sum turns out to be zero, we are finished
+ if (sum.is_zero()) {
+ free_indices.clear();
+ return sum;
+ }
+
+ // Symmetrizing over the dummy indices may cancel terms
+ int num_terms_orig = (is_a<add>(sum) ? sum.nops() : 1);
+ if (num_terms_orig > 1 && dummy_indices.size() >= 2) {
+
+ // Construct list of all dummy index symbols
+ lst dummy_syms;
+ for (int i=0; i<dummy_indices.size(); i++)
+ dummy_syms.append(dummy_indices[i].op(0));
+
+ // Symmetrize each term separately and store the resulting
+ // terms in a list of symminfo structures
+ std::vector<symminfo> v;
+ for (int i=0; i<sum.nops(); i++) {
+ ex sum_symm = sum.op(i).symmetrize(dummy_syms);
+ if (is_a<add>(sum_symm))
+ for (int j=0; j<sum_symm.nops(); j++)
+ v.push_back(symminfo(sum_symm.op(j), sum.op(i)));
+ else
+ v.push_back(symminfo(sum_symm, sum.op(i)));
+ }
+
+ // Now add up all the unsymmetrized versions of the terms that
+ // did not cancel out in the symmetrization
+ exvector result;
+ std::sort(v.begin(), v.end(), symminfo_is_less());
+ for (std::vector<symminfo>::iterator i=v.begin(); i!=v.end(); ) {
+ std::vector<symminfo>::iterator j = i;
+ for (j++; j!=v.end() && i->symmterm == j->symmterm; j++) ;
+ for (std::vector<symminfo>::iterator k=i; k!=j; k++)
+ result.push_back((k->coeff)*(i->orig));
+ i = j;
+ }
+ ex sum_symm = (new add(result))->setflag(status_flags::dynallocated);
+ if (sum_symm.is_zero())
+ free_indices.clear();
+ return sum_symm;
+ }
+
return sum;
}