#include "ncmul.h"
#include "power.h"
#include "lst.h"
+#include "inifcns.h" // for symmetrize()
#include "print.h"
#include "archive.h"
#include "utils.h"
return inherited::info(inf);
}
+/* STL-style binary predicate: returns true iff the value of index 'e'
+ * does NOT have the property 'inf' (an info_flags constant).
+ * Intended for use with find_if() + bind2nd() — see
+ * indexed::all_index_values_are() below. */
+struct idx_is_not : public std::binary_function<ex, unsigned, bool> {
+ bool operator() (const ex & e, unsigned inf) const {
+ return !(ex_to_idx(e).get_value().info(inf));
+ }
+};
+
// Returns true iff every index of this object (seq[1..]) carries a value
// with the property 'inf'.
// NOTE(review): the excerpt shows an unconditional "return false" under the
// "No indices?" comment — a guard such as "if (seq.size() < 2)" appears to
// have been dropped from this chunk; confirm against the full file.
bool indexed::all_index_values_are(unsigned inf) const
{
// No indices? Then no property can be fulfilled
return false;
// Check all indices
- exvector::const_iterator it = seq.begin() + 1, itend = seq.end();
- while (it != itend) {
- GINAC_ASSERT(is_ex_of_type(*it, idx));
- if (!ex_to_idx(*it).get_value().info(inf))
- return false;
- it++;
- }
- return true;
+ // Hand-written loop replaced by find_if() with the idx_is_not predicate:
+ // the property holds for all indices iff no index fails the check.
+ return find_if(seq.begin() + 1, seq.end(), bind2nd(idx_is_not(), inf)) == seq.end();
}
int indexed::compare_same_type(const basic & other) const
// If the base object is a product, pull out the numeric factor
if (is_ex_exactly_of_type(base, mul) && is_ex_exactly_of_type(base.op(base.nops() - 1), numeric)) {
- exvector v = seq;
+ exvector v(seq);
ex f = ex_to_numeric(base.op(base.nops() - 1));
v[0] = seq[0] / f;
return f * thisexprseq(v);
}
// Canonicalize indices according to the symmetry properties
- if (seq.size() > 2 && (symmetry != unknown && symmetry != mixed)) {
- exvector v = seq;
+ if (seq.size() > 2 && (symmetry == symmetric || symmetry == antisymmetric)) {
+ exvector v(seq);
int sig = canonicalize_indices(v.begin() + 1, v.end(), symmetry == antisymmetric);
if (sig != INT_MAX) {
// Something has changed while sorting indices, more evaluations later
if (v1.size() != v2.size())
return false;
- // And also the indices themselves
- exvector::const_iterator ait = v1.begin(), aitend = v1.end(),
- bit = v2.begin(), bitend = v2.end();
- while (ait != aitend) {
- if (!ait->is_equal(*bit))
- return false;
- ait++; bit++;
- }
- return true;
+ return equal(v1.begin(), v1.end(), v2.begin(), ex_is_equal());
}
exvector indexed::get_indices(void) const
return dummy_indices;
}
+/** Check whether this indexed object contains an index that forms a dummy
+ *  (contracted) pair with the given index.
+ *
+ *  @param i Index to test against all of this object's indices (seq[1..])
+ *  @return true if some index of this object is the dummy partner of 'i' */
+bool indexed::has_dummy_index_for(const ex & i) const
+{
+ exvector::const_iterator it = seq.begin() + 1, itend = seq.end();
+ while (it != itend) {
+ if (is_dummy_pair(*it, i))
+ return true;
+ it++;
+ }
+ return false;
+}
+
exvector indexed::get_free_indices(void) const
{
exvector free_indices, dummy_indices;
return basis.get_free_indices();
}
-/* Function object for STL sort() */
-struct ex_is_less {
- bool operator() (const ex &lh, const ex &rh) const
- {
- return lh.compare(rh) < 0;
- }
-};
-
/** Rename dummy indices in an expression.
*
* @param e Expression to be worked on
if (local_size == 0)
return e;
- sort(local_dummy_indices.begin(), local_dummy_indices.end(), ex_is_less());
-
if (global_size < local_size) {
// More local indices than we encountered before, add the new ones
int remaining = local_size - global_size;
exvector::const_iterator it = local_dummy_indices.begin(), itend = local_dummy_indices.end();
while (it != itend && remaining > 0) {
- exvector::const_iterator git = global_dummy_indices.begin(), gitend = global_dummy_indices.end();
- while (git != gitend) {
- if (it->is_equal(*git))
- goto found;
- git++;
+ if (find_if(global_dummy_indices.begin(), global_dummy_indices.end(), bind2nd(ex_is_equal(), *it)) == global_dummy_indices.end()) {
+ global_dummy_indices.push_back(*it);
+ global_size++;
+ remaining--;
}
- global_dummy_indices.push_back(*it);
- global_size++;
- remaining--;
-found: it++;
+ it++;
}
- sort(global_dummy_indices.begin(), global_dummy_indices.end(), ex_is_less());
}
// Replace index symbols in expression
for (unsigned i=0; i<local_size; i++) {
ex loc_sym = local_dummy_indices[i].op(0);
ex glob_sym = global_dummy_indices[i].op(0);
- if (!loc_sym.is_equal(glob_sym))
+ if (!loc_sym.is_equal(glob_sym)) {
all_equal = false;
- local_syms.append(loc_sym);
- global_syms.append(glob_sym);
+ local_syms.append(loc_sym);
+ global_syms.append(glob_sym);
+ }
}
if (all_equal)
return e;
if (!is_ex_of_type(*it1, indexed))
continue;
+ bool first_noncommutative = (it1->return_type() != return_types::commutative);
+
// Indexed factor found, get free indices and look for contraction
// candidates
exvector free1, dummy1;
if (!is_ex_of_type(*it2, indexed))
continue;
+ bool second_noncommutative = (it2->return_type() != return_types::commutative);
+
// Find free indices of second factor and merge them with free
// indices of first factor
exvector un;
}
if (contracted) {
contraction_done:
- if (non_commutative
+ if (first_noncommutative || second_noncommutative
|| is_ex_exactly_of_type(*it1, add) || is_ex_exactly_of_type(*it2, add)
|| is_ex_exactly_of_type(*it1, mul) || is_ex_exactly_of_type(*it2, mul)
|| is_ex_exactly_of_type(*it1, ncmul) || is_ex_exactly_of_type(*it2, ncmul)) {
// Non-commutative products are always re-expanded to give
// simplify_ncmul() the chance to re-order and canonicalize
// the product
- ex r = (non_commutative ? ex(ncmul(v)) : ex(mul(v)));
+ ex r = (non_commutative ? ex(ncmul(v, true)) : ex(mul(v)));
return simplify_indexed(r, free_indices, dummy_indices, sp);
}
}
// Find free indices (concatenate them all and call find_free_and_dummy())
- exvector un, local_dummy_indices;
+ // and all dummy indices that appear
+ exvector un, individual_dummy_indices;
it1 = v.begin(); itend = v.end();
while (it1 != itend) {
- exvector free_indices_of_factor = it1->get_free_indices();
+ exvector free_indices_of_factor;
+ if (is_ex_of_type(*it1, indexed)) {
+ exvector dummy_indices_of_factor;
+ find_free_and_dummy(ex_to_indexed(*it1).seq.begin() + 1, ex_to_indexed(*it1).seq.end(), free_indices_of_factor, dummy_indices_of_factor);
+ individual_dummy_indices.insert(individual_dummy_indices.end(), dummy_indices_of_factor.begin(), dummy_indices_of_factor.end());
+ } else
+ free_indices_of_factor = it1->get_free_indices();
un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
it1++;
}
+ exvector local_dummy_indices;
find_free_and_dummy(un, free_indices, local_dummy_indices);
+ local_dummy_indices.insert(local_dummy_indices.end(), individual_dummy_indices.begin(), individual_dummy_indices.end());
ex r;
if (something_changed)
- r = non_commutative ? ex(ncmul(v)) : ex(mul(v));
+ r = non_commutative ? ex(ncmul(v, true)) : ex(mul(v));
else
r = e;
ex e_expanded = e.expand();
// Simplification of single indexed object: just find the free indices
- // (and perform dummy index renaming if
+ // and perform dummy index renaming
if (is_ex_of_type(e_expanded, indexed)) {
const indexed &i = ex_to_indexed(e_expanded);
exvector local_dummy_indices;
return e_expanded;
}
-ex simplify_indexed(const ex & e)
+/** Simplify/canonicalize expression containing indexed objects. This
+ * performs contraction of dummy indices where possible and checks whether
+ * the free indices in sums are consistent.
+ *
+ * @return simplified expression */
+ex ex::simplify_indexed(void) const
{
	exvector free_indices, dummy_indices;
	scalar_products sp;
+ // Empty index vectors and an empty scalar_products table mean "no prior
+ // context": the worker fills them in as it recurses.
+ // The explicit GiNaC:: qualification selects the free function; an
+ // unqualified call would recurse into this member function.
-	return simplify_indexed(e, free_indices, dummy_indices, sp);
+	return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
}
-ex simplify_indexed(const ex & e, const scalar_products & sp)
+/** Simplify/canonicalize expression containing indexed objects. This
+ * performs contraction of dummy indices where possible, checks whether
+ * the free indices in sums are consistent, and automatically replaces
+ * scalar products by known values if desired.
+ *
+ * @param sp Scalar products to be replaced automatically
+ * @return simplified expression */
+ex ex::simplify_indexed(const scalar_products & sp) const
{
	exvector free_indices, dummy_indices;
+ // GiNaC:: qualification avoids recursing into this member function.
-	return simplify_indexed(e, free_indices, dummy_indices, sp);
+	return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
+}
+
+/** Symmetrize expression over its free indices.
+ *
+ *  Convenience wrapper: delegates to the free function GiNaC::symmetrize()
+ *  (declared in inifcns.h), passing this expression's free indices as the
+ *  objects to symmetrize over.
+ *
+ *  @return symmetrized expression */
+ex ex::symmetrize(void) const
+{
+ return GiNaC::symmetrize(*this, get_free_indices());
+}
+
+/** Antisymmetrize expression over its free indices.
+ *
+ *  Convenience wrapper: delegates to the free function
+ *  GiNaC::antisymmetrize() (declared in inifcns.h), passing this
+ *  expression's free indices as the objects to antisymmetrize over.
+ *
+ *  @return antisymmetrized expression */
+ex ex::antisymmetrize(void) const
+{
+ return GiNaC::antisymmetrize(*this, get_free_indices());
+}
//////////