simplify_indexed() raises/lowers dummy indices to canonicalize their variance
as far as possible

author     Christian Bauer <Christian.Bauer@uni-mainz.de>
           Thu, 23 May 2002 04:15:15 +0000 (04:15 +0000)
committer  Christian Bauer <Christian.Bauer@uni-mainz.de>
           Thu, 23 May 2002 04:15:15 +0000 (04:15 +0000)

NEWS
check/exam_indexed.cpp
ginac/indexed.cpp
ginac/tensor.cpp

diff --git a/NEWS b/NEWS
index 79d789f..67f36b9 100644 (file)
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,10 @@
 This file records noteworthy changes.
 
+1.0.9 (<date>)
+* simplify_indexed() now raises/lowers dummy indices to canonicalize the index
+  variance. This allows some simplifications that weren't possible before,
+  like eps~a.b~c~d*X.a*X~b -> 0.
+
 1.0.8 (31 March 2002)
 * Improvements in memory usage of the expand() methods.
 
diff --git a/check/exam_indexed.cpp b/check/exam_indexed.cpp
index 90197c0..4ba1794 100644 (file)
--- a/check/exam_indexed.cpp
+++ b/check/exam_indexed.cpp
@@ -136,8 +136,10 @@ static unsigned epsilon_check(void)
        // contraction with symmetric tensor is zero
        result += check_equal_simplify(lorentz_eps(mu, nu, rho, sigma) * indexed(d, sy_symm(), mu_co, nu_co), 0);
        result += check_equal_simplify(lorentz_eps(mu, nu, rho, sigma) * indexed(d, sy_symm(), nu_co, sigma_co, rho_co), 0);
-       ex e = lorentz_eps(mu, nu, rho, sigma) * indexed(d, sy_symm(), mu_co, tau);
-       result += check_equal_simplify(e, e);
+       result += check_equal_simplify(lorentz_eps(mu, nu, rho, sigma) * indexed(d, mu_co) * indexed(d, nu_co), 0);
+       result += check_equal_simplify(lorentz_eps(mu_co, nu, rho, sigma) * indexed(d, mu) * indexed(d, nu_co), 0);
+       ex e = lorentz_eps(mu, nu, rho, sigma) * indexed(d, mu_co) - lorentz_eps(mu_co, nu, rho, sigma) * indexed(d, mu);
+       result += check_equal_simplify(e, 0);
 
        // contractions of epsilon tensors
        result += check_equal_simplify(lorentz_eps(mu, nu, rho, sigma) * lorentz_eps(mu_co, nu_co, rho_co, sigma_co), -24);
@@ -305,7 +307,7 @@ static unsigned spinor_check(void)
        unsigned result = 0;
 
        symbol psi("psi");
-       spinidx A(symbol("A"), 2), B(symbol("B"), 2), C(symbol("C"), 2);
+       spinidx A(symbol("A")), B(symbol("B")), C(symbol("C")), D(symbol("D"));
        ex A_co = A.toggle_variance(), B_co = B.toggle_variance();
        ex e;
 
@@ -327,6 +329,8 @@ static unsigned spinor_check(void)
        result += check_equal_simplify(e, -indexed(psi, A_co));
        e = spinor_metric(A_co, B_co) * indexed(psi, A);
        result += check_equal_simplify(e, indexed(psi, B_co));
+       e = spinor_metric(D, A) * spinor_metric(A_co, B_co) * spinor_metric(B, C) - spinor_metric(D, A_co) * spinor_metric(A, B_co) * spinor_metric(B, C);
+       result += check_equal_simplify(e, 0);
 
        return result;
 }
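
The new spinor check can also be reproduced standalone; the following sketch is not part of the patch and merely restates the test above using the documented spinidx/spinor_metric interface:

    #include <iostream>
    #include <ginac/ginac.h>
    using namespace GiNaC;

    int main()
    {
        spinidx A(symbol("A")), B(symbol("B")), C(symbol("C")), D(symbol("D"));
        ex A_co = A.toggle_variance(), B_co = B.toggle_variance();

        // The two terms differ only in which member of each dummy pair (A and B)
        // carries the upper index; after canonicalization they become identical,
        // so the difference should simplify to zero.
        ex e = spinor_metric(D, A) * spinor_metric(A_co, B_co) * spinor_metric(B, C)
             - spinor_metric(D, A_co) * spinor_metric(A, B_co) * spinor_metric(B, C);
        std::cout << e.simplify_indexed() << std::endl;  // expected output: 0
        return 0;
    }
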
diff --git a/ginac/indexed.cpp b/ginac/indexed.cpp
index 765a07c..eec7b11 100644 (file)
--- a/ginac/indexed.cpp
+++ b/ginac/indexed.cpp
@@ -29,6 +29,7 @@
 #include "mul.h"
 #include "ncmul.h"
 #include "power.h"
+#include "relational.h"
 #include "symmetry.h"
 #include "lst.h"
 #include "print.h"
@@ -553,6 +554,14 @@ static ex rename_dummy_indices(const ex & e, exvector & global_dummy_indices, ex
        }
 }
 
+/* Ordering that only compares the base expressions of indexed objects. */
+struct ex_base_is_less : public std::binary_function<ex, ex, bool> {
+       bool operator() (const ex &lh, const ex &rh) const
+       {
+               return (is_a<indexed>(lh) ? lh.op(0) : lh).compare(is_a<indexed>(rh) ? rh.op(0) : rh) < 0;
+       }
+};
+
 /** Simplify product of indexed expressions (commutative, noncommutative and
  *  simple squares), return list of free indices. */
 ex simplify_indexed_product(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
@@ -670,8 +679,7 @@ contraction_done:
        // Find free indices (concatenate them all and call find_free_and_dummy())
        // and all dummy indices that appear
        exvector un, individual_dummy_indices;
-       it1 = v.begin(); itend = v.end();
-       while (it1 != itend) {
+       for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
                exvector free_indices_of_factor;
                if (is_ex_of_type(*it1, indexed)) {
                        exvector dummy_indices_of_factor;
@@ -680,12 +688,76 @@ contraction_done:
                } else
                        free_indices_of_factor = it1->get_free_indices();
                un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
-               it1++;
        }
        exvector local_dummy_indices;
        find_free_and_dummy(un, free_indices, local_dummy_indices);
        local_dummy_indices.insert(local_dummy_indices.end(), individual_dummy_indices.begin(), individual_dummy_indices.end());
 
+       // Filter out the dummy indices with variance
+       exvector variant_dummy_indices;
+       for (it1 = local_dummy_indices.begin(), itend = local_dummy_indices.end(); it1 != itend; ++it1) {
+               if (is_exactly_a<varidx>(*it1))
+                       variant_dummy_indices.push_back(*it1);
+       }
+
+       // Any indices with variance present at all?
+       if (!variant_dummy_indices.empty()) {
+
+               // Yes, bring the product into a canonical order that only depends on
+               // the base expressions of indexed objects
+               if (!non_commutative)
+                       std::sort(v.begin(), v.end(), ex_base_is_less());
+
+               exvector moved_indices;
+
+               // Iterate over all indexed objects in the product
+               for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
+                       if (!is_ex_of_type(*it1, indexed))
+                               continue;
+
+                       ex new_it1;
+                       bool it1_dirty = false; // If this is true, then new_it1 holds a new value for *it1
+
+                       // If a dummy index is encountered for the first time in the
+                       // product, pull it up, otherwise, pull it down
+                       exvector::iterator it2, it2end;
+                       for (it2 = const_cast<indexed &>(ex_to<indexed>(*it1)).seq.begin(), it2end = const_cast<indexed &>(ex_to<indexed>(*it1)).seq.end(); it2 != it2end; ++it2) {
+                               if (!is_exactly_a<varidx>(*it2))
+                                       continue;
+
+                               exvector::iterator vit, vitend;
+                               for (vit = variant_dummy_indices.begin(), vitend = variant_dummy_indices.end(); vit != vitend; ++vit) {
+                                       if (it2->op(0).is_equal(vit->op(0))) {
+                                               if (ex_to<varidx>(*it2).is_covariant()) {
+                                                       new_it1 = (it1_dirty ? new_it1 : *it1).subs(*it2 == ex_to<varidx>(*it2).toggle_variance());
+                                                       it1_dirty = true;
+                                                       something_changed = true;
+                                               }
+                                               moved_indices.push_back(*vit);
+                                               variant_dummy_indices.erase(vit);
+                                               goto next_index;
+                                       }
+                               }
+
+                               for (vit = moved_indices.begin(), vitend = moved_indices.end(); vit != vitend; ++vit) {
+                                       if (it2->op(0).is_equal(vit->op(0))) {
+                                               if (ex_to<varidx>(*it2).is_contravariant()) {
+                                                       new_it1 = (it1_dirty ? new_it1 : *it1).subs(*it2 == ex_to<varidx>(*it2).toggle_variance());
+                                                       it1_dirty = true;
+                                                       something_changed = true;
+                                               }
+                                               goto next_index;
+                                       }
+                               }
+
+next_index:            ;
+                       }
+
+                       if (it1_dirty)
+                               *it1 = new_it1;
+               }
+       }
+
        ex r;
        if (something_changed)
                r = non_commutative ? ex(ncmul(v, true)) : ex(mul(v));
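
The loop added above pulls a variant dummy index up at its first occurrence in the (base-sorted) product and down at every later occurrence. A hedged user-level sketch of the effect (not part of the patch; the symbols and index used here are made up for illustration):

    #include <iostream>
    #include <ginac/ginac.h>
    using namespace GiNaC;

    int main()
    {
        symbol X("X"), Y("Y");
        varidx a(symbol("a"), 4);

        // X.a*Y~a and X~a*Y.a differ only in which factor carries the upper
        // index; after canonicalization both products should end up with the
        // same variance pattern, so their difference should simplify to zero.
        ex e = indexed(X, a.toggle_variance()) * indexed(Y, a)
             - indexed(X, a) * indexed(Y, a.toggle_variance());
        std::cout << e.simplify_indexed() << std::endl;  // expected output: 0
        return 0;
    }
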
diff --git a/ginac/tensor.cpp b/ginac/tensor.cpp
index fb2e596..ccae243 100644 (file)
--- a/ginac/tensor.cpp
+++ b/ginac/tensor.cpp
@@ -510,7 +510,7 @@ bool tensepsilon::contract_with(exvector::iterator self, exvector::iterator othe
 
        } else if (other->return_type() == return_types::commutative) {
 
-#if 1
+#if 0
                // This handles eps.i.j.k * p.j * p.k = 0 and related cases.
                // Actually, simplify_indexed() can handle most of them on its own
                // but one specific case that is not covered there is