3 * Implementation of GiNaC's indexed expressions. */
6 * GiNaC Copyright (C) 1999-2016 Johannes Gutenberg University Mainz, Germany
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "relational.h"
31 #include "operators.h"
// Register 'indexed' in GiNaC's run-time class hierarchy (parent: exprseq)
// and bind the per-context print methods defined further below.
47 GINAC_IMPLEMENT_REGISTERED_CLASS_OPT(indexed, exprseq,
48 print_func<print_context>(&indexed::do_print).
49 print_func<print_latex>(&indexed::do_print_latex).
50 print_func<print_tree>(&indexed::do_print_tree))
// Constructors. The sequence 'seq' (inherited from exprseq) holds the base
// expression at position 0 followed by the indices; 'symtree' holds the
// symmetry tree of the indices. NOTE(review): constructor bodies are elided
// in this excerpt (original line numbers jump).
53 // default constructor
56 indexed::indexed() : symtree(not_symmetric())
// Base expression plus 0..4 explicit indices, no symmetry.
64 indexed::indexed(const ex & b) : inherited{b}, symtree(not_symmetric())
69 indexed::indexed(const ex & b, const ex & i1) : inherited{b, i1}, symtree(not_symmetric())
74 indexed::indexed(const ex & b, const ex & i1, const ex & i2) : inherited{b, i1, i2}, symtree(not_symmetric())
79 indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3) : inherited{b, i1, i2, i3}, symtree(not_symmetric())
84 indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited{b, i1, i2, i3, i4}, symtree(not_symmetric())
// Base expression plus explicit indices with an explicit symmetry.
89 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2) : inherited{b, i1, i2}, symtree(symm)
94 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3) : inherited{b, i1, i2, i3}, symtree(symm)
99 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited{b, i1, i2, i3, i4}, symtree(symm)
// Base expression plus a vector of indices appended to seq.
104 indexed::indexed(const ex & b, const exvector & v) : inherited{b}, symtree(not_symmetric())
106 seq.insert(seq.end(), v.begin(), v.end());
110 indexed::indexed(const ex & b, const symmetry & symm, const exvector & v) : inherited{b}, symtree(symm)
112 seq.insert(seq.end(), v.begin(), v.end());
// Internal constructors taking a pre-built sequence (base already at seq[0]).
116 indexed::indexed(const symmetry & symm, const exprseq & es) : inherited(es), symtree(symm)
120 indexed::indexed(const symmetry & symm, const exvector & v) : inherited(v), symtree(symm)
124 indexed::indexed(const symmetry & symm, exvector && v) : inherited(std::move(v)), symtree(symm)
// Deserialize an indexed object. Falls back to a legacy unsigned "symmetry"
// property for archives written by GiNaC <= 0.9.0; the symmetry tree is
// re-validated against the number of indices after unarchiving.
132 void indexed::read_archive(const archive_node &n, lst &sym_lst)
134 inherited::read_archive(n, sym_lst);
135 if (!n.find_ex("symmetry", symtree, sym_lst)) {
136 // GiNaC versions <= 0.9.0 had an unsigned "symmetry" property
138 n.find_unsigned("symmetry", symm);
// NOTE(review): the mapping from the legacy unsigned value to a symmetry
// tree is elided here; the visible default is "not symmetric".
147 symtree = not_symmetric();
// const_cast is needed because symtree is stored as an ex; validate()
// checks the tree against seq.size()-1 indices (seq[0] is the base).
150 const_cast<symmetry &>(ex_to<symmetry>(symtree)).validate(seq.size() - 1);
153 GINAC_BIND_UNARCHIVER(indexed);
// Serialize: archive the base class data plus the symmetry tree.
155 void indexed::archive(archive_node &n) const
157 inherited::archive(n);
158 n.add_ex("symmetry", symtree);
162 // functions overriding virtual functions from base classes
// Print the index portion of the object. For LaTeX output, consecutive
// indices of equal variance are grouped into one super-/subscript run.
165 void indexed::printindices(const print_context & c, unsigned level) const
167 if (seq.size() > 1) {
// seq[0] is the base expression, so indices start at begin()+1.
169 auto it = seq.begin() + 1, itend = seq.end();
171 if (is_a<print_latex>(c)) {
173 // TeX output: group by variance
175 bool covariant = true;
177 while (it != itend) {
// Non-varidx indices are treated as covariant for grouping purposes.
178 bool cur_covariant = (is_a<varidx>(*it) ? ex_to<varidx>(*it).is_covariant() : true);
179 if (first || cur_covariant != covariant) { // Variance changed
180 // The empty {} prevents indices from ending up on top of each other
183 covariant = cur_covariant;
// Non-LaTeX output: print indices one after another (loop body elided).
199 while (it != itend) {
// Print base expression (parenthesized if needed) followed by the indices.
207 void indexed::print_indexed(const print_context & c, const char *openbrace, const char *closebrace, unsigned level) const
209 if (precedence() <= level)
210 c.s << openbrace << '(';
212 seq[0].print(c, precedence());
214 printindices(c, level);
215 if (precedence() <= level)
216 c.s << ')' << closebrace;
219 void indexed::do_print(const print_context & c, unsigned level) const
221 print_indexed(c, "", "", level);
// LaTeX: wrap the whole object in braces so it forms a single token.
224 void indexed::do_print_latex(const print_latex & c, unsigned level) const
226 print_indexed(c, "{", "}", level);
// Debug tree dump: class name, hash, flags, index count and symmetry,
// then the base expression and indices indented by delta_indent.
229 void indexed::do_print_tree(const print_tree & c, unsigned level) const
231 c.s << std::string(level, ' ') << class_name() << " @" << this
232 << std::hex << ", hash=0x" << hashvalue << ", flags=0x" << flags << std::dec
233 << ", " << seq.size()-1 << " indices"
234 << ", symmetry=" << symtree << std::endl;
235 seq[0].print(c, level + c.delta_indent);
236 printindices(c, level + c.delta_indent);
// Property queries: every indexed object reports info_flags::indexed;
// has_indices is true only if at least one index is present.
239 bool indexed::info(unsigned inf) const
241 if (inf == info_flags::indexed) return true;
242 if (inf == info_flags::has_indices) return seq.size() > 1;
243 return inherited::info(inf);
// True iff every index value satisfies the given info flag (an index
// failing the predicate makes find_if return early).
246 bool indexed::all_index_values_are(unsigned inf) const
248 // No indices? Then no property can be fulfilled
253 return find_if(seq.begin() + 1, seq.end(),
254 [inf](const ex & e) { return !(ex_to<idx>(e).get_value().info(inf)); }) == seq.end();
// Canonical ordering: delegate to exprseq comparison of base + indices.
257 int indexed::compare_same_type(const basic & other) const
259 GINAC_ASSERT(is_a<indexed>(other));
260 return inherited::compare_same_type(other);
// Automatic evaluation: zero base -> zero, numeric factors are pulled out
// of the base, indices are brought into canonical order according to the
// symmetry tree, and finally the base class gets a chance to evaluate.
263 ex indexed::eval() const
265 const ex &base = seq[0];
267 // If the base object is 0, the whole object is 0
271 // If the base object is a product, pull out the numeric factor
// mul keeps its numeric overall factor as the last operand.
272 if (is_exactly_a<mul>(base) && is_exactly_a<numeric>(base.op(base.nops() - 1))) {
274 ex f = ex_to<numeric>(base.op(base.nops() - 1));
// NOTE(review): construction of the stripped sequence 'v' is elided here.
276 return f * thiscontainer(v);
// A plain 'indexed' with no indices degenerates to its base expression.
279 if((typeid(*this) == typeid(indexed)) && seq.size()==1)
282 // Canonicalize indices according to the symmetry properties
283 if (seq.size() > 2) {
285 GINAC_ASSERT(is_exactly_a<symmetry>(symtree));
// canonicalize() returns INT_MAX when nothing changed, otherwise the
// sign (+1/-1) picked up by the permutation.
286 int sig = canonicalize(v.begin() + 1, ex_to<symmetry>(symtree));
287 if (sig != std::numeric_limits<int>::max()) {
288 // Something has changed while sorting indices, more evaluations later
291 return ex(sig) * thiscontainer(v);
295 // Let the class of the base object perform additional evaluations
296 return ex_to<basic>(base).eval_indexed(*this);
// Real/imaginary part: if the base is known to be real the answer is
// immediate (result lines elided); otherwise return an unevaluated
// real_part/imag_part function wrapped around *this.
299 ex indexed::real_part() const
301 if(op(0).info(info_flags::real))
303 return real_part_function(*this).hold();
306 ex indexed::imag_part() const
308 if(op(0).info(info_flags::real))
310 return imag_part_function(*this).hold();
// Rebuild an object of the same class from a new sequence, preserving
// the symmetry tree (overridden by subclasses of indexed).
313 ex indexed::thiscontainer(const exvector & v) const
315 return indexed(ex_to<symmetry>(symtree), v);
318 ex indexed::thiscontainer(exvector && v) const
320 return indexed(ex_to<symmetry>(symtree), std::move(v));
// Commutation behavior follows the base object, except that matrix
// bases are forced commutative.
323 unsigned indexed::return_type() const
325 if(is_a<matrix>(op(0)))
326 return return_types::commutative;
328 return op(0).return_type();
// expand() with expand_indexed: distribute the indices over a sum in the
// base, i.e. (a+b).i -> a.i + b.i; otherwise defer to exprseq::expand().
331 ex indexed::expand(unsigned options) const
333 GINAC_ASSERT(seq.size() > 0);
335 if (options & expand_options::expand_indexed) {
336 ex newbase = seq[0].expand(options);
337 if (is_exactly_a<add>(newbase)) {
// Build one indexed object per addend and sum them up
// ('s' / 'sum' setup lines elided in this excerpt).
339 for (size_t i=0; i<newbase.nops(); i++) {
341 s[0] = newbase.op(i);
342 sum += thiscontainer(s).expand(options);
// Base changed but is not a sum: rebuild with the expanded base.
346 if (!are_ex_trivially_equal(newbase, seq[0])) {
349 return ex_to<indexed>(thiscontainer(s)).inherited::expand(options);
352 return inherited::expand(options);
356 // virtual functions which can be overridden by derived classes
362 // non-virtual functions in this class
365 /** Check whether all indices are of class idx and validate the symmetry
366 * tree. This function is used internally to make sure that all constructed
367 * indexed objects really carry indices and not some other classes. */
368 void indexed::validate() const
370 GINAC_ASSERT(seq.size() > 0);
// Walk the indices (seq[0] is the base, skip it).
371 auto it = seq.begin() + 1, itend = seq.end();
372 while (it != itend) {
374 throw(std::invalid_argument("indices of indexed object must be of type idx"));
// A zero symtree means "no symmetry specified"; otherwise it must be a
// symmetry object consistent with the index count.
378 if (!symtree.is_zero()) {
379 if (!is_exactly_a<symmetry>(symtree))
380 throw(std::invalid_argument("symmetry of indexed object must be of type symmetry"))
381 const_cast<symmetry &>(ex_to<symmetry>(symtree)).validate(seq.size() - 1);
385 /** Implementation of ex::diff() for an indexed object always returns 0.
388 ex indexed::derivative(const symbol & s) const
// Index equality predicate that ignores a mismatch in the indices'
// dimension by rewriting rh to lh's dimension before comparing.
397 struct idx_is_equal_ignore_dim {
398 bool operator() (const ex &lh, const ex &rh) const
404 // Replacing the dimension might cause an error (e.g. with
405 // index classes that only work in a fixed number of dimensions)
406 return lh.is_equal(ex_to<idx>(rh).replace_dim(ex_to<idx>(lh).get_dim()));
413 /** Check whether two sorted index vectors are consistent (i.e. equal). */
414 static bool indices_consistent(const exvector & v1, const exvector & v2)
416 // Number of indices must be the same
417 if (v1.size() != v2.size())
// Element-wise comparison, dimension differences tolerated.
420 return equal(v1.begin(), v1.end(), v2.begin(), idx_is_equal_ignore_dim());
// Return all indices (everything after the base at seq[0]).
423 exvector indexed::get_indices() const
425 GINAC_ASSERT(seq.size() >= 1);
426 return exvector(seq.begin() + 1, seq.end());
// Dummy indices of this object alone (pairs contracted within it).
429 exvector indexed::get_dummy_indices() const
431 exvector free_indices, dummy_indices;
432 find_free_and_dummy(seq.begin() + 1, seq.end(), free_indices, dummy_indices);
433 return dummy_indices;
// Dummy indices formed between this object and 'other': concatenate the
// free indices of both and look for newly matching pairs.
436 exvector indexed::get_dummy_indices(const indexed & other) const
438 exvector indices = get_free_indices();
439 exvector other_indices = other.get_free_indices();
440 indices.insert(indices.end(), other_indices.begin(), other_indices.end());
441 exvector dummy_indices;
442 find_dummy_indices(indices, dummy_indices);
443 return dummy_indices;
// True if any of this object's indices forms a dummy pair with 'i'.
446 bool indexed::has_dummy_index_for(const ex & i) const
448 auto it = seq.begin() + 1, itend = seq.end();
449 while (it != itend) {
450 if (is_dummy_pair(*it, i))
// Free (uncontracted) indices of this single object.
457 exvector indexed::get_free_indices() const
459 exvector free_indices, dummy_indices;
460 find_free_and_dummy(seq.begin() + 1, seq.end(), free_indices, dummy_indices);
// Free indices of a sum: every term must carry the same free indices,
// otherwise the sum is malformed.
464 exvector add::get_free_indices() const
466 exvector free_indices;
467 for (size_t i=0; i<nops(); i++) {
// First term fixes the expected set (branch structure partly elided).
469 free_indices = op(i).get_free_indices();
471 exvector free_indices_of_term = op(i).get_free_indices();
472 if (!indices_consistent(free_indices, free_indices_of_term))
473 throw (std::runtime_error("add::get_free_indices: inconsistent indices in sum"));
// Free indices of a (commutative) product: concatenate all factors'
// free indices, then drop the pairs that contract against each other.
479 exvector mul::get_free_indices() const
481 // Concatenate free indices of all factors
483 for (size_t i=0; i<nops(); i++) {
484 exvector free_indices_of_factor = op(i).get_free_indices();
485 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
488 // And remove the dummy indices
489 exvector free_indices, dummy_indices;
490 find_free_and_dummy(un, free_indices, dummy_indices);
// Same algorithm for non-commutative products.
494 exvector ncmul::get_free_indices() const
496 // Concatenate free indices of all factors
498 for (size_t i=0; i<nops(); i++) {
499 exvector free_indices_of_factor = op(i).get_free_indices();
500 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
503 // And remove the dummy indices
504 exvector free_indices, dummy_indices;
505 find_free_and_dummy(un, free_indices, dummy_indices);
// Predicate: an index that forms a dummy pair with itself is a summation
// index (a dimensional index being summed over).
509 struct is_summation_idx {
510 bool operator()(const ex & e)
512 return is_dummy_pair(e, e);
// An integral's bounds must be index-free; the free indices are those of
// the integrand.
516 exvector integral::get_free_indices() const
518 if (a.get_free_indices().size() || b.get_free_indices().size())
519 throw (std::runtime_error("integral::get_free_indices: boundary values should not have free indices"));
520 return f.get_free_indices();
// Count how many elements of 'v' are exactly of index class T.
523 template<class T> size_t number_of_type(const exvector&v)
527 if (is_exactly_a<T>(it))
532 /** Rename dummy indices in an expression.
534 * @param e Expression to work on
535 * @param local_dummy_indices The set of dummy indices that appear in the
537 * @param global_dummy_indices The set of dummy indices that have appeared
538 * before and which we would like to use in "e", too. This gets updated
540 template<class T> static ex rename_dummy_indices(const ex & e, exvector & global_dummy_indices, exvector & local_dummy_indices)
542 size_t global_size = number_of_type<T>(global_dummy_indices),
543 local_size = number_of_type<T>(local_dummy_indices);
545 // Any local dummy indices at all?
549 if (global_size < local_size) {
551 // More local indices than we encountered before, add the new ones
553 size_t old_global_size = global_size;
554 int remaining = local_size - global_size;
555 auto it = local_dummy_indices.begin(), itend = local_dummy_indices.end();
556 while (it != itend && remaining > 0) {
// Only adopt a local index if no dimension-insensitive duplicate is
// already registered globally.
557 if (is_exactly_a<T>(*it) &&
558 find_if(global_dummy_indices.begin(), global_dummy_indices.end(),
559 [it](const ex &lh) { return idx_is_equal_ignore_dim()(lh, *it); }) == global_dummy_indices.end()) {
560 global_dummy_indices.push_back(*it);
567 // If this is the first set of local indices, do nothing
568 if (old_global_size == 0)
571 GINAC_ASSERT(local_size <= global_size);
573 // Construct vectors of index symbols
// op(0) of an idx is its symbol/value; collect them for substitution.
574 exvector local_syms, global_syms;
575 local_syms.reserve(local_size);
576 global_syms.reserve(local_size);
577 for (size_t i=0; local_syms.size()!=local_size; i++)
578 if(is_exactly_a<T>(local_dummy_indices[i]))
579 local_syms.push_back(local_dummy_indices[i].op(0));
580 shaker_sort(local_syms.begin(), local_syms.end(), ex_is_less(), ex_swap());
581 for (size_t i=0; global_syms.size()!=local_size; i++) // don't use more global symbols than necessary
582 if(is_exactly_a<T>(global_dummy_indices[i]))
583 global_syms.push_back(global_dummy_indices[i].op(0));
584 shaker_sort(global_syms.begin(), global_syms.end(), ex_is_less(), ex_swap());
586 // Remove common indices
587 exvector local_uniq, global_uniq;
588 set_difference(local_syms.begin(), local_syms.end(), global_syms.begin(), global_syms.end(), std::back_insert_iterator<exvector>(local_uniq), ex_is_less());
589 set_difference(global_syms.begin(), global_syms.end(), local_syms.begin(), local_syms.end(), std::back_insert_iterator<exvector>(global_uniq), ex_is_less());
591 // Replace remaining non-common local index symbols by global ones
592 if (local_uniq.empty())
// Need exactly as many replacements as there are local leftovers.
595 while (global_uniq.size() > local_uniq.size())
596 global_uniq.pop_back();
597 return e.subs(lst(local_uniq.begin(), local_uniq.end()), lst(global_uniq.begin(), global_uniq.end()), subs_options::no_pattern);
601 /** Given a set of indices, extract those of class varidx. */
602 static void find_variant_indices(const exvector & v, exvector & variant_indices)
604 exvector::const_iterator it1, itend;
605 for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
606 if (is_exactly_a<varidx>(*it1))
607 variant_indices.push_back(*it1);
611 /** Raise/lower dummy indices in a single indexed objects to canonicalize their
614 * @param e Object to work on
615 * @param variant_dummy_indices The set of indices that might need repositioning (will be changed by this function)
616 * @param moved_indices The set of indices that have been repositioned (will be changed by this function)
617 * @return true if 'e' was changed */
618 bool reposition_dummy_indices(ex & e, exvector & variant_dummy_indices, exvector & moved_indices)
620 bool something_changed = false;
622 // Find dummy symbols that occur twice in the same indexed object.
623 exvector local_var_dummies;
624 local_var_dummies.reserve(e.nops()/2);
625 for (size_t i=1; i<e.nops(); ++i) {
626 if (!is_a<varidx>(e.op(i)))
628 for (size_t j=i+1; j<e.nops(); ++j) {
629 if (is_dummy_pair(e.op(i), e.op(j))) {
630 local_var_dummies.push_back(e.op(i));
// A pair contracted inside this object is no longer a candidate for
// cross-factor repositioning; drop it from the caller's list.
631 for (auto k = variant_dummy_indices.begin(); k!=variant_dummy_indices.end(); ++k) {
632 if (e.op(i).op(0) == k->op(0)) {
633 variant_dummy_indices.erase(k);
642 // In the case where a dummy symbol occurs twice in the same indexed object
643 // we try all possibilities of raising/lowering and keep the least one in
644 // the sense of ex_is_less.
// Each internal dummy pair can be toggled independently -> 2^n variants.
646 size_t numpossibs = 1 << local_var_dummies.size();
647 for (size_t i=0; i<numpossibs; ++i) {
649 for (size_t j=0; j<local_var_dummies.size(); ++j) {
// Toggle variance of pair j by substituting idx <-> toggled idx both ways.
652 ex curr_idx = local_var_dummies[j];
653 ex curr_toggle = ex_to<varidx>(curr_idx).toggle_variance();
654 m[curr_idx] = curr_toggle;
655 m[curr_toggle] = curr_idx;
657 try_e = e.subs(m, subs_options::no_pattern);
659 if(ex_is_less()(try_e, optimal_e))
661 something_changed = true;
666 if (!is_a<indexed>(e))
// Work on a copy of the sequence; e is rebuilt at the end if changed.
669 exvector seq = ex_to<indexed>(e).seq;
671 // If a dummy index is encountered for the first time in the
672 // product, pull it up, otherwise, pull it down
673 for (auto it2 = seq.begin()+1, it2end = seq.end(); it2 != it2end; ++it2) {
674 if (!is_exactly_a<varidx>(*it2))
677 exvector::iterator vit, vitend;
678 for (vit = variant_dummy_indices.begin(), vitend = variant_dummy_indices.end(); vit != vitend; ++vit) {
679 if (it2->op(0).is_equal(vit->op(0))) {
680 if (ex_to<varidx>(*it2).is_covariant()) {
682 * N.B. we don't want to use
685 * *it2 == ex_to<varidx>(*it2).toggle_variance(),
686 * ex_to<varidx>(*it2).toggle_variance() == *it2
687 * }, subs_options::no_pattern);
689 * since this can trigger non-trivial repositioning of indices,
690 * e.g. due to non-trivial symmetry properties of e, thus
691 * invalidating iterators
693 *it2 = ex_to<varidx>(*it2).toggle_variance();
694 something_changed = true;
// Remember this index as moved so the partner occurrence gets lowered.
696 moved_indices.push_back(*vit);
697 variant_dummy_indices.erase(vit);
// Second occurrence of an already-moved index: pull it down.
702 for (vit = moved_indices.begin(), vitend = moved_indices.end(); vit != vitend; ++vit) {
703 if (it2->op(0).is_equal(vit->op(0))) {
704 if (ex_to<varidx>(*it2).is_contravariant()) {
705 *it2 = ex_to<varidx>(*it2).toggle_variance();
706 something_changed = true;
715 if (something_changed)
716 e = ex_to<indexed>(e).thiscontainer(seq);
718 return something_changed;
721 /* Ordering that only compares the base expressions of indexed objects. */
722 struct ex_base_is_less {
723 bool operator() (const ex &lh, const ex &rh) const
// Indexed objects compare by their base (op(0)); others by themselves.
725 return (is_a<indexed>(lh) ? lh.op(0) : lh).compare(is_a<indexed>(rh) ? rh.op(0) : rh) < 0;
729 /* An auxiliary function used by simplify_indexed() and expand_dummy_sum()
730 * It returns an exvector of factors from the supplied product */
731 static void product_to_exvector(const ex & e, exvector & v, bool & non_commutative)
733 // Remember whether the product was commutative or noncommutative
734 // (because we chop it into factors and need to reassemble later)
735 non_commutative = is_exactly_a<ncmul>(e);
737 // Collect factors in an exvector, store squares twice
738 v.reserve(e.nops() * 2);
740 if (is_exactly_a<power>(e)) {
741 // We only get called for simple squares, split a^2 -> a*a
742 GINAC_ASSERT(e.op(1).is_equal(_ex2));
743 v.push_back(e.op(0));
744 v.push_back(e.op(0));
// General product: flatten factors, expanding squares and nested ncmuls.
746 for (size_t i=0; i<e.nops(); i++) {
748 if (is_exactly_a<power>(f) && f.op(1).is_equal(_ex2)) {
749 v.push_back(f.op(0));
750 v.push_back(f.op(0));
751 } else if (is_exactly_a<ncmul>(f)) {
752 // Noncommutative factor found, split it as well
753 non_commutative = true; // everything becomes noncommutative, ncmul will sort out the commutative factors later
754 for (size_t j=0; j<f.nops(); j++)
755 v.push_back(f.op(j));
// Symmetrize 'r' over the symbols of all dummy indices of class T.
// Used to detect expressions that vanish by symmetry (e.g. eps * symmetric).
762 template<class T> ex idx_symmetrization(const ex& r,const exvector& local_dummy_indices)
763 { exvector dummy_syms;
764 dummy_syms.reserve(r.nops());
765 for (auto & it : local_dummy_indices)
766 if(is_exactly_a<T>(it))
767 dummy_syms.push_back(it.op(0));
// Fewer than two symbols: nothing to symmetrize over.
768 if(dummy_syms.size() < 2)
770 ex q=symmetrize(r, dummy_syms);
774 // Forward declaration needed in absence of friend injection, C.f. [namespace.memdef]:
775 ex simplify_indexed(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp);
777 /** Simplify product of indexed expressions (commutative, noncommutative and
778 * simple squares), return list of free indices. */
779 ex simplify_indexed_product(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
781 // Collect factors in an exvector
784 // Remember whether the product was commutative or noncommutative
785 // (because we chop it into factors and need to reassemble later)
786 bool non_commutative;
787 product_to_exvector(e, v, non_commutative);
789 // Perform contractions
790 bool something_changed = false;
791 bool has_nonsymmetric = false;
792 GINAC_ASSERT(v.size() > 1);
// Try every ordered pair (it1, it2) of indexed factors for contraction.
793 exvector::iterator it1, itend = v.end(), next_to_last = itend - 1;
794 for (it1 = v.begin(); it1 != next_to_last; it1++) {
797 if (!is_a<indexed>(*it1))
800 bool first_noncommutative = (it1->return_type() != return_types::commutative);
801 bool first_nonsymmetric = ex_to<symmetry>(ex_to<indexed>(*it1).get_symmetry()).has_nonsymmetric();
803 // Indexed factor found, get free indices and look for contraction
805 exvector free1, dummy1;
806 find_free_and_dummy(ex_to<indexed>(*it1).seq.begin() + 1, ex_to<indexed>(*it1).seq.end(), free1, dummy1);
808 exvector::iterator it2;
809 for (it2 = it1 + 1; it2 != itend; it2++) {
811 if (!is_a<indexed>(*it2))
814 bool second_noncommutative = (it2->return_type() != return_types::commutative);
816 // Find free indices of second factor and merge them with free
817 // indices of first factor
819 find_free_and_dummy(ex_to<indexed>(*it2).seq.begin() + 1, ex_to<indexed>(*it2).seq.end(), un, dummy1);
820 un.insert(un.end(), free1.begin(), free1.end());
822 // Check whether the two factors share dummy indices
823 exvector free, dummy;
824 find_free_and_dummy(un, free, dummy);
825 size_t num_dummies = dummy.size();
826 if (num_dummies == 0)
829 // At least one dummy index, is it a defined scalar product?
830 bool contracted = false;
// A scalar product needs two fully contracted one-index objects.
831 if (free.empty() && it1->nops()==2 && it2->nops()==2) {
// Use the smaller of the two index dimensions for the lookup.
833 ex dim = minimal_dim(
834 ex_to<idx>(it1->op(1)).get_dim(),
835 ex_to<idx>(it2->op(1)).get_dim()
838 // User-defined scalar product?
839 if (sp.is_defined(*it1, *it2, dim)) {
841 // Yes, substitute it
842 *it1 = sp.evaluate(*it1, *it2, dim);
844 goto contraction_done;
848 // Try to contract the first one with the second one
849 contracted = ex_to<basic>(it1->op(0)).contract_with(it1, it2, v);
852 // That didn't work; maybe the second object knows how to
853 // contract itself with the first one
854 contracted = ex_to<basic>(it2->op(0)).contract_with(it2, it1, v);
// Contraction may have turned a factor into a sum/product; if so the
// whole expression must be rebuilt and simplified again.
858 if (first_noncommutative || second_noncommutative
859 || is_exactly_a<add>(*it1) || is_exactly_a<add>(*it2)
860 || is_exactly_a<mul>(*it1) || is_exactly_a<mul>(*it2)
861 || is_exactly_a<ncmul>(*it1) || is_exactly_a<ncmul>(*it2)) {
863 // One of the factors became a sum or product:
864 // re-expand expression and run again
865 // Non-commutative products are always re-expanded to give
866 // eval_ncmul() the chance to re-order and canonicalize
868 bool is_a_product = (is_exactly_a<mul>(*it1) || is_exactly_a<ncmul>(*it1)) &&
869 (is_exactly_a<mul>(*it2) || is_exactly_a<ncmul>(*it2));
870 ex r = (non_commutative ? ex(ncmul(std::move(v))) : ex(mul(std::move(v))));
872 // If new expression is a product we can call this function again,
873 // otherwise we need to pass argument to simplify_indexed() to be expanded
875 return simplify_indexed_product(r, free_indices, dummy_indices, sp);
877 return simplify_indexed(r, free_indices, dummy_indices, sp);
880 // Both objects may have new indices now or they might
881 // even not be indexed objects any more, so we have to
883 something_changed = true;
// Track whether any pair of contracted factors has non-symmetric parts,
// enabling the symmetry-based zero detection below.
886 else if (!has_nonsymmetric &&
887 (first_nonsymmetric ||
888 ex_to<symmetry>(ex_to<indexed>(*it2).get_symmetry()).has_nonsymmetric())) {
889 has_nonsymmetric = true;
894 // Find free indices (concatenate them all and call find_free_and_dummy())
895 // and all dummy indices that appear
896 exvector un, individual_dummy_indices;
897 for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
898 exvector free_indices_of_factor;
899 if (is_a<indexed>(*it1)) {
900 exvector dummy_indices_of_factor;
901 find_free_and_dummy(ex_to<indexed>(*it1).seq.begin() + 1, ex_to<indexed>(*it1).seq.end(), free_indices_of_factor, dummy_indices_of_factor);
902 individual_dummy_indices.insert(individual_dummy_indices.end(), dummy_indices_of_factor.begin(), dummy_indices_of_factor.end());
904 free_indices_of_factor = it1->get_free_indices();
905 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
907 exvector local_dummy_indices;
908 find_free_and_dummy(un, free_indices, local_dummy_indices);
909 local_dummy_indices.insert(local_dummy_indices.end(), individual_dummy_indices.begin(), individual_dummy_indices.end());
911 // Filter out the dummy indices with variance
912 exvector variant_dummy_indices;
913 find_variant_indices(local_dummy_indices, variant_dummy_indices);
915 // Any indices with variance present at all?
916 if (!variant_dummy_indices.empty()) {
918 // Yes, bring the product into a canonical order that only depends on
919 // the base expressions of indexed objects
920 if (!non_commutative)
921 std::sort(v.begin(), v.end(), ex_base_is_less());
923 exvector moved_indices;
925 // Iterate over all indexed objects in the product
926 for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
927 if (!is_a<indexed>(*it1))
930 if (reposition_dummy_indices(*it1, variant_dummy_indices, moved_indices))
931 something_changed = true;
936 if (something_changed)
937 r = non_commutative ? ex(ncmul(std::move(v))) : ex(mul(std::move(v)));
941 // The result should be symmetric with respect to exchange of dummy
942 // indices, so if the symmetrization vanishes, the whole expression is
943 // zero. This detects things like eps.i.j.k * p.j * p.k = 0.
944 if (has_nonsymmetric) {
// Check each index class separately: plain idx, varidx, spinidx.
945 ex q = idx_symmetrization<idx>(r, local_dummy_indices);
947 free_indices.clear();
950 q = idx_symmetrization<varidx>(q, local_dummy_indices);
952 free_indices.clear();
955 q = idx_symmetrization<spinidx>(q, local_dummy_indices);
957 free_indices.clear();
962 // Dummy index renaming
963 r = rename_dummy_indices<idx>(r, dummy_indices, local_dummy_indices);
964 r = rename_dummy_indices<varidx>(r, dummy_indices, local_dummy_indices);
965 r = rename_dummy_indices<spinidx>(r, dummy_indices, local_dummy_indices);
967 // Product of indexed object with a scalar?
968 if (is_exactly_a<mul>(r) && r.nops() == 2
969 && is_exactly_a<numeric>(r.op(1)) && is_a<indexed>(r.op(0)))
// Let the base object absorb the numeric factor (e.g. for tensors).
970 return ex_to<basic>(r.op(0).op(0)).scalar_mul_indexed(r.op(0), ex_to<numeric>(r.op(1)));
975 /** This structure stores the original and symmetrized versions of terms
976 * obtained during the simplification of sums. */
979 terminfo(const ex & orig_, const ex & symm_) : orig(orig_), symm(symm_) {}
981 ex orig; /**< original term */
982 ex symm; /**< symmetrized term */
// Sort/compare terminfo records by their symmetrized form.
985 class terminfo_is_less {
987 bool operator() (const terminfo & ti1, const terminfo & ti2) const
989 return (ti1.symm.compare(ti2.symm) < 0);
993 /** This structure stores the individual symmetrized terms obtained during
994 * the simplification of sums. */
997 symminfo() : num(0) {}
999 symminfo(const ex & symmterm_, const ex & orig_, size_t num_) : orig(orig_), num(num_)
// Split off a trailing numeric coefficient so equal subterms with
// different coefficients can be combined.
1001 if (is_exactly_a<mul>(symmterm_) && is_exactly_a<numeric>(symmterm_.op(symmterm_.nops()-1))) {
1002 coeff = symmterm_.op(symmterm_.nops()-1);
1003 symmterm = symmterm_ / coeff;
1006 symmterm = symmterm_;
1010 ex symmterm; /**< symmetrized term */
1011 ex coeff; /**< coefficient of symmetrized term */
1012 ex orig; /**< original term */
1013 size_t num; /**< how many symmetrized terms resulted from the original term */
// Orderings over symminfo by symmetrized term and by original term.
1016 class symminfo_is_less_by_symmterm {
1018 bool operator() (const symminfo & si1, const symminfo & si2) const
1020 return (si1.symmterm.compare(si2.symmterm) < 0);
1024 class symminfo_is_less_by_orig {
1026 bool operator() (const symminfo & si1, const symminfo & si2) const
1028 return (si1.orig.compare(si2.orig) < 0);
// Recursively test whether expression 'x' contains an idx whose symbol
// (op(0)) equals 'sym'.
1032 bool hasindex(const ex &x, const ex &sym)
1034 if(is_a<idx>(x) && x.op(0)==sym)
1037 for(size_t i=0; i<x.nops(); ++i)
1038 if(hasindex(x.op(i), sym))
1043 /** Simplify indexed expression, return list of free indices. */
1044 ex simplify_indexed(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
1046 // Expand the expression
1047 ex e_expanded = e.expand();
1049 // Simplification of single indexed object: just find the free indices
1050 // and perform dummy index renaming/repositioning
1051 if (is_a<indexed>(e_expanded)) {
1053 // Find the dummy indices
1054 const indexed &i = ex_to<indexed>(e_expanded);
1055 exvector local_dummy_indices;
1056 find_free_and_dummy(i.seq.begin() + 1, i.seq.end(), free_indices, local_dummy_indices);
1058 // Filter out the dummy indices with variance
1059 exvector variant_dummy_indices;
1060 find_variant_indices(local_dummy_indices, variant_dummy_indices);
1062 // Any indices with variance present at all?
1063 if (!variant_dummy_indices.empty()) {
1065 // Yes, reposition them
1066 exvector moved_indices;
1067 reposition_dummy_indices(e_expanded, variant_dummy_indices, moved_indices);
1070 // Rename the dummy indices
1071 e_expanded = rename_dummy_indices<idx>(e_expanded, dummy_indices, local_dummy_indices);
1072 e_expanded = rename_dummy_indices<varidx>(e_expanded, dummy_indices, local_dummy_indices);
1073 e_expanded = rename_dummy_indices<spinidx>(e_expanded, dummy_indices, local_dummy_indices);
1077 // Simplification of sum = sum of simplifications, check consistency of
1078 // free indices in each term
1079 if (is_exactly_a<add>(e_expanded)) {
1082 free_indices.clear();
1084 for (size_t i=0; i<e_expanded.nops(); i++) {
1085 exvector free_indices_of_term;
1086 ex term = simplify_indexed(e_expanded.op(i), free_indices_of_term, dummy_indices, sp);
1087 if (!term.is_zero()) {
// First non-zero term fixes the expected free-index set.
1089 free_indices = free_indices_of_term;
1093 if (!indices_consistent(free_indices, free_indices_of_term)) {
1094 std::ostringstream s;
1095 s << "simplify_indexed: inconsistent indices in sum: ";
1096 s << exprseq(free_indices) << " vs. " << exprseq(free_indices_of_term);
1097 throw (std::runtime_error(s.str()));
// Give the base object a chance to combine terms (e.g. add tensors).
1099 if (is_a<indexed>(sum) && is_a<indexed>(term))
1100 sum = ex_to<basic>(sum.op(0)).add_indexed(sum, term);
1107 // If the sum turns out to be zero, we are finished
1108 if (sum.is_zero()) {
1109 free_indices.clear();
1113 // More than one term and more than one dummy index?
1114 size_t num_terms_orig = (is_exactly_a<add>(sum) ? sum.nops() : 1);
1115 if (num_terms_orig < 2 || dummy_indices.size() < 2)
1118 // Chop the sum into terms and symmetrize each one over the dummy
1120 std::vector<terminfo> terms;
1121 for (size_t i=0; i<sum.nops(); i++) {
1122 const ex & term = sum.op(i);
// Only the dummy indices actually occurring in this term matter.
1123 exvector dummy_indices_of_term;
1124 dummy_indices_of_term.reserve(dummy_indices.size());
1125 for (auto & i : dummy_indices)
1126 if (hasindex(term,i.op(0)))
1127 dummy_indices_of_term.push_back(i);
1128 ex term_symm = idx_symmetrization<idx>(term, dummy_indices_of_term);
1129 term_symm = idx_symmetrization<varidx>(term_symm, dummy_indices_of_term);
1130 term_symm = idx_symmetrization<spinidx>(term_symm, dummy_indices_of_term);
// Terms whose symmetrization vanishes are dropped entirely.
1131 if (term_symm.is_zero())
1133 terms.push_back(terminfo(term, term_symm));
1136 // Sort by symmetrized terms
1137 std::sort(terms.begin(), terms.end(), terminfo_is_less());
1139 // Combine equal symmetrized terms
1140 std::vector<terminfo> terms_pass2;
1141 for (std::vector<terminfo>::const_iterator i=terms.begin(); i!=terms.end(); ) {
1144 while (j != terms.end() && j->symm == i->symm) {
1148 terms_pass2.push_back(terminfo(i->orig * num, i->symm * num));
1152 // If there is only one term left, we are finished
1153 if (terms_pass2.size() == 1)
1154 return terms_pass2[0].orig;
1156 // Chop the symmetrized terms into subterms
1157 std::vector<symminfo> sy;
1158 for (auto & i : terms_pass2) {
1159 if (is_exactly_a<add>(i.symm)) {
1160 size_t num = i.symm.nops();
1161 for (size_t j=0; j<num; j++)
1162 sy.push_back(symminfo(i.symm.op(j), i.orig, num));
1164 sy.push_back(symminfo(i.symm, i.orig, 1));
1167 // Sort by symmetrized subterms
1168 std::sort(sy.begin(), sy.end(), symminfo_is_less_by_symmterm());
1170 // Combine equal symmetrized subterms
1171 std::vector<symminfo> sy_pass2;
1173 for (auto i=sy.begin(); i!=sy.end(); ) {
1175 // Combine equal terms
1177 if (j != sy.end() && j->symmterm == i->symmterm) {
1179 // More than one term, collect the coefficients
1180 ex coeff = i->coeff;
1181 while (j != sy.end() && j->symmterm == i->symmterm) {
1186 // Add combined term to result
1187 if (!coeff.is_zero())
1188 result.push_back(coeff * i->symmterm);
1192 // Single term, store for second pass
1193 sy_pass2.push_back(*i);
1199 // Were there any remaining terms that didn't get combined?
1200 if (sy_pass2.size() > 0) {
1202 // Yes, sort by their original terms
1203 std::sort(sy_pass2.begin(), sy_pass2.end(), symminfo_is_less_by_orig());
1205 for (std::vector<symminfo>::const_iterator i=sy_pass2.begin(); i!=sy_pass2.end(); ) {
1207 // How many symmetrized terms of this original term are left?
1210 while (j != sy_pass2.end() && j->orig == i->orig) {
1215 if (num == i->num) {
1217 // All terms left, then add the original term to the result
1218 result.push_back(i->orig);
1222 // Some terms were combined with others, add up the remaining symmetrized terms
1223 std::vector<symminfo>::const_iterator k;
1224 for (k=i; k!=j; k++)
1225 result.push_back(k->coeff * k->symmterm);
1232 // Add all resulting terms
1233 ex sum_symm = dynallocate<add>(result);
1234 if (sum_symm.is_zero())
1235 free_indices.clear();
1239 // Simplification of products
1240 if (is_exactly_a<mul>(e_expanded)
1241 || is_exactly_a<ncmul>(e_expanded)
1242 || (is_exactly_a<power>(e_expanded) && is_a<indexed>(e_expanded.op(0)) && e_expanded.op(1).is_equal(_ex2)))
1243 return simplify_indexed_product(e_expanded, free_indices, dummy_indices, sp);
1245 // Cannot do anything
1246 free_indices.clear();
1250 /** Simplify/canonicalize expression containing indexed objects. This
1251 * performs contraction of dummy indices where possible and checks whether
1252 * the free indices in sums are consistent.
1254 * @param options Simplification options (currently unused)
1255 * @return simplified expression */
1256 ex ex::simplify_indexed(unsigned options) const
1258 exvector free_indices, dummy_indices;
1260 return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
1263 /** Simplify/canonicalize expression containing indexed objects. This
1264 * performs contraction of dummy indices where possible, checks whether
1265 * the free indices in sums are consistent, and automatically replaces
1266 * scalar products by known values if desired.
1268 * @param sp Scalar products to be replaced automatically
1269 * @param options Simplification options (currently unused)
1270 * @return simplified expression */
1271 ex ex::simplify_indexed(const scalar_products & sp, unsigned options) const
1273 exvector free_indices, dummy_indices;
1274 return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
1277 /** Symmetrize expression over its free indices. */
1278 ex ex::symmetrize() const
1280 return GiNaC::symmetrize(*this, get_free_indices());
1283 /** Antisymmetrize expression over its free indices. */
1284 ex ex::antisymmetrize() const
1286 return GiNaC::antisymmetrize(*this, get_free_indices());
1289 /** Symmetrize expression by cyclic permutation over its free indices. */
1290 ex ex::symmetrize_cyclic() const
1292 return GiNaC::symmetrize_cyclic(*this, get_free_indices());
1299 spmapkey::spmapkey(const ex & v1_, const ex & v2_, const ex & dim_) : dim(dim_)
1301 // If indexed, extract base objects
1302 ex s1 = is_a<indexed>(v1_) ? v1_.op(0) : v1_;
1303 ex s2 = is_a<indexed>(v2_) ? v2_.op(0) : v2_;
1305 // Enforce canonical order in pair
1306 if (s1.compare(s2) > 0) {
1315 bool spmapkey::operator==(const spmapkey &other) const
1317 if (!v1.is_equal(other.v1))
1319 if (!v2.is_equal(other.v2))
1321 if (is_a<wildcard>(dim) || is_a<wildcard>(other.dim))
1324 return dim.is_equal(other.dim);
1327 bool spmapkey::operator<(const spmapkey &other) const
1329 int cmp = v1.compare(other.v1);
1332 cmp = v2.compare(other.v2);
1336 // Objects are equal, now check dimensions
1337 if (is_a<wildcard>(dim) || is_a<wildcard>(other.dim))
1340 return dim.compare(other.dim) < 0;
1343 void spmapkey::debugprint() const
1345 std::cerr << "(" << v1 << "," << v2 << "," << dim << ")";
1348 void scalar_products::add(const ex & v1, const ex & v2, const ex & sp)
1350 spm[spmapkey(v1, v2)] = sp;
1353 void scalar_products::add(const ex & v1, const ex & v2, const ex & dim, const ex & sp)
1355 spm[spmapkey(v1, v2, dim)] = sp;
1358 void scalar_products::add_vectors(const lst & l, const ex & dim)
1360 // Add all possible pairs of products
1361 for (auto & it1 : l)
1362 for (auto & it2 : l)
1363 add(it1, it2, it1 * it2);
1366 void scalar_products::clear()
1371 /** Check whether scalar product pair is defined. */
1372 bool scalar_products::is_defined(const ex & v1, const ex & v2, const ex & dim) const
1374 return spm.find(spmapkey(v1, v2, dim)) != spm.end();
1377 /** Return value of defined scalar product pair. */
1378 ex scalar_products::evaluate(const ex & v1, const ex & v2, const ex & dim) const
1380 return spm.find(spmapkey(v1, v2, dim))->second;
1383 void scalar_products::debugprint() const
1385 std::cerr << "map size=" << spm.size() << std::endl;
1386 for (auto & it : spm) {
1387 const spmapkey & k = it.first;
1388 std::cerr << "item key=";
1390 std::cerr << ", value=" << it.second << std::endl;
1394 exvector get_all_dummy_indices_safely(const ex & e)
1396 if (is_a<indexed>(e))
1397 return ex_to<indexed>(e).get_dummy_indices();
1398 else if (is_a<power>(e) && e.op(1)==2) {
1399 return e.op(0).get_free_indices();
1401 else if (is_a<mul>(e) || is_a<ncmul>(e)) {
1403 exvector free_indices;
1404 for (std::size_t i = 0; i < e.nops(); ++i) {
1405 exvector dummies_of_factor = get_all_dummy_indices_safely(e.op(i));
1406 dummies.insert(dummies.end(), dummies_of_factor.begin(),
1407 dummies_of_factor.end());
1408 exvector free_of_factor = e.op(i).get_free_indices();
1409 free_indices.insert(free_indices.begin(), free_of_factor.begin(),
1410 free_of_factor.end());
1412 exvector free_out, dummy_out;
1413 find_free_and_dummy(free_indices.begin(), free_indices.end(), free_out,
1415 dummies.insert(dummies.end(), dummy_out.begin(), dummy_out.end());
1418 else if(is_a<add>(e)) {
1420 for(std::size_t i = 0; i < e.nops(); ++i) {
1421 exvector dummies_of_term = get_all_dummy_indices_safely(e.op(i));
1422 sort(dummies_of_term.begin(), dummies_of_term.end());
1424 set_union(result.begin(), result.end(), dummies_of_term.begin(),
1425 dummies_of_term.end(), std::back_inserter<exvector>(new_vec),
1427 result.swap(new_vec);
1434 /** Returns all dummy indices from the exvector */
1435 exvector get_all_dummy_indices(const ex & e)
1439 product_to_exvector(e, p, nc);
1440 auto ip = p.begin(), ipend = p.end();
1442 while (ip != ipend) {
1443 if (is_a<indexed>(*ip)) {
1444 v1 = ex_to<indexed>(*ip).get_dummy_indices();
1445 v.insert(v.end(), v1.begin(), v1.end());
1447 while (ip1 != ipend) {
1448 if (is_a<indexed>(*ip1)) {
1449 v1 = ex_to<indexed>(*ip).get_dummy_indices(ex_to<indexed>(*ip1));
1450 v.insert(v.end(), v1.begin(), v1.end());
1460 lst rename_dummy_indices_uniquely(const exvector & va, const exvector & vb)
1462 exvector common_indices;
1463 set_intersection(va.begin(), va.end(), vb.begin(), vb.end(), std::back_insert_iterator<exvector>(common_indices), ex_is_less());
1464 if (common_indices.empty()) {
1465 return lst{lst{}, lst{}};
1467 exvector new_indices, old_indices;
1468 old_indices.reserve(2*common_indices.size());
1469 new_indices.reserve(2*common_indices.size());
1470 exvector::const_iterator ip = common_indices.begin(), ipend = common_indices.end();
1471 while (ip != ipend) {
1472 ex newsym = dynallocate<symbol>();
1474 if(is_exactly_a<spinidx>(*ip))
1475 newidx = dynallocate<spinidx>(newsym, ex_to<spinidx>(*ip).get_dim(),
1476 ex_to<spinidx>(*ip).is_covariant(),
1477 ex_to<spinidx>(*ip).is_dotted());
1478 else if (is_exactly_a<varidx>(*ip))
1479 newidx = dynallocate<varidx>(newsym, ex_to<varidx>(*ip).get_dim(),
1480 ex_to<varidx>(*ip).is_covariant());
1482 newidx = dynallocate<idx>(newsym, ex_to<idx>(*ip).get_dim());
1483 old_indices.push_back(*ip);
1484 new_indices.push_back(newidx);
1485 if(is_a<varidx>(*ip)) {
1486 old_indices.push_back(ex_to<varidx>(*ip).toggle_variance());
1487 new_indices.push_back(ex_to<varidx>(newidx).toggle_variance());
1491 return lst{lst(old_indices.begin(), old_indices.end()), lst(new_indices.begin(), new_indices.end())};
1495 ex rename_dummy_indices_uniquely(const exvector & va, const exvector & vb, const ex & b)
1497 lst indices_subs = rename_dummy_indices_uniquely(va, vb);
1498 return (indices_subs.op(0).nops()>0 ? b.subs(ex_to<lst>(indices_subs.op(0)), ex_to<lst>(indices_subs.op(1)), subs_options::no_pattern|subs_options::no_index_renaming) : b);
1501 ex rename_dummy_indices_uniquely(const ex & a, const ex & b)
1503 exvector va = get_all_dummy_indices_safely(a);
1504 if (va.size() > 0) {
1505 exvector vb = get_all_dummy_indices_safely(b);
1506 if (vb.size() > 0) {
1507 sort(va.begin(), va.end(), ex_is_less());
1508 sort(vb.begin(), vb.end(), ex_is_less());
1509 lst indices_subs = rename_dummy_indices_uniquely(va, vb);
1510 if (indices_subs.op(0).nops() > 0)
1511 return b.subs(ex_to<lst>(indices_subs.op(0)), ex_to<lst>(indices_subs.op(1)), subs_options::no_pattern|subs_options::no_index_renaming);
1517 ex rename_dummy_indices_uniquely(exvector & va, const ex & b, bool modify_va)
1519 if (va.size() > 0) {
1520 exvector vb = get_all_dummy_indices_safely(b);
1521 if (vb.size() > 0) {
1522 sort(vb.begin(), vb.end(), ex_is_less());
1523 lst indices_subs = rename_dummy_indices_uniquely(va, vb);
1524 if (indices_subs.op(0).nops() > 0) {
1526 for (auto & i : ex_to<lst>(indices_subs.op(1)))
1528 exvector uncommon_indices;
1529 set_difference(vb.begin(), vb.end(), indices_subs.op(0).begin(), indices_subs.op(0).end(), std::back_insert_iterator<exvector>(uncommon_indices), ex_is_less());
1530 for (auto & ip : uncommon_indices)
1532 sort(va.begin(), va.end(), ex_is_less());
1534 return b.subs(ex_to<lst>(indices_subs.op(0)), ex_to<lst>(indices_subs.op(1)), subs_options::no_pattern|subs_options::no_index_renaming);
1541 ex expand_dummy_sum(const ex & e, bool subs_idx)
1543 ex e_expanded = e.expand();
1544 pointer_to_map_function_1arg<bool> fcn(expand_dummy_sum, subs_idx);
1545 if (is_a<add>(e_expanded) || is_a<lst>(e_expanded) || is_a<matrix>(e_expanded)) {
1546 return e_expanded.map(fcn);
1547 } else if (is_a<ncmul>(e_expanded) || is_a<mul>(e_expanded) || is_a<power>(e_expanded) || is_a<indexed>(e_expanded)) {
1549 if (is_a<indexed>(e_expanded))
1550 v = ex_to<indexed>(e_expanded).get_dummy_indices();
1552 v = get_all_dummy_indices(e_expanded);
1553 ex result = e_expanded;
1554 for (const auto & nu : v) {
1555 if (ex_to<idx>(nu).get_dim().info(info_flags::nonnegint)) {
1556 int idim = ex_to<numeric>(ex_to<idx>(nu).get_dim()).to_int();
1558 for (int i=0; i < idim; i++) {
1559 if (subs_idx && is_a<varidx>(nu)) {
1560 ex other = ex_to<varidx>(nu).toggle_variance();
1561 en += result.subs(lst{
1563 other == idx(i, idim)
1566 en += result.subs( nu.op(0) == i );
1578 } // namespace GiNaC