[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Getfem-commits] (no subject)
From: |
Yves Renard |
Subject: |
[Getfem-commits] (no subject) |
Date: |
Sun, 6 May 2018 14:55:20 -0400 (EDT) |
branch: devel-yves-generic-assembly-modifs
commit 203047f6d3c94f74cc83eb2063f68992c65b47c1
Author: Yves Renard <address@hidden>
Date: Sun May 6 20:25:27 2018 +0200
extend transpose operation a bit
---
doc/sphinx/source/userdoc/gasm_high.rst | 2 +-
src/getfem_generic_assembly_compile_and_exec.cc | 71 ++++++++++++++++++++-----
src/getfem_generic_assembly_semantic.cc | 47 ++++++++++------
3 files changed, 90 insertions(+), 30 deletions(-)
diff --git a/doc/sphinx/source/userdoc/gasm_high.rst
b/doc/sphinx/source/userdoc/gasm_high.rst
index c879f7b..cd3fb2f 100644
--- a/doc/sphinx/source/userdoc/gasm_high.rst
+++ b/doc/sphinx/source/userdoc/gasm_high.rst
@@ -480,7 +480,7 @@ Unary operators
- ``-`` the unary minus operator: change the sign of an expression.
- - ``'`` stands for the transpose of a matrix or line view of a vector.
+ - ``'`` stands for the transpose of a matrix or line view of a vector. If a
tensor ``A`` is of order greater than two, ``A'`` denotes the inversion of the
two first indices.
- ``Contract(A, i, j)`` stands for the contraction of tensor A with respect
to its ith and jth indices. The first index is numbered 1. For instance,
``Contract(A, 1, 2)`` is equivalent to ``Trace(A)`` for a matrix ``A``.
diff --git a/src/getfem_generic_assembly_compile_and_exec.cc
b/src/getfem_generic_assembly_compile_and_exec.cc
index af7d52a..68d7548 100644
--- a/src/getfem_generic_assembly_compile_and_exec.cc
+++ b/src/getfem_generic_assembly_compile_and_exec.cc
@@ -1741,22 +1741,56 @@ namespace getfem {
struct ga_instruction_transpose : public ga_instruction {
base_tensor &t;
const base_tensor &tc1;
+ size_type n1, n2, nn;
virtual int exec() {
GA_DEBUG_INFO("Instruction: transpose");
GA_DEBUG_ASSERT(t.size() == tc1.size(), "Wrong sizes");
- size_type order = t.sizes().size();
- size_type s1 = t.sizes()[order-2], s2 = t.sizes()[order-1];
- size_type s = t.size() / (s1*s2);
- for (size_type i = 0; i < s1; ++i)
- for (size_type j = 0; j < s2; ++j) {
- base_tensor::iterator it = t.begin() + s*(i + s1*j);
- base_tensor::const_iterator it1 = tc1.begin() + s*(j + s2*i);
- for (size_type k = 0; k < s; ++k) *it++ = *it1++;
- }
+
+ size_type n0 = tc1.size() / (n1*n2*nn);
+ auto it = t.begin();
+ for (size_type i = 0; i < nn; ++i) {
+ size_type s1 = i*n1*n2*n0;
+ for (size_type j = 0; j < n1; ++j) {
+ size_type s2 = s1 + j*n0;
+ for (size_type k = 0; k < n2; ++k) {
+ size_type s3 = s2 + k*n1*n0;
+ for (size_type l = 0; l < n0; ++l, ++it)
+ *it = tc1[s3+l];
+ }
+ }
+ }
+ GA_DEBUG_ASSERT(it == t.end(), "Wrong sizes");
return 0;
}
- ga_instruction_transpose(base_tensor &t_, const base_tensor &tc1_)
- : t(t_), tc1(tc1_) {}
+ ga_instruction_transpose(base_tensor &t_, const base_tensor &tc1_,
+ size_type n1_, size_type n2_, size_type nn_)
+ : t(t_), tc1(tc1_), n1(n1_), n2(n2_), nn(nn_) {}
+ };
+
+ struct ga_instruction_transpose_no_test : public ga_instruction {
+ base_tensor &t;
+ const base_tensor &tc1;
+ size_type n1, n2, nn;
+ virtual int exec() {
+ GA_DEBUG_INFO("Instruction: transpose");
+ GA_DEBUG_ASSERT(t.size() == tc1.size(), "Wrong sizes");
+
+ auto it = t.begin();
+ for (size_type i = 0; i < nn; ++i) {
+ size_type s1 = i*n1*n2;
+ for (size_type j = 0; j < n1; ++j) {
+ size_type s2 = s1 + j;
+ for (size_type k = 0; k < n2; ++k, ++it)
+ *it = tc1[s2 + k*n1];
+ }
+ }
+ GA_DEBUG_ASSERT(it == t.end(), "Wrong sizes");
+ return 0;
+ }
+ ga_instruction_transpose_no_test(base_tensor &t_, const base_tensor &tc1_,
+ size_type n1_, size_type n2_,
+ size_type nn_)
+ : t(t_), tc1(tc1_), n1(n1_), n2(n2_), nn(nn_) {}
};
struct ga_instruction_transpose_test : public ga_instruction {
@@ -5719,9 +5753,18 @@ namespace getfem {
break;
case GA_QUOTE:
- if (pnode->tensor_proper_size() != 1) {
- pgai = std::make_shared<ga_instruction_transpose>
- (pnode->tensor(), child0->tensor());
+ if (pnode->tensor_proper_size() > 1) {
+ size_type n1 = child0->tensor_proper_size(0);
+ size_type n2 = child0->tensor_proper_size(1);
+ size_type nn = 1;
+ for (size_type i = 2; i < child0->tensor_order(); ++i)
+ nn *= child0->tensor_proper_size(i);
+ if (child0->nb_test_functions() == 0)
+ pgai = std::make_shared<ga_instruction_transpose_no_test>
+ (pnode->tensor(), child0->tensor(), n1, n2, nn);
+ else
+ pgai = std::make_shared<ga_instruction_transpose>
+ (pnode->tensor(), child0->tensor(), n1, n2, nn);
rmi.instructions.push_back(std::move(pgai));
} else {
pnode->t.set_to_copy(child0->t);
diff --git a/src/getfem_generic_assembly_semantic.cc
b/src/getfem_generic_assembly_semantic.cc
index 07340cb..431655f 100644
--- a/src/getfem_generic_assembly_semantic.cc
+++ b/src/getfem_generic_assembly_semantic.cc
@@ -819,8 +819,11 @@ namespace getfem {
mi = size0;
if (child0->tensor_proper_size() == 1)
{ tree.replace_node_by_child(pnode, 0); pnode = child0; break; }
- else if (dim0 == 2) std::swap(mi.back(), mi[size0.size()-2]);
- else { size_type N = mi.back(); mi.back() = 1; mi.push_back(N); }
+ else if (dim0 == 1)
+ { size_type N = mi.back(); mi.back() = 1; mi.push_back(N); }
+ else std::swap(mi[child0->nb_test_functions()],
+ mi[child0->nb_test_functions()+1]);
+
pnode->t.adjust_sizes(mi);
pnode->test_function_type = child0->test_function_type;
@@ -830,22 +833,29 @@ namespace getfem {
pnode->interpolate_name_test2 = child0->interpolate_name_test2;
pnode->qdim1 = child0->qdim1;
pnode->qdim2 = child0->qdim2;
- if (all_cte) {
+ if (child0->node_type == GA_NODE_ZERO) {
+ pnode->node_type = GA_NODE_ZERO;
+ gmm::clear(pnode->tensor().as_vector());
+ tree.clear_children(pnode);
+ } else if (all_cte) {
pnode->node_type = GA_NODE_CONSTANT;
pnode->test_function_type = 0;
- if (dim0 == 2) {
- for (size_type i = 0; i < mi.back(); ++i)
- for (size_type j = 0; j < mi[size0.size()-2]; ++j)
- pnode->tensor()(j, i) = child0->tensor()(i,j);
- } else if (dim0 == 1) {
- for (size_type i = 0; i < mi.back(); ++i)
+
+ if (dim0 == 1) {
+ for (size_type i = 0; i < mi.back(); ++i)
pnode->tensor()(0, i) = child0->tensor()[i];
+ } else {
+ size_type n1 = child0->tensor_proper_size(0);
+ size_type n2 = child0->tensor_proper_size(1);
+ size_type nn = child0->tensor().size()/(n1*n2);
+ auto it = pnode->tensor().begin();
+ for (size_type i = 0; i < nn; ++i)
+ for (size_type j = 0; j < n1; ++j)
+ for (size_type k = 0; k < n2; ++k, ++it)
+ *it = child0->tensor()[j+k*n1+i*n1*n2];
+ GA_DEBUG_ASSERT(it == pnode->tensor().end(), "Wrong sizes");
}
tree.clear_children(pnode);
- } else if (child0->node_type == GA_NODE_ZERO) {
- pnode->node_type = GA_NODE_ZERO;
- gmm::clear(pnode->tensor().as_vector());
- tree.clear_children(pnode);
}
break;
@@ -3628,8 +3638,15 @@ namespace getfem {
ga_node_grad(tree, workspace, m, child0);
break;
-
-
+ // case GA_QUOTE:
+ // Si vecteur alors reshape(grad argument)
+ // Si matrice, alors inversion premier indice du gradient de l'argument
+ // .. il faut donc étendre le quote aux indices multiples ...
+ // ga_node_grad(tree, workspace, m, child0);
+
+ // case GA_SYM: remplacer par (T+T')/2 et appel recursif (avec marked
+ // mis à jour sur les fils)
+
#ifdef continue_here
case GA_QUOTE: case GA_SYM: case GA_SKEW: