taalhaataahir01022001
Modular
Created by TomLucidor on 11/15/2024 in #questions
Has anyone tried to get an LLM to code in Mojo, or convert Python into Mojo?
https://github.com/tairov/llama2.mojo There are also a few examples in the Modular GitHub repo via MAX: https://github.com/modularml/max/tree/main/examples
3 replies
Modular
Created by taalhaataahir01022001 on 10/8/2024 in #questions
Reshape MAX API OP giving error
And it's not just reshape, e.g., this program uses the split op:
# imports added for completeness, matching the reshape example below, plus Symbol
from max.graph.type import TensorType, Type
from random import seed
from max import engine
from max.tensor import Tensor
from max.graph import Graph, Symbol, ops

fn main() raises:
    seed(43)
    var session = engine.InferenceSession()
    var t = Tensor[DType.float32].randn((6, 4))
    print("t:\n", t)

    var splitting = Graph(in_types=List[Type](TensorType(DType.float32, 6, 4)))
    var s = ops.split[2](splitting[0], (2, 4))
    var o = List[Symbol]()
    o.append(s[0])
    o.append(s[1])
    splitting.output(o)
    splitting.verify()
    var splt = session.load(splitting)

    var results = splt.execute("input0", t)
    var output = results.get[DType.float32]("output0")
    print("output:\n", output)

    var output1 = results.get[DType.float32]("output1")
    print("output1:\n", output1)
And this is the output log:
t:
Tensor([[1.2788280248641968, -1.2873831987380981, -0.56482106447219849, 1.5331770181655884],
[0.80901926755905151, -0.24991747736930847, -0.8085181713104248, 0.42681333422660828],
[1.8432825803756714, -0.60748469829559326, 0.24848183989524841, -0.3757627010345459],
[1.6167833805084229, 1.0938529968261719, 0.35813808441162109, 0.071096442639827728],
[1.5071860551834106, -0.54008448123931885, -0.18514452874660492, 0.30583590269088745],
[0.34934023022651672, -1.6976139545440674, -1.2638821601867676, -0.97573941946029663]], dtype=float32, shape=6x4)
output:
Tensor([[2.2701208643344754e+37, 4.2833490159016683e-41, -0.56482106447219849, 1.5331770181655884],
[0.80901926755905151, -0.24991747736930847, -0.8085181713104248, 0.42681333422660828]], dtype=float32, shape=2x4)
output1:
Tensor([[1.8432825803756714, -0.60748469829559326, 0.24848183989524841, -0.3757627010345459],
[1.6167833805084229, 1.0938529968261719, 0.35813808441162109, 0.071096442639827728],
[1.5071860551834106, -0.54008448123931885, -0.18514452874660492, 0.30583590269088745],
[0.34934023022651672, -1.6976139545440674, -1.2638821601867676, -0.97573941946029663]], dtype=float32, shape=4x4)
It is also returning wrong values for the first two elements of row 1 of output.
4 replies
Modular
Created by taalhaataahir01022001 on 10/8/2024 in #questions
Reshape MAX API OP giving error
Sorry, my bad!! I didn't have the correct imports. Also, I'm facing some issues with the code: ops.reshape is showing unexpected behaviour. Here's my code:
from max.graph.type import TensorType, Dim, Type
from random import seed
from max import engine
from max.tensor import Tensor
from max.graph import Graph, ops

fn main() raises:
    seed(43)
    var session = engine.InferenceSession()
    var t = Tensor[DType.float32].randn((1, 2, 2))
    print("t:\n", t)

    var dim1 = Dim(2)
    var dim2 = Dim(2)
    var x = List[Dim]()
    x.append(dim1)
    x.append(dim2)

    var graph0 = Graph(in_types=List[Type](TensorType(DType.float32, 1, 2, 2)))
    var r = ops.reshape(graph0[0], x)
    graph0.output(r)
    graph0.verify()
    var rs = session.load(graph0)

    var results = rs.execute("input0", t)
    var output = results.get[DType.float32]("output0")
    print("output:\n", output)
Output:
t:
 Tensor([[[1.2788280248641968, -1.2873831987380981], [-0.56482106447219849, 1.5331770181655884]]], dtype=float32, shape=1x2x2)
output:
 Tensor([[0.0, 0.0], [-0.56482106447219849, 1.5331770181655884]], dtype=float32, shape=2x2)
But if I add the following code at the end of my code:
print("t.reshape((2,2)):\n", t.reshape((2,2)))
print("t.reshape((2,2)):\n", t.reshape((2,2)))
The output becomes correct:
t:
 Tensor([[[1.2788280248641968, -1.2873831987380981], [-0.56482106447219849, 1.5331770181655884]]], dtype=float32, shape=1x2x2)
output:
 Tensor([[1.2788280248641968, -1.2873831987380981], [-0.56482106447219849, 1.5331770181655884]], dtype=float32, shape=2x2)
t.reshape((2,2)):
 Tensor([[1.2788280248641968, -1.2873831987380981], [-0.56482106447219849, 1.5331770181655884]], dtype=float32, shape=2x2)
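A hedged guess, not confirmed in the thread: Mojo destroys values eagerly after their last use, so t may already be freed when execute() reads its buffer; the trailing t.reshape print gives t a later use, which would explain why the output becomes correct. A minimal sketch of that idea, keeping t alive explicitly:
from max.graph.type import TensorType, Dim, Type
from random import seed
from max import engine
from max.tensor import Tensor
from max.graph import Graph, ops

fn main() raises:
    seed(43)
    var session = engine.InferenceSession()
    var t = Tensor[DType.float32].randn((1, 2, 2))

    var x = List[Dim]()
    x.append(Dim(2))
    x.append(Dim(2))

    var graph0 = Graph(in_types=List[Type](TensorType(DType.float32, 1, 2, 2)))
    graph0.output(ops.reshape(graph0[0], x))
    graph0.verify()

    var rs = session.load(graph0)
    var results = rs.execute("input0", t)
    print("output:\n", results.get[DType.float32]("output0"))

    # Hypothetical fix: discard `t` here so its lifetime extends past execute().
    # Without a use after execute(), Mojo's ASAP destruction may free the
    # tensor's buffer while the engine is still reading it.
    _ = t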
4 replies
Modular
Created by taalhaataahir01022001 on 8/29/2024 in #questions
Dynamic Shapes to MAX GRAPH API
Now I'm getting the same error with layer norm, i.e.,
var graph2 = Graph(in_types=List[Type](TensorType(DType.float32, "a", "b", "c"), TensorType(DType.float32, "c"), TensorType(DType.float32, "c")))
gives the error:
Unhandled exception caught during execution: Unsupported dim type: symbolic expression
Previously it was working fine!!
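One way to narrow this down is to swap the symbolic dims for static ones and see whether verify and load succeed; a minimal sketch (the ops.layer_norm call, the epsilon value, and the concrete sizes are my assumptions, for illustration only):
var graph2 = Graph(in_types=List[Type](
    TensorType(DType.float32, 2, 3, 4),  # static stand-ins for "a", "b", "c"
    TensorType(DType.float32, 4),        # gamma, shaped like "c"
    TensorType(DType.float32, 4)))       # beta, shaped like "c"
var ln = ops.layer_norm(graph2[0], graph2[1], graph2[2], 1e-5)  # assumed signature
graph2.output(ln)
graph2.verify()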
3 replies
Modular
Created by JulianJS on 7/22/2024 in #questions
Matrix Multiplication (matmul): `numpy` hard to beat? even by mojo?
4 replies
Modular
Created by taalhaataahir01022001 on 6/4/2024 in #questions
Time taken in Inference session
I'm new, and sorry for asking such basic questions. 1. I am compiling my code using mojo, i.e.,
mojo build matmul.mojo
and then running the executable. Does this mean that the compilation of the graph using the MAX Engine compiler occurs at runtime, and that optimizations on the graph are also performed at runtime?
2. During execution, does the Mojo compiler invoke the MAX Engine compiler to compile and optimize the graph?
3. I have written my entire model in Mojo. Now I am planning to break the model down into custom operations in MAX Graph. Will this provide performance benefits, since the MAX Engine compiler might further optimize my custom operations written in Mojo?
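On question 1: one way to check is to time session.load() separately from execute(); if load() dominates, the MAX Engine compiler is doing graph compilation and optimization at runtime, after mojo build has already produced the executable. A minimal sketch, assuming Mojo's time.now() nanosecond timer (the helper name and structure are mine):
from time import now
from max import engine
from max.graph import Graph
from max.tensor import Tensor

fn time_load_and_execute(graph: Graph, t: Tensor[DType.float32]) raises:
    var session = engine.InferenceSession()

    var start = now()
    var model = session.load(graph)  # MAX graph compilation/optimization happens here
    print("session.load ns:", now() - start)

    start = now()
    var results = model.execute("input0", t)  # runs the already-compiled graph
    print("execute ns:", now() - start)
    _ = results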
4 replies
Modular
Created by taalhaataahir01022001 on 5/21/2024 in #questions
x86 Intrinsics
Thanks, working now!
5 replies
Modular
Created by taalhaataahir01022001 on 3/12/2024 in #questions
GRAPH API MOTensor not working
Got it! I never imported it 😛
from max.graph import MOTensor
2 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
Plus, it's not only when I update the DynamicVector of objects; it also creates new objects using the copy constructor even if I just access a specific index, e.g.,
print(arg[0].sz)
or
if arg[1].sz == 0
also invokes the copy constructor. And it has nothing to do with whether the vector is passed to a function by reference or not. Even if I access the vector of objects in the same scope where it's created, the copy constructor is called. I compared it with C++ vectors to see whether DynamicVectors are equivalent to vectors in C++:
#include <cstdio>
#include <vector>

class MyClass {
    // minimal body filled in for completeness
    int data;
public:
    MyClass(int d) : data(d) {}
    int getData() const { return data; }
};

void printData(std::vector<MyClass>& myObjects) {
    for (int i = 0; i < 3; i++) {
        printf("%d\n", myObjects[i].getData());
    }
}

int main() {
    // Create a vector of objects of MyClass
    std::vector<MyClass> myObjects;
    myObjects.push_back(MyClass(1));
    myObjects.push_back(MyClass(2));
    myObjects.push_back(MyClass(3));

    printf("%d\n", myObjects[0].getData());
    printData(myObjects);
}
Neither printf("%d\n", myObjects[0].getData()) nor the printData function invokes any copy constructor, which makes sense.
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
And you can install it using apt install perf. You might need one or two more dependencies, which can easily be found on the web.
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
There's a tool named perf. It's pretty easy to use, just do:

perf record ./executable
And it records the time taken inside each function. To view the output as shown above in the screenshot:
perf report
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
No description
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
sure thanks
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
No description
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
Although the DynamicVector "c" is updated outside "newfunc2", why is a new "HuffmanEntry" object being created while updating the "sz" member of an object already stored in vector c? And how can I avoid this, i.e., update the object stored within the dynamic vector instead of creating a new object?
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
@sora I was using DynamicVectors previously, but I was getting this issue. Here's an example I've created:
from collections.vector import DynamicVector  # import added for completeness

struct HuffmanEntry(CollectionElement):
    var sz: Int
    var codeword: Int
    var decoded: Int

    fn __init__(inout self, sz: Int, codeword: Int, decoded: Int):
        self.sz = sz
        self.codeword = codeword
        self.decoded = decoded

    fn __copyinit__(inout self, existing: Self):
        print("CopyInit")
        self.sz = existing.sz
        self.codeword = existing.codeword
        self.decoded = existing.decoded

    fn __moveinit__(inout self, owned existing: Self):
        self.sz = existing.sz
        self.codeword = existing.codeword
        self.decoded = existing.decoded


fn newfunc(inout arg: HuffmanEntry) -> Int:
    arg.sz = 3
    return 0

fn newfunc2(inout arg: DynamicVector[HuffmanEntry]) -> Int:
    for i in range(3):
        arg[i].sz = 4
    return 0

fn main() raises:
    var a = HuffmanEntry(0, 0, 0)
    let b: Int = newfunc(a)

    var c = DynamicVector[HuffmanEntry]()

    c.append(a)
    c.append(a)
    c.append(a)

    let d: Int = newfunc2(c)
In the above code, the __copyinit__ function is called 6 times: 3 times when I use the append function (which is completely fine). But when I send the DynamicVector "c" to the next function, i.e., newfunc2, and update the member variable "sz", the copy constructor is called again. Why is that the case? According to my understanding, when you use "inout" with an argument, the original object is sent to the function as a reference, not a copy of that object.
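A hedged workaround sketch, assuming DynamicVector's __getitem__ returns the element by value in this Mojo version (my reading of the behaviour above, not confirmed by the thread): do an explicit read-modify-write so the copy is intentional and the update definitely lands back in the vector:
fn newfunc2(inout arg: DynamicVector[HuffmanEntry]) -> Int:
    for i in range(3):
        # Read the element out (one copy), mutate it, then store it back,
        # rather than mutating through arg[i] directly.
        var entry = arg[i]
        entry.sz = 4
        arg[i] = entry
    return 0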
17 replies
Modular
Created by taalhaataahir01022001 on 1/14/2024 in #questions
Create 1D array of objects
No description
17 replies
Modular
Created by taalhaataahir01022001 on 1/6/2024 in #questions
convert UInt8 to UInt16
Thank you so much 🙂
3 replies
Modular
Created by taalhaataahir01022001 on 1/5/2024 in #questions
How to convert a byte to integer?
Thanks! It helps.
4 replies
Modular
Created by taalhaataahir01022001 on 1/5/2024 in #questions
How to convert a byte to integer?
No description
4 replies