Skip to content

Commit 5f56152 — "Use smaller model for tests with petals" (parent commit: 739ff29)

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

backend/python/petals/test_petals.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,8 @@ def test_load_model(self):
             self.setUp()
             with grpc.insecure_channel("localhost:50051") as channel:
                 stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.LoadModel(backend_pb2.ModelOptions(Model="petals-team/StableBeluga"))
+                response = stub.LoadModel(backend_pb2.ModelOptions(Model="bigscience/bloom-560m"))
+                print(response)
                 self.assertTrue(response.success)
                 self.assertEqual(response.message, "Model loaded successfully")
         except Exception as err:
@@ -64,7 +65,7 @@ def test_text(self):
             self.setUp()
             with grpc.insecure_channel("localhost:50051") as channel:
                 stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.LoadModel(backend_pb2.ModelOptions(Model="petals-team/StableBeluga"))
+                response = stub.LoadModel(backend_pb2.ModelOptions(Model="bigscience/bloom-560m"))
                 self.assertTrue(response.success)
                 req = backend_pb2.PredictOptions(prompt="The capital of France is")
                 resp = stub.Predict(req)

0 commit comments

Comments
 (0)