How to use the msrest.authentication.CognitiveServicesCredentials class in msrest

To help you get started, we’ve selected a few msrest examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github Azure / azure-sdk-for-python / tools / azure-sdk-tools / devtools_testutils / View on Github external
'kind': self.kind,
                    'properties': {}
            time.sleep(10)  # it takes a few seconds to create a cognitive services account
            self.resource = cogsci_account
            self.cogsci_key = self.client.accounts.list_keys(, name).key1
            # FIXME: LuisAuthoringClient and LuisRuntimeClient need authoring key from ARM API (coming soon-ish)
            self.resource = FakeCognitiveServicesAccount("https://{}".format(self.location))
            self.cogsci_key = 'ZmFrZV9hY29jdW50X2tleQ=='

        if self.legacy:
            return {
                self.parameter_name: self.resource.endpoint,
                '{}_key'.format(self.parameter_name): CognitiveServicesCredentials(self.cogsci_key),
            return {
                self.parameter_name: self.resource.endpoint,
                '{}_key'.format(self.parameter_name): self.cogsci_key,
github Azure-Samples / cognitive-services-python-sdk-samples / samples / language / luis / View on Github external
def runtime(subscription_key):

    This will execute LUIS prediction
    client = LUISRuntimeClient(

        query = "Look for hotels near LAX airport"
        print("Executing query: {}".format(query))
        result = client.prediction.resolve(
            "bce13896-4de3-4783-9696-737d8fde8cd1",  # LUIS Application ID

        print("\nDetected intent: {} (score: {:d}%)".format(
        print("Detected entities:")
        for entity in result.entities:
github Azure-Samples / cognitive-services-python-sdk-samples / samples / search / View on Github external
def image_search(subscription_key):

    This will search images for (canadian rockies) then verify number of results and print out first image result, pivot suggestion, and query expansion.
    client = ImageSearchClient(

        image_results ="canadian rockies")
        print("Search images for query \"canadian rockies\"")

        # Image results
        if image_results.value:
            first_image_result = image_results.value[0]
            print("Image result count: {}".format(len(image_results.value)))
            print("First image insights token: {}".format(
            print("First image thumbnail url: {}".format(
            print("First image content url: {}".format(
github Azure-Samples / cognitive-services-quickstart-code / python / LUIS / sdk-3x / View on Github external
# get_status returns a list of training statuses, one for each model. Loop through them and make sure all are done.
		waiting = any(map(lambda x: 'Queued' == x.details.status or 'InProgress' == x.details.status, info))
		if waiting:
			print ("Waiting 10 seconds for training to complete...")
			print ("trained")
			waiting = False
	responseEndpointInfo = client.apps.publish(app_id, versionId, is_staging=False)
	runtimeCredentials = CognitiveServicesCredentials(authoringKey)
	clientRuntime = LUISRuntimeClient(endpoint=predictionEndpoint, credentials=runtimeCredentials)

    # Production == slot name
	predictionRequest = { "query" : "I want two small pepperoni pizzas with more salsa" }
	predictionResponse = clientRuntime.prediction.get_slot_prediction(app_id, "Production", predictionRequest)
	print("Top intent: {}".format(predictionResponse.prediction.top_intent))
	print("Sentiment: {}".format (predictionResponse.prediction.sentiment))
	print("Intents: ")

	for intent in predictionResponse.prediction.intents:
		print("\t{}".format (json.dumps (intent)))
	print("Entities: {}".format (predictionResponse.prediction.entities))
github Azure-Samples / cognitive-services-quickstart-code / python / LUIS / python-sdk-authoring-prediction / View on Github external
from azure.cognitiveservices.language.luis.authoring import LUISAuthoringClient
from msrest.authentication import CognitiveServicesCredentials

import datetime, json, os, time


authoring_endpoint = ""

# Instantiate a LUIS client
client = LUISAuthoringClient(authoring_endpoint, CognitiveServicesCredentials(authoring_key))

def create_app():
	# Create a new LUIS app
	app_name    = "Contoso {}".format(
	app_desc    = "Flight booking app built with LUIS Python SDK."
	app_version = "0.1"
	app_locale  = "en-us"

	app_id = client.apps.add(dict(name=app_name,

	print("Created LUIS app {}\n    with ID {}".format(app_name, app_id))
github Azure-Samples / cognitive-services-python-sdk-samples / samples / vision / View on Github external
def image_analysis_in_stream(subscription_key):

    This will analyze an image from a stream and return all available features.
    client = ComputerVisionClient(
        endpoint="https://" + COMPUTERVISION_LOCATION + "",

    with open(os.path.join(IMAGES_FOLDER, "house.jpg"), "rb") as image_stream:
        image_analysis = client.analyze_image_in_stream(
                VisualFeatureTypes.image_type, # Could use simple str "ImageType"
                VisualFeatureTypes.faces,      # Could use simple str "Faces"
                VisualFeatureTypes.categories, # Could use simple str "Categories"
                VisualFeatureTypes.color,      # Could use simple str "Color"
                VisualFeatureTypes.tags,       # Could use simple str "Tags"
                VisualFeatureTypes.description # Could use simple str "Description"

    print("This image can be described as: {}\n".format(image_analysis.description.captions[0].text))
github Azure-Samples / cognitive-services-python-sdk-samples / samples / View on Github external
def entire_detect(subscription_key):
    print("Sample of detecting anomalies in the entire series.")

    endpoint = "https://{}".format(ANOMALYDETECTOR_LOCATION)
        client = AnomalyDetectorClient(endpoint, CognitiveServicesCredentials(subscription_key))
        request = get_request()
        response = client.entire_detect(request)
        if True in response.is_anomaly:
            print("Anomaly was detected from the series at index:")
            for i in range(len(request.series)):
                if response.is_anomaly[i]:
            print("There is no anomaly detected from the series.")
    except Exception as e:
        if isinstance(e, APIErrorException):
            print("Error code: {}".format(e.error.code))
            print("Error message: {}".format(e.error.message))
github Azure-Samples / cognitive-services-quickstart-code / python / LUIS / python-sdk-authoring-prediction / View on Github external
# Use public app ID or replace with your own trained and published app's ID
# to query your own app
# public appID = 'df67dcdb-c37d-46af-88e1-8b97951ca1c2'
print("luisAppID: {}".format(luisAppID))

# `production` or `staging`
luisSlotName = 'production'
print("luisSlotName: {}".format(luisSlotName))

# Instantiate a LUIS runtime client
clientRuntime = LUISRuntimeClient(runtime_endpoint, CognitiveServicesCredentials(runtime_key))

def predict(app_id, slot_name):

	request = { "query" : "turn on all lights" }

	# Note be sure to specify, using the slot_name parameter, whether your application is in staging or production.
	response = clientRuntime.prediction.get_slot_prediction(app_id=app_id, slot_name=slot_name, prediction_request=request)

	print("Top intent: {}".format(response.prediction.top_intent))
	print("Sentiment: {}".format (response.prediction.sentiment))
	print("Intents: ")

	for intent in response.prediction.intents:
github Azure-Samples / cognitive-services-python-sdk-samples / samples / search / View on Github external
def video_search_with_filtering(subscription_key):

    This will search videos for (Bellevue Trailer) that is free, short and 1080p resolution then verify number of results and print out id, name and url of first video result
    client = VideoSearchAPI(CognitiveServicesCredentials(subscription_key))

        video_result =
            query="Bellevue Trailer",
  ,  # Can use the str "free" too
            length=VideoLength.short,   # Can use the str "short" too
            resolution=VideoResolution.hd1080p  # Can use the str "hd1080p" too
        print("Search videos for query \"Bellevue Trailer\" that is free, short and 1080p resolution")

        if video_result.value:
            first_video_result = video_result.value[0]
            print("Video result count: {}".format(len(video_result.value)))
            print("First video id: {}".format(first_video_result.video_id))
            print("First video name: {}".format(
            print("First video url: {}".format(first_video_result.content_url))
github Azure-Samples / cognitive-services-personalizer-samples / quickstarts / python / View on Github external
key_var_name = 'PERSONALIZER_KEY'
if not key_var_name in os.environ:
	raise Exception('Please set/export the environment variable: {}'.format(key_var_name))
personalizer_key = os.environ[key_var_name]

# Replace :
endpoint_var_name = 'PERSONALIZER_ENDPOINT'
if not endpoint_var_name in os.environ:
	raise Exception('Please set/export the environment variable: {}'.format(endpoint_var_name))
personalizer_endpoint = os.environ[endpoint_var_name]

# Instantiate a Personalizer client
client = PersonalizerClient(personalizer_endpoint, CognitiveServicesCredentials(personalizer_key))

def get_actions():
    """Return the fixed catalogue of food actions for Personalizer to rank."""
    # (action id, feature dicts) for each candidate food item.
    catalogue = [
        ('pasta', [{"taste": "salty", "spice_level": "medium"},
                   {"nutrition_level": 5, "cuisine": "italian"}]),
        ('ice cream', [{"taste": "sweet", "spice_level": "none"},
                       {"nutritional_level": 2}]),
        ('juice', [{"taste": "sweet", "spice_level": "none"},
                   {"nutritional_level": 5},
                   {"drink": True}]),
        ('salad', [{"taste": "salty", "spice_level": "none"},
                   {"nutritional_level": 2}]),
    ]
    return [RankableAction(id=item_id, features=feats)
            for item_id, feats in catalogue]

def get_user_preference():
    res = {}
    taste_features = ['salty','sweet']
    pref = input("What type of food would you prefer? Enter number 1.salty 2.sweet\n")